# erdc/proteus: proteus/tests/ci/test_Isosurface.py (MIT license)
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import pytest
class TestIsosurface(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup_method(self,method):
pass
def teardown_method(self,method):
        for f in os.listdir("."):
            if f.endswith(".pov"):
                os.remove(f)
FileList = ['isostats',
'proteus_default.log',
'proteus.inc',
'proteus.log']
for file in FileList:
if os.path.isfile(file):
os.remove(file)
@pytest.mark.skip
def test_povgen(self):
import difflib
import subprocess
subprocess.check_call(['povgen.py',
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'comparison_files',
'floating_bar'),
'-s',
'3'])
povfiles = []
for i in range(3):
filename = 'phi_t_0.000000_{0:04d}.pov'.format(i)
with open(filename, 'r') as f:
povfiles.append(f.readlines())
subprocess.check_call(['tar', 'xzf', os.path.join(os.path.dirname(os.path.abspath(__file__)),
'comparison_files',
'phi_t_0.000000_000.tgz')])
saved_povfiles = []
for i in range(3):
filename = 'phi_t_0.000000_{0:04d}.pov'.format(i)
with open(filename, 'r') as f:
saved_povfiles.append(f.readlines())
assert saved_povfiles[i] == povfiles[i], \
''.join(list(difflib.unified_diff(saved_povfiles[i],
povfiles[i],
"archived",
"test")))
# erdc/proteus: proteus/ErrorEstimators.py (MIT license)
"""
Classes for a posteriori error estimation
.. inheritance-diagram:: proteus.ErrorEstimators
:parts: 1
"""
from __future__ import absolute_import
from builtins import range
from builtins import object
from .Profiling import logEvent
class HierarchicalMeshEstimator(object):
def __init__(self,mlTransport):
self.mlTransport = mlTransport
def calculate(self):
import numpy
from . import Norms
from . import FemTools
t=0.0
nLevels = len(self.mlTransport.uList)
assert nLevels > 1, "Need at least two grids for hierarchical mesh estimate"
coefficients = self.mlTransport.modelList[-1].coefficients
mCoarse = self.mlTransport.modelList[-2]
uCoarse = self.mlTransport.modelList[-2].u
mFine = self.mlTransport.modelList[-1]
uFine = self.mlTransport.modelList[-1].u
proj_uCoarse = [FemTools.FiniteElementFunction(mFine.u[ci].femSpace) for ci in range(coefficients.nc)]
proj_uCoarse_q = [numpy.zeros(mFine.q[('u',ci)].shape,'d') for ci in range(coefficients.nc)]
elementError = [numpy.zeros((mFine.mesh.nElements_global,),'d') for ci in range(coefficients.nc)]
elementTagArray = numpy.zeros((mFine.mesh.nElements_global,),'i')
        elementTagArray.flat[:] = 0
localRefinement=False
error = 0.0
for ci in range(coefficients.nc):
self.mlTransport.meshTransfers.prolong_bcListDict[ci][-1].matvec(uCoarse[ci].dof,
proj_uCoarse[ci].dof)
#load Dirichlet conditions in
for dofN,g in mFine.dirichletConditions[ci].DOFBoundaryConditionsDict.items():
proj_uCoarse[ci].dof[dofN] = g(mFine.dirichletConditions[ci].DOFBoundaryPointDict[dofN],t)
            proj_uCoarse[ci].getValues(mFine.q[('v',ci)],proj_uCoarse_q[ci])
error += Norms.L2errorSFEM_local(mFine.q[('dV_u',ci)],
mFine.q[('u',ci)],
proj_uCoarse_q[ci],
elementError[ci])
for eN in range(mFine.mesh.nElements_global):
if (mFine.mesh.nElements_global*elementError[ci][eN]/error) > 5.0:
elementTagArray[eN] = 1
localRefinement=True
if not localRefinement:
elementTagArray.flat[:]=1
logEvent("error "+repr(error),level=3)#mwf debug turn off,"elementTagArray",elementTagArray
return (error,elementTagArray)
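
# Hypothetical usage sketch (not part of this module's API): given a multilevel
# transport object with at least two mesh levels, the estimator returns a global
# error estimate and a per-element refinement tag array on the finest mesh.
# `mlTransport` is assumed to be set up elsewhere.
#
#   estimator = HierarchicalMeshEstimator(mlTransport)
#   error, elementTagArray = estimator.calculate()
#   # elementTagArray[eN] == 1 marks element eN for refinement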
# erdc/proteus: proteus/SimTools.py (MIT license)
"""
Collect higher-level tools for running simulations, processing results, etc.
.. inheritance-diagram:: proteus.SimTools
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import input
from builtins import range
from past.utils import old_div
from builtins import object
from . import Norms
import numpy
import numpy as np
from . import FemTools
from .Profiling import logEvent
from proteus import Comm
comm = Comm.get()
#dummy classes for computing exact solution norms with error functions
class zeroFunction(object):
def uOfX(self,x):
return 0.0
def uOfXT(self,x,t):
return 0.0
class zeroVectorFunction(object):
def __init__(self,shape):
self.shape=shape
def uOfX(self,x):
return numpy.zeros(self.shape,'d')
def uOfXT(self,x,t):
return numpy.zeros(self.shape,'d')
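
# A sketch of why the zero "solutions" above are useful: passing zeroFunction()
# to an error routine such as Norms.L2errorSFEMvsAF2 makes the computed "error"
# the norm of the numerical solution itself, which SimulationProcessor uses
# below to report relative errors.  The arrays are assumed to come from a
# level model m, mirroring the calls in stepProcessError:
#
#   exa = Norms.L2errorSFEMvsAF2(zeroFunction(),
#                                m.q['x'], m.q['dV'],
#                                np.ones_like(list(m.elementQuadratureWeights.values())[0]),
#                                m.q[('u',ci)], T=tsim)
#   relative_error = err/(exa + 1.0e-15)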
class SimulationProcessor(object):
"""
    Collect functionality for processing simulation results, e.g.,
    calculating errors, plotting, and saving data to disk.
TO DO
be able to append data to file correctly
"""
import math
defaultFlags = {'simulationName':None, #label for simulation
'simulationNameProc':None, #label for simulation and processor id
'dataFile':'results.dat', #file for holding results
'dataDir' :'.', #where file is located
'appendResults':False, #append to existing data files?
'echo':False, #print to screen
'echoRelativeErrors':False, #print to screen also relative errors
'components':[0], #list of components to monitor
'errorQuantities':[None], #quantities in which to estimate error
'errorNorms':[None], #norms to use for error calc
'errorTimes':[None], #time levels at which to estimate error (All,Last, ...),
'errorTypes':[None], #types of error to calculate (u-u_h,mass, etc)
'plotQuantities':[None], #quantities to plot
'plotTimes':[None], #time levels to plot (All,Last,tList ...
'plotOptions':{'ensight':{'on':False},
'gnuplot':{'on':False},
'matlab':{'on':False},
'vtk':{'on':False}}, #options controlling way to plot
'storeQuantities':[None], #quantities to write to disk
'storeTimes':[None]} #time levels to store times (All, Last, tList ...)
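    # A hypothetical flags dictionary a driver script might pass in; any key
    # not supplied falls back to the defaults above (see __init__):
    #
    #   simFlags = {'simulationName' : 'my_run',
    #               'components'     : [0],
    #               'errorTypes'     : ['numericalSolution','globalMassBalance'],
    #               'errorQuantities': ['u'],
    #               'errorNorms'     : ['L2','LI'],
    #               'errorTimes'     : ['All'],
    #               'echo'           : True}
    #   simProc = SimulationProcessor(flags=simFlags,nLevels=n.nLevels,pFile=p,nFile=n)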
notSetVal = -12345.0
#possible errors to measure
ErrorQuantities = ['m','u','grad(u)','velocity']
#possible error norms
ErrorNorms = ['L1','L2','LI','TV','H1','H1semi','W11','W11semi',
'L1_L1','L1_LI','L2_L2','L2_LI','TV_L1','TV_LI']
#
ErrorTimes = ['All','Last','tList']
#type of error to check (e.g., mass balance,
ErrorTypes = ['globalMassBalance','globalHeavisideMassBalance',
'localMassBalance',
'numericalSolution']
#
PlotTimes = ['All','Last','Init','tList']
#
PlotQuantities = ['u','u_exact','velocity','velocity_exact',"q:('%s',%d)","q:('%s',%d,%d)"]
#
PlotOptions = {'ensight':{'on':False},
'gnuplot':{'on':False,'setGnuplotGridSize':True},
'matlab':{'on':False,'usePDEtoolbox':False},
'vtk':{'on':False,'pause':False,'hardcopy':False}}
#
StoreTimes = ['All','Last','Init','tList']
#
StoreQuantities = ['u','u_dof','errorData','simulationData','mesh',"q:('%s',%d)",'multilevelModel']
    #mwf temporary step before setting up full archiving
#data members that need to be stored to reconstruct a solution:
SolutionDataStorageData = ['nSpace_global','name','dof','dofMap.l2g','dim_dof',
'isVector',
'CGDOFMap.lagrangesNodesArray','CGDOFMap.l2g']
def __init__(self,flags=None,nLevels=1,pFile=None,nFile=None,
analyticalSolution={}):
"""
save flags for what to compute, how to compute it,
and labels for storing things, etc
"""
self.analyticalSolution = {}
if analyticalSolution is not None:
self.analyticalSolution = analyticalSolution
self.timeValues = []
self.plotOffSet = None
self.stepPlotCalled = {}
self.stepPlotCalled['exact']=False; self.stepPlotCalled['elementQuantities']=False
self.stepPlotCalled['ensight']=False; self.stepPlotCalled['ensightElementQuantities']=False
self.plotWindowStart= {}
self.nLevels = nLevels
self.flags = {}#force a deep copy?
for key,val in SimulationProcessor.defaultFlags.items():
self.flags[key] = val
#mwf for postprocessing nodal values of coefficients etc
self.nodalQuadratureInfo = None
#store p and n files now
self.pFile = pFile; self.nFile = nFile
if flags is not None:
for key in list(self.flags.keys()):
if key in list(flags.keys()):
self.flags[key]=flags[key]
#end key found
#end for all keys
#end input flags given
#need to check flags given are in allowed ranges
self.errorData = {}
for ci in self.flags['components']:
self.errorData[ci] = {}
for il in range(self.nLevels):
self.errorData[ci][il] = {}
#end ci
if 'globalMassBalance' in self.flags['errorTypes']:
for ci in self.flags['components']:
for il in range(self.nLevels):
self.errorData[ci][il]['globalMass0'] = SimulationProcessor.notSetVal
self.errorData[ci][il]['globalMassF'] = [SimulationProcessor.notSetVal]
if 'globalHeavisideMassBalance' in self.flags['errorTypes']:
for ci in self.flags['components']:
for il in range(self.nLevels):
self.errorData[ci][il]['globalHeavisideMass0'] = SimulationProcessor.notSetVal
self.errorData[ci][il]['globalHeavisideMassF'] = [SimulationProcessor.notSetVal]
if 'localMassBalance' in self.flags['errorTypes']:
self.conservationResidual = {}
self.elementResidual = {}
for ci in self.flags['components']:
for il in range(self.nLevels):
self.errorData[ci][il]['localMassBalance'] = [SimulationProcessor.notSetVal]
self.conservationResidual[il] = None
self.elementResidual[il] = None
#
if 'numericalSolution' in self.flags['errorTypes']:
for equant in self.flags['errorQuantities']:
for enorm in self.flags['errorNorms']:
ktmp = 'error_'+equant+'_'+enorm
xtmp = 'exact_'+equant+'_'+enorm
#put in way to control cross component quantities
for ci in self.flags['components']:
for il in range(self.nLevels):
self.errorData[ci][il][ktmp] = []
self.errorData[ci][il][xtmp] = []
#for il
#for ci
#end for enorm
#end for
#end if
#save info about the spatial mesh?
self.simulationData = {}
self.simulationData['spatialMesh'] = {}
#save values for levels and time steps
for il in range(self.nLevels):
self.simulationData['spatialMesh'][il] = {}
self.simulationData['spatialMesh'][il]['nNodes_global']= []
self.simulationData['spatialMesh'][il]['h'] = []
self.simulationData['spatialMesh'][il]['hMin'] = []
#end il
self.solutionData = {}
if ('u_dof' in self.flags['storeQuantities']):
for ci in self.flags['components']:
self.solutionData[ci] = {}
for il in range(self.nLevels):
self.solutionData[ci][il] = {}
if 'u_dof' in self.flags['storeQuantities']:
self.solutionData[ci][il]['u_dof']= None
self.solutionData[ci][il]['l2g'] = None
#il
#ci
#u_dof
#mwf hack, decide if storing heavy data here, about to move fully to archiving
self.storeHeavyData = False
self.dataStorage = None
#mwf debug
#print "SimTools before dataStorage decision dataStorage=%s " % (self.dataStorage)
if self.flags['storeTimes'] != [None]:
import os
import shelve
absfile = os.path.join(self.flags['dataDir'],self.flags['dataFile'])
if self.flags['appendResults'] == True:
if not os.path.exists(absfile):
#figure out which exception to raise
assert 0, "SimTools append=True but couldn't find storage file=%s" % absfile
else:
if not os.path.exists(self.flags['dataDir']) and comm.isMaster():
os.makedirs(self.flags['dataDir'])
if os.path.exists(absfile):
logEvent("Warning SimTools storing data removing old data in %s " % absfile)
#try:
os.remove(absfile)
#except:
#pass
#
self.dataStorage = shelve.open(absfile)
#mwf debug
#print "SimTools opening dataStorage file=%s dataStorage=%s " % (absfile,self.dataStorage)
assert self.dataStorage is not None, "dataStorage is None storeTimes=%s absfile=%s " % (self.flags['storeTimes'],
absfile)
#end storing something
#end init
    def preprocess(self,mlvt,tsim):
        """
        calculate desired quantities before simulation starts

        input :
          p    --- problem definition (taken from self.pFile)
          n    --- numerics definition (taken from self.nFile)
          mlvt --- multilevel vector transport object that has quantities for measuring
          tsim --- simulation time

        TO DO:
          local mass balance
          solution plotting of initial condition?
        """
        from . import Viewers
self.timeValues.append(tsim)
p = self.pFile; n = self.nFile
if 'globalMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
self.errorData[ci][il]['globalMass0'] = Norms.globalScalarDomainIntegral(m.q['abs(det(J))'],
m.elementQuadratureWeights[('m',ci)],
m.q[('m',ci)])
if self.flags['echo']:
logEvent("""t= %g globalMass0[%d][%d] = %g """ % (tsim,ci,il,
self.errorData[ci][il]['globalMass0']))
#end if
#end if
#end ci
#end il
#end calc globalMassBalance
if 'globalHeavisideMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
hm = numpy.where(m.q[('m',ci)] >= 0.0,1.0,0.0)
self.errorData[ci][il]['globalHeavisideMass0'] = Norms.globalScalarDomainIntegral(m.q['abs(det(J))'],
m.elementQuadratureWeights[('m',ci)],
hm)
if self.flags['echo']:
logEvent("""t= %g globalHeavisideMass0[%d][%d] = %g """ % (tsim,ci,il,
self.errorData[ci][il]['globalHeavisideMass0']))
#end if
#end if
#end ci
#end il
#end calc globalHeavisideMassBalance
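        # What Norms.globalScalarDomainIntegral computes above, in sketch form
        # (my reading of the call, assuming the usual q[eN,k] quadrature layout
        # with reference weights w[k] and metric term abs_det_J[eN,k]):
        #
        #   integral = sum_eN sum_k  f[eN,k] * w[k] * abs_det_J[eN,k]
        #
        # or, as a hypothetical numpy equivalent (not the library routine):
        #
        #   integral = numpy.sum(f * w[numpy.newaxis,:] * abs_det_J)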
#
#
        #now include velocity as element quantity if velocity flag is given in plotQuantities
if 'velocity' in self.flags['plotQuantities']:
has_q_velocity = False ; has_ebq_global_velocity = False
for ci in self.flags['components']:
pcikey = "q:('velocity',%s)" % ci
if ('velocity',ci) in mlvt.levelModelList[-1].q:
has_q_velocity = True
if pcikey not in self.flags['plotQuantities']:
self.flags['plotQuantities'].append(pcikey)
pcikey = "ebq:('velocity',%s)" % ci
if ('velocity',ci) in mlvt.levelModelList[-1].ebq_global:
has_ebq_global_velocity = True
if pcikey not in self.flags['plotQuantities']:
self.flags['plotQuantities'].append(pcikey)
#ebq_global
#q
#end velocity key fix
### set options for various output types ...
for plotter in list(SimulationProcessor.defaultFlags['plotOptions'].keys()):
if plotter not in self.flags['plotOptions']:
self.flags['plotOptions'][plotter] = {'on':False}
#mwf debug
#import pdb
#pdb.set_trace()
#set on flags based on viewer
if 'viewerType' in dir(Viewers):
for plotter in list(self.flags['plotOptions'].keys()):
if plotter == Viewers.viewerType:
self.flags['plotOptions'][plotter]['on']=True
if (self.flags['plotOptions']['gnuplot']['on'] and
'setGnuplotGridSize' not in self.flags['plotOptions']['gnuplot']):
self.flags['plotOptions']['gnuplot']['setGnuplotGridSize'] = True
if (self.flags['plotOptions']['matlab']['on'] and
'usePDEtoolbox' not in self.flags['plotOptions']['matlab']):
self.flags['plotOptions']['matlab']['usePDEtoolbox'] = False
if self.flags['plotOptions']['vtk']['on']:
if 'pause' not in self.flags['plotOptions']['vtk']:
self.flags['plotOptions']['vtk']['pause']=False
if 'hardcopy' not in self.flags['plotOptions']['vtk']:
self.flags['plotOptions']['vtk']['hardcopy']=False
#try to setup ensight header files correctly
if self.flags['plotOptions']['ensight']['on']:
if 'caseFileName' not in self.flags['plotOptions']['ensight']:
self.flags['plotOptions']['ensight']['caseFileName'] = self.flags['simulationNameProc']
mlvt.levelModelList[-1].u[0].femSpace.writeMeshEnsight(self.flags['plotOptions']['ensight']['caseFileName'],
self.flags['plotOptions']['ensight']['caseFileName'])
self.ensightTimeSeries = []
ensight_q_header_written = False; ensight_ebq_global_header_written = False;
#NOTE: looks like ensight only allows one set of measured nodes
# so we enforce that either q: entries are plotted or ebq_global
#element quadrature entries
self.plottingQuadratureValuesForEnsight={'elements':False,'elementBoundaries':False}
for quant in self.flags['plotQuantities']:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if stval in mlvt.levelModelList[-1].q and not ensight_q_header_written:
self.writeEnsightMeshForElementQuantities(self.flags['plotOptions']['ensight']['caseFileName'],mlvt)
ensight_q_header_written = True
self.plottingQuadratureValuesForEnsight['elements']=True
#
#q
#quant
if not self.plottingQuadratureValuesForEnsight['elements']:
for quant in self.flags['plotQuantities']:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebq_global': #found element boundary quadrature (global) quantity
stval = eval(recType[1])
if stval in mlvt.levelModelList[-1].ebq_global and not ensight_ebq_global_header_written:
self.writeEnsightMeshForElementBoundaryQuantities(self.flags['plotOptions']['ensight']['caseFileName'],mlvt)
ensight_ebq_global_header_written = True
self.plottingQuadratureValuesForEnsight['elementBoundaries']=True
#
#ebq_global
#quantities
#go ahead and write some or all of preamble?
case_filename = self.flags['plotOptions']['ensight']['caseFileName']
caseOut=open(case_filename+'.case','a')
caseOut.write('VARIABLE\n')
caseOut.close()
mFinest = mlvt.levelModelList[-1]
#solution values
for ci in self.flags['components']:
mFinest.u[ci].femSpace.writeFunctionHeaderEnsight(mFinest.u[ci],case_filename,append=False,
firstVariable=False)
#velocity dofs
if mFinest.coefficients.vectorComponents is not None:
if len(mFinest.coefficients.vectorComponents) == 2:
vcomp = [mFinest.coefficients.vectorComponents[0],
mFinest.coefficients.vectorComponents[1]]
mFinest.u[vcomp[0]].femSpace.writeE2VectorFunctionHeaderEnsight(mFinest.u[vcomp[0]],
mFinest.u[vcomp[1]],
case_filename,
nOutput=mFinest.u[vcomp[0]].femSpace.nOutput-1,
append=False,
firstVariable=False)
elif len(mFinest.coefficients.vectorComponents) == 3:
vcomp = [mFinest.coefficients.vectorComponents[0],
mFinest.coefficients.vectorComponents[1],
mFinest.coefficients.vectorComponents[2]]
mFinest.u[vcomp[0]].femSpace.writeE3VectorFunctionHeaderEnsight(mFinest.u[vcomp[0]],
mFinest.u[vcomp[1]],
mFinest.u[vcomp[2]],
case_filename,
nOutput=mFinest.u[vcomp[0]].femSpace.nOutput-1,
append=False,
firstVariable=False)
#
for quant in self.flags['plotQuantities']:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 2): #found quantity and it's a scalar
self.writeScalarElementFunctionHeaderEnsight(stval,case_filename,append=False,firstVariable=False)
elif (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 3): #found quantity and it's a vector
self.writeVectorElementFunctionHeaderEnsight(stval,case_filename,append=False,firstVariable=False)
#vec
#in q dict
#element boundary (global) quadrature entries
if not self.plottingQuadratureValuesForEnsight['elements']:
for quant in self.flags['plotQuantities']:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebq_global': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 2): #found quantity and it's a scalar
self.writeScalarElementFunctionHeaderEnsight(stval,case_filename,append=False,firstVariable=False)
elif (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 3): #found quantity and it's a vector
self.writeVectorElementFunctionHeaderEnsight(stval,case_filename,append=False,firstVariable=False)
#vec
#in ebq_global dict
#quant
#end plot quantities
#end ensight configuration
#cek moving to Viewers.V_base
# if (('Init' in self.flags['plotTimes'] or 'All' in self.flags['plotTimes']) and
# 'u' in self.flags['plotQuantities'] and 'viewerType' in dir(Viewers)):#
# #and p.initialConditions is not None ):
# dgrid = (n.nn-1)*(2**n.nLevels) #default should be 50
# #mwf debug
# #import pdb
# #pdb.set_trace()
# if self.plotOffSet is None:
# self.plotOffSet = Viewers.windowNumber #keep from orphaning windows?
# #don't reset window number
# pause = False
# if self.flags['plotOptions']['vtk']['on']:
# pause = self.flags['plotOptions']['vtk']['pause']
# windowNumberTmp= mlvt.levelModelList[-1].viewSolution(plotOffSet=None,titleModifier=': Initial Condition',
# dgridnx=dgrid,dgridny=dgrid,pause=pause)
# #
# self.stepPlotEnsight(mlvt,tsim)
# #should create new windows if plotted here
# self.stepPlotExact(mlvt,tsim)
# self.stepPlotElementQuantities(mlvt,tsim)
# self.stepPlotElementQuantitiesEnsight(mlvt,tsim)
# if self.flags['plotOptions']['ensight']['on']:
# self.ensightTimeSeries.append(tsim)
#cek
#
if (('Init' in self.flags['storeTimes'] or 'All' in self.flags['storeTimes']) and
p.initialConditions is not None):
if 'u' in self.flags['storeQuantities']:
mlvt.levelModelList[-1].saveSolution()
self.stepStoreQuantities(mlvt,tsim)
#end if
if 'mesh' in self.flags['storeQuantities'] and 'mesh' not in self.dataStorage:
#write out mesh information that is needed by at least ensight?
meshDict = {}
pm = mlvt.levelModelList[-1].mesh
meshDict['nSpace_global']=mlvt.levelModelList[-1].nSpace_global
mmemb = ['nNodes_global','nElements_global','nodeArray','elementNodesArray','nElementBoundaries_global',
'elementBoundaryNodesArray']
for mm in mmemb:
meshDict[mm] = getattr(pm,mm)
self.dataStorage['mesh']=meshDict
#mwf hack
#self.dataStorage['wholeMesh']=mlvt.levelModelList[-1].mesh
#end preproc
def processTimeLevel(self,mlvt,tsim=None,plotOffSet=None):
"""calculate desired quantities after each macro time step
Parameters
----------
mlvt : multilevel vector transport that holds the quantities to measure
tsim : simulation time
"""
# input :
# p --- problem definition
# n --- numerics definition
#
p = self.pFile; n = self.nFile
        if tsim is None:
            tsim = mlvt.levelModelList[-1].timeIntegration.t
self.timeValues.append(tsim)
if plotOffSet is not None:
self.plotOffSet = plotOffSet
if 'All' in self.flags['errorTimes'] or tsim in self.flags['errorTimes']:
self.stepProcessError(mlvt,tsim)
if 'All' in self.flags['storeTimes'] or tsim in self.flags['storeTimes']:
mlvt.levelModelList[-1].saveSolution()
self.stepStoreQuantities(mlvt,tsim)
return self.plotOffSet
def postprocess(self,mlvt,tsim):
"""
calculate desired quantities after simulation ends
Parameters
----------
mlvt : multilevel vector transport that holds the quantities to measure
tsim : simulation time
"""
p = self.pFile; n = self.nFile
if 'Last' in self.flags['errorTimes'] or tsim in self.flags['errorTimes']:
self.stepProcessError(mlvt,tsim)
#now look at global mass balance (could do similar thing each step too)
if 'globalMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
self.errorData[ci][il]['globalMassF'].append(Norms.globalScalarDomainIntegral(m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
m.elementQuadratureWeights[('m',ci)],
m.q[('m',ci)][0:m.mesh.subdomainMesh.nElements_owned]))
if self.flags['echo']:
logEvent("""t= %g globalMassF[%d][%d] = %g globalMassDiff[%d][%d]= %g""" % \
(tsim,ci,il,self.errorData[ci][il]['globalMassF'][-1],ci,il,
self.errorData[ci][il]['globalMassF'][-1]-self.errorData[ci][il]['globalMass0']))
#end if
#end if
#end for ci
#end for il
#end calc globalMassBalance
if 'globalHeavisideMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
hm = numpy.where(m.q[('m',ci)][0:m.mesh.subdomainMesh.nElements_owned] >= 0.0,1.0,0.0)
self.errorData[ci][il]['globalHeavisideMassF'].append(Norms.globalScalarDomainIntegral(m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
m.elementQuadratureWeights[('m',ci)],
hm))
if self.flags['echo']:
logEvent("""t= %g globalHeavisideMassF[%d][%d] = %g globalHeavisideMassDiff[%d][%d]= %g""" % \
(tsim,ci,il,self.errorData[ci][il]['globalHeavisideMassF'][-1],ci,il,
abs(self.errorData[ci][il]['globalHeavisideMassF'][-1]-self.errorData[ci][il]['globalHeavisideMass0'])))
#end if
#end if
#end for ci
#end for il
#end calc globalMassBalance
#compute space-time norms, ...
if 'numericalSolution' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components']:
for snorm in ['L2','L1','LI','H1','H1semi','W11','W11semi']:
kerr = 'error_'+'u'+'_'+snorm
kexa = 'exact_'+'u'+'_'+snorm
calcNorm = kerr in self.errorData[ci][il]
if calcNorm and 'L2_'+snorm in self.flags['errorNorms']:
errTL2 = 0.0
eLast = 0.0
exaTL2 = 0.0
exaLast= 0.0
for it in range(1,len(self.timeValues)):
dt = self.timeValues[it]-self.timeValues[it-1] #timeValues has t0
errTL2 += 0.5*dt*(self.errorData[ci][il][kerr][it-1]**2 +
eLast**2)
eLast = self.errorData[ci][il][kerr][it-1]
exaTL2 += 0.5*dt*(self.errorData[ci][il][kexa][it-1]**2 +
exaLast**2)
exaLast = self.errorData[ci][il][kexa][it-1]
#end it
kerrtL2 = kerr+'_L2'
kexatL2 = kexa+'_L2'
self.errorData[ci][il][kerrtL2] = self.math.sqrt(errTL2)
self.errorData[ci][il][kexatL2] = self.math.sqrt(exaTL2)
if self.flags['echo']:
print("""t= %g; %s[%d][%d]= %g;""" % (tsim,kerrtL2,ci,il,errTL2))
#end if
if calcNorm and 'L1_'+snorm in self.flags['errorNorms']:
                                errTL1 = 0.0
                                eLast  = 0.0
                                exaLast= 0.0
                                #mwf debug
                                #print """postproc timeValues= %s """ % self.timeValues
                                exaTL1 = 0.0
for it in range(1,len(self.timeValues)):
dt = self.timeValues[it]-self.timeValues[it-1] #timeValues has t0
errTL1+= 0.5*dt*(self.errorData[ci][il][kerr][it-1] +
eLast)
eLast = self.errorData[ci][il][kerr][it-1]
#mwf debug
#print """postproc timeValues[%d]=%g err[%d]=%g errTL1=%g """ \
# % (it,self.timeValues[it],it-1,eLast,errTL1)
exaTL1+= 0.5*dt*(self.errorData[ci][il][kexa][it-1] +
exaLast)
exaLast = self.errorData[ci][il][kexa][it-1]
#end it
kerrtL1 = kerr+'_L1'
kexatL1 = kexa+'_L1'
                                self.errorData[ci][il][kerrtL1] = errTL1
                                self.errorData[ci][il][kexatL1] = exaTL1
                                if self.flags['echo']:
                                    print("""t= %g; %s[%d][%d]= %g;""" % (tsim,kerrtL1,ci,il,errTL1))
#end if
#if calcL1+snorm
if calcNorm and 'LI_'+snorm in self.flags['errorNorms']:
errTLI = 0.0
exaTLI = 0.0
for it in range(1,len(self.timeValues)):
errTLI = max(errTLI,self.errorData[ci][il][kerr][it-1])
exaTLI = max(exaTLI,self.errorData[ci][il][kexa][it-1])
#it
kerrtLI = kerr+'_LI' ;
kexatLI = kexa+'_LI' ;
self.errorData[ci][il][kerrtLI] = errTLI
self.errorData[ci][il][kexatLI] = exaTLI
if self.flags['echo']:
print("""t= %g; %s[%d][%d]= %g;""" % (tsim,kerrtLI,ci,il,errTLI))
#end if
#calcLI norm
#end space norms
#end if calc
#end for ci
#end for il
if self.flags['plotOptions']['ensight']['on']:
mlvt.levelModelList[-1].u[0].femSpace.endTimeSeriesEnsight(self.ensightTimeSeries,
self.flags['plotOptions']['ensight']['caseFileName'],
self.flags['plotOptions']['ensight']['caseFileName'])
#
#assume that 'u' taken care of in step process???
if 'Last' in self.flags['storeTimes']:
self.stepStoreQuantities(mlvt,tsim)
#now in stepStoreQuantities?
#if 'u_dof' in self.flags['storeQuantities']:
# for ci in self.flags['components']:
# for il,m in enumerate(mlvt.levelModelList):
# self.solutionData[ci][il]['u_dof'] = m.u[ci].dof
# self.solutionData[ci][il]['l2g'] = m.u[ci].femSpace.dofMap.l2g
# #for
# #for
##if
#store everything basically? ...
#if 'multilevelModel' in self.flags['storeQuantities']:
# #need to make
# self.dataStorage[('multilevelModel',tsim)]=mlvt
#
#if
self.saveToDisk()
def saveToDisk(self):
"""
TO DO
save error to disk
make sure can append if necessary?
"""
if self.flags['storeTimes'] != [None]:
assert self.dataStorage is not None, "dataStorage None storeTimes= %s " % self.flags['storeTimes']
if 'simulationData' in self.flags['storeQuantities']:
self.dataStorage['timeValues'] = self.timeValues
self.dataStorage['simulationData']= self.simulationData
self.dataStorage['flags'] = self.flags
if 'errorData' in self.flags['storeQuantities']:
self.dataStorage['errorData']=self.errorData
#what else to store?
if ('u_dof' in self.flags['storeQuantities']):
self.dataStorage['solutionData'] = self.solutionData
#end solutionData
self.dataStorage.close()
#
#don't store anything
#end def
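    # Hypothetical read-back of the shelve file written by saveToDisk; the
    # path mirrors the 'dataDir'/'dataFile' flags used in __init__:
    #
    #   import os, shelve
    #   results = shelve.open(os.path.join(flags['dataDir'],flags['dataFile']))
    #   timeValues = results['timeValues']
    #   errorData  = results['errorData']   # e.g. errorData[ci][il]['error_u_L2']
    #   results.close()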
def stepProcessError(self,mlvt,tsim):
""" calculate desired error quantities for a single step
Parameters
----------
mlvt : multilevel vector transport that holds the quantities to measure
tsim : simulation time
"""
# TO DO:
# synchronize Norms L*error*AF[,2] functions used to calculate error
# setup to work in parallel
p = self.pFile; n = self.nFile
for il,m in enumerate(mlvt.levelModelList):
self.simulationData['spatialMesh'][il]['nNodes_global'].append(m.mesh.nNodes_global)
self.simulationData['spatialMesh'][il]['h'].append(m.mesh.h)
self.simulationData['spatialMesh'][il]['hMin'].append(m.mesh.hMin)
#end il
#assumes this is a correct time to compute error
hasAnalyticalSolution = {}
hasAnalyticalSolutionVelocity = {}
for ci in range(p.coefficients.nc):
hasAnalyticalSolution[ci] = (ci in self.analyticalSolution and
self.analyticalSolution[ci] is not None)
hasAnalyticalSolutionVelocity[ci] = ('analyticalSolutionVelocity' in dir(p) and
p.analyticalSolutionVelocity is not None and
ci in p.analyticalSolutionVelocity and
p.analyticalSolutionVelocity[ci] is not None)
#ci
class gradWrapper(object):
def __init__(self,ex):
self.ex = ex
def uOfX(self,X):
return self.ex.duOfX(X)
def uOfXT(self,X,T):
return self.ex.duOfXT(X,T)
#grad wrapper
import math
if 'numericalSolution' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
#first see if need to project solution for calculations at all
needProj = False
uproj = None; uprojGrad = None
velproj = {}
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and not hasAnalyticalSolution[ci]:
needProj = True
if needProj:
uproj,uprojGrad = projectToFinestLevel(mlvt,il,tsim)#assumes conforming unless used MultilevelTransfer...NC
#mwf hack now allow evaluation of postprocessed velocities on fine grid
#import testStuff
for ci in range(p.coefficients.nc):
if (ci in self.flags['components']and
not hasAnalyticalSolutionVelocity[ci] and
n.conservativeFlux is not None and 'velocity' in self.flags['errorQuantities']):
#mwf debug
logEvent("SimTools proj velocity for error calling projectVelocityToFinestLevelNC")
velproj[ci] = projectVelocityToFinestLevelNC(mlvt,il,ci)
# CALCULATE THE L2 ERROR IN PRESSURE
            if 'p' in self.flags['errorQuantities'] and hasattr(m,'analyticalPressureSolution'):
# COMPUTE MEAN VALUE OF PRESSURE
pressureAnalyticalSolution = m.analyticalPressureSolution[0]
x = m.q['x'][0:m.mesh.subdomainMesh.nElements_owned]
abs_det_J = m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned]
quad_weight = list(m.elementQuadratureWeights.values())[0]
pressureNumericalSolution = m.q['p'][0:m.mesh.subdomainMesh.nElements_owned]
# compute mean values
mean_value_exact_p = 0.0
mean_value_numerical_p = 0.0
                    for eN in range(x.shape[0]):
for k in range(x.shape[1]):
mean_value_exact_p += pressureAnalyticalSolution.uOfXT(x[eN,k],tsim)*quad_weight[k]*abs_det_J[eN,k]
mean_value_numerical_p += pressureNumericalSolution[eN,k]*quad_weight[k]*abs_det_J[eN,k]
# remove mean value of numerical solution and add mean value of exact solution
pressureNumericalSolution += mean_value_exact_p - mean_value_numerical_p
err = Norms.L2errorSFEMvsAF2(pressureAnalyticalSolution,
x,
abs_det_J,
quad_weight,
pressureNumericalSolution,
T=tsim)
kerr = 'error_'+'p'+'_'+'L2'
if self.flags['echo']:
logEvent("""\nt= %g; %s= %g;""" % (tsim,kerr,err),level=0)
# END OF COMPUTING THE L2 ERROR OF THE PRESSURE
for ci in range(p.coefficients.nc):
if ci in self.flags['components']:
if not hasAnalyticalSolution[ci]:
udense = mlvt.levelModelList[-1].q[('u',ci)]
gradu_dense= mlvt.levelModelList[-1].q[('grad(u)',ci)]
if not hasAnalyticalSolutionVelocity[ci] and 'velocity' in self.flags['errorQuantities']:
veldense = mlvt.levelModelList[-1].q[('velocity',ci)]
mFine = mlvt.levelModelList[-1]
calcL2u = ('L2' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcL2u:
err = -12345.0
exa = 1.0
if hasAnalyticalSolution[ci]:
err = Norms.L2errorSFEMvsAF2(self.analyticalSolution[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['dV'][0:m.mesh.subdomainMesh.nElements_owned],
np.ones_like(list(m.elementQuadratureWeights.values())[0]),
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L2errorSFEMvsAF2(zeroFunction(),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['dV'][0:m.mesh.subdomainMesh.nElements_owned],
np.ones_like(list(m.elementQuadratureWeights.values())[0]),
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
else:
err = Norms.L2errorSFEM(mFine.q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],uproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L2errorSFEM(mFine.q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(udense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,'d'))
kerr = 'error_'+'u'+'_'+'L2'
kexa = 'exact_'+'u'+'_'+'L2'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcL2u
calcL1u = ('L1' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcL1u:
err = -12345.0
exa = 1.0
if hasAnalyticalSolution[ci]:
err = Norms.L1errorSFEMvsAF2(self.analyticalSolution[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['dV'][0:m.mesh.subdomainMesh.nElements_owned],
np.ones_like(list(m.elementQuadratureWeights.values())[0]),
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L1errorSFEMvsAF2(zeroFunction(),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['dV'][0:m.mesh.subdomainMesh.nElements_owned],
np.ones_like(list(m.elementQuadratureWeights.values())[0]),
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
else:
err = Norms.L1errorSFEM(mFine.q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],uproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L1errorSFEM(mFine.q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(udense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,'d'))
kerr = 'error_'+'u'+'_'+'L1'
kexa = 'exact_'+'u'+'_'+'L1'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcL1u
calcLIu = ('LI' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcLIu:
err = -12345.0
exa = 0.0
if hasAnalyticalSolution[ci]:
err = Norms.LIerrorSFEMvsAF(self.analyticalSolution[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.LIerrorSFEMvsAF(zeroFunction(),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
else:
err = max(numpy.absolute(uproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned].flat[:]-udense[0:mFine.mesh.subdomainMesh.nElements_owned].flat[:]))
exa = max(numpy.absolute(udense[0:mFine.mesh.subdomainMesh.nElements_owned].flat))
kerr = 'error_'+'u'+'_'+'LI'
kexa = 'exact_'+'u'+'_'+'LI'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#
#calcLIu
calcH1u = ('H1' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcH1u:
err = -12345.0; err0 = -12345.0; err1 = -12345.0
exa = 1.0; exa0 = 1.0; exa1 = 1.0
if hasAnalyticalSolution[ci]:
err0 = Norms.L2errorSFEMvsAF2(self.analyticalSolution[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
list(m.elementQuadratureWeights.values())[0],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa0 = Norms.L2errorSFEMvsAF2(zeroFunction(),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
list(m.elementQuadratureWeights.values())[0],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
else:
err0 = Norms.L2errorSFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],uproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa0 = Norms.L2errorSFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(udense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,'d'))
#now gradients
if hasAnalyticalSolution[ci]:
err1 = Norms.L2errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa1 = Norms.L2errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
err1 = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],uprojGrad[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa1 = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
err = math.sqrt(err0**2 + err1**2)
exa = math.sqrt(exa0**2 + exa1**2)
kerr = 'error_'+'u'+'_'+'H1'
kexa = 'exact_'+'u'+'_'+'H1'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcH1u
calcH1semiU = ('H1semi' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcH1semiU:
err = -12345.0;
exa = 1.0;
if hasAnalyticalSolution[ci]:
err = Norms.L2errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L2errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
err = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],uprojGrad[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
kerr = 'error_'+'u'+'_'+'H1semi'
kexa = 'exact_'+'u'+'_'+'H1semi'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcH1semiu
calcW11u = ('W11' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcW11u:
err = -12345.0; err0 = -12345.0; err1 = -12345.0
exa = 1.0; exa0 = 1.0; exa1 = 1.0
if hasAnalyticalSolution[ci]:
err0 = Norms.L1errorSFEMvsAF2(self.analyticalSolution[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
list(m.elementQuadratureWeights.values())[0],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa0 = Norms.L1errorSFEMvsAF2(zeroFunction(),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q['abs(det(J))'][0:m.mesh.subdomainMesh.nElements_owned],
list(m.elementQuadratureWeights.values())[0],
m.q[('u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
else:
err0 = Norms.L1errorSFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],uproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa0 = Norms.L1errorSFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],udense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(udense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,'d'))
#now gradients
if hasAnalyticalSolution[ci]:
err1 = Norms.L1errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa1 = Norms.L1errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
err1 = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],uprojGrad[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa1 = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
err = err0 + err1
exa = exa0 + exa1
kerr = 'error_'+'u'+'_'+'W11'
kexa = 'exact_'+'u'+'_'+'W11'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcW11u
calcW11semiU = ('W11semi' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcW11semiU:
err = -12345.0;
exa = 1.0;
if hasAnalyticalSolution[ci]:
err = Norms.L1errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L1errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
err = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],uprojGrad[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
kerr = 'error_'+'u'+'_'+'W11semi'
kexa = 'exact_'+'u'+'_'+'W11semi'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcH1semiu
calcTVu = ('TV' in self.flags['errorNorms'] and
'u' in self.flags['errorQuantities'])
if calcTVu:
err = -12345.0
exa = 1.0#need to compute using interpolation conditions?
#one version of discrete seminorm
#err = Norms.TVseminormSFEM(m.u[ci].dof,m.u[ci].femSpace.dofMap.l2g)
#what about just using W11seminorm
shtmp = (m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape[-1])
err = Norms.L1errorVFEMvsAF(zeroVectorFunction(shtmp),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned])
#for now use L1 norm of exact solution to normalize?
if hasAnalyticalSolution[ci]:
exa = Norms.L1errorVFEMvsAF(gradWrapper(p.analyticalSolution[ci]),
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('grad(u)',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
exa = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(gradu_dense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
kerr = 'error_'+'u'+'_'+'TV'
kexa = 'exact_'+'u'+'_'+'TV'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#end calcTV
############### velocity specific calculations ###############
calcL2vel = ('L2' in self.flags['errorNorms'] and
'velocity' in self.flags['errorQuantities'])
if calcL2vel:
err = -12345.0
exa = 1.0
if hasAnalyticalSolutionVelocity[ci]:
err = Norms.L2errorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L2errorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
#now try to project velocity to finer grids?
#mwf debug
#print """SimTools calcL2vel ci=%d veldense.shape=%s velproj[ci].shape=%s """ % (ci,veldense.shape,velproj[ci].shape)
err = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
veldense[0:mFine.mesh.subdomainMesh.nElements_owned],velproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L2errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
veldense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(veldense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
kerr = 'error_'+'velocity'+'_'+'L2'
kexa = 'exact_'+'velocity'+'_'+'L2'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcL2vel
calcL1vel = ('L1' in self.flags['errorNorms'] and
'velocity' in self.flags['errorQuantities'])
if calcL1vel:
err = -12345.0
exa = 1.0
if hasAnalyticalSolutionVelocity[ci]:
err = Norms.L1errorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.L1errorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
#now try to project velocity to finer grids?
                                err = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
                                                        veldense[0:mFine.mesh.subdomainMesh.nElements_owned],velproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned])
exa = Norms.L1errorVFEM(mlvt.levelModelList[-1].q[('dV_u',ci)][0:mFine.mesh.subdomainMesh.nElements_owned],
veldense[0:mFine.mesh.subdomainMesh.nElements_owned],
numpy.zeros(veldense[0:mFine.mesh.subdomainMesh.nElements_owned].shape,
'd'))
kerr = 'error_'+'velocity'+'_'+'L1'
kexa = 'exact_'+'velocity'+'_'+'L1'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcL2vel
calcLIvel = ('LI' in self.flags['errorNorms'] and
'velocity' in self.flags['errorQuantities'])
if calcLIvel:
err = -12345.0
exa = 1.0
if hasAnalyticalSolutionVelocity[ci]:
err = Norms.LIerrorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned],
T=tsim)
exa = Norms.LIerrorVFEMvsAF(p.analyticalSolutionVelocity[ci],
m.q['x'][0:m.mesh.subdomainMesh.nElements_owned],
m.q[('dV_u',ci)][0:m.mesh.subdomainMesh.nElements_owned],
numpy.zeros(m.q[('velocity',ci)][0:m.mesh.subdomainMesh.nElements_owned].shape,
'd'),
T=tsim)
else:
#now try to project velocity to finer grids?
                                err = max(numpy.absolute(veldense[0:mFine.mesh.subdomainMesh.nElements_owned].flat[:]-velproj[ci][0:mFine.mesh.subdomainMesh.nElements_owned].flat[:]))
                                exa = max(numpy.absolute(veldense[0:mFine.mesh.subdomainMesh.nElements_owned].flat))
kerr = 'error_'+'velocity'+'_'+'LI'
kexa = 'exact_'+'velocity'+'_'+'LI'
self.errorData[ci][il][kerr].append(err)
self.errorData[ci][il][kexa].append(exa)
if self.flags['echo']:
if self.flags['echoRelativeErrors']:
logEvent("""\nt= %g; %s[%d][%d]= %g; relative_error= %g;""" % (tsim,kerr,ci,il,err,old_div(err,(exa+1E-15))),level=0)
else:
logEvent("""\nt= %g; %s[%d][%d]= %g;""" % (tsim,kerr,ci,il,err),level=0)
#end if
#if calcLIvel
#end if calc
#end for ci
#end for il
#end if numerical solution
if 'localMassBalance' in self.flags['errorTypes']:
from . import cfemIntegrals
for ci in self.flags['components']:
for il,m in enumerate(mlvt.levelModelList):
#
if self.conservationResidual[il] is None:
self.conservationResidual[il] = numpy.zeros((m.mesh.nElements_global,),'d')
else:
self.conservationResidual[il].flat[:] = 0.0
if self.elementResidual[il] is None:
self.elementResidual[il] = numpy.array(m.elementResidual[ci],'d')
else:
self.elementResidual[il].flat[:] = m.elementResidual[ci].flat[:]
if n.conservativeFlux is None or ci not in list(n.conservativeFlux.keys()) or 'dg' in n.conservativeFlux[ci]:#have to adjust residual appropriately for different methods
pass
else:
flux = -1.0*m.ebq_global[('totalFlux',ci)]
cfemIntegrals.updateExteriorElementBoundaryFlux(m.mesh.exteriorElementBoundariesArray,
m.mesh.elementBoundaryElementsArray,
m.mesh.elementBoundaryLocalElementBoundariesArray,
flux,
m.ebq[('w*dS_u',ci)],
self.elementResidual[il])
                    #remove the boundary flux contribution from the element residual
if n.conservativeFlux is None or ci not in list(n.conservativeFlux.keys()) or 'dg' in n.conservativeFlux[ci]:
cfemIntegrals.calculateConservationResidualDG(self.elementResidual[il],self.conservationResidual[il])
else:
cfemIntegrals.calculateConservationResidual(m.ebq['n'],
m.ebq[('dS_u',ci)],
self.elementResidual[il],
m.ebq[('velocity',ci)],
self.conservationResidual[il])
maxConsError = max(numpy.absolute(self.conservationResidual[il].flat[0:m.mesh.subdomainMesh.nElements_owned]))
self.errorData[ci][il]['localMassBalance'].append(maxConsError)
if self.flags['echo']:
logEvent("""\nt= %g; max_localMassBalanceError[%d][%d]= %g; """ % (tsim,ci,il,maxConsError),level=0)
#il
#ci
#local mass balance
#now look at global mass balance (could do similar thing each step too)
if 'globalMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
globalMass = Norms.globalScalarDomainIntegral(m.q['abs(det(J))'],
m.elementQuadratureWeights[('m',ci)],
m.q[('m',ci)])
globErr = abs(globalMass-self.errorData[ci][il]['globalMass0'])
self.errorData[ci][il]['globalMassF'].append(globalMass)
if self.flags['echo']:
logEvent("""t= %g globalMassF[%d][%d] = %g globalMassDiff[%d][%d]= %g""" % \
(tsim,ci,il,globalMass,ci,il,globErr),level=0)
#end if
#end if
#end for ci
#end for il
#end calc globalMassBalance
if 'globalHeavisideMassBalance' in self.flags['errorTypes']:
for il,m in enumerate(mlvt.levelModelList):
for ci in range(p.coefficients.nc):
if ci in self.flags['components'] and ('m',ci) in m.q:
hm = numpy.where(m.q[('m',ci)] >= 0.0,1.0,0.0)
globalMass = Norms.globalScalarDomainIntegral(m.q['abs(det(J))'],
m.elementQuadratureWeights[('m',ci)],
hm)
globErr = abs(globalMass-self.errorData[ci][il]['globalHeavisideMass0'])
self.errorData[ci][il]['globalHeavisideMassF'].append(globalMass)
if self.flags['echo']:
logEvent("""t= %g globalHeavisideMassF[%d][%d] = %g globalHeavisideMassDiff[%d][%d]= %g""" % \
(tsim,ci,il,globalMass,ci,il,globErr),level=0)
#end if
#end if
#end for ci
#end for il
#end calc globalMassBalance
#end def
def getScalarElementStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element quadrature dictionary that
need to be stored
"""
scalarElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 2): #found quantity and it's a scalar
scalarElementStorageKeys.append(stval)
return scalarElementStorageKeys
def getVectorElementStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element quadrature dictionary that
need to be stored
"""
vectorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 3): #found quantity and it's a vector
vectorElementStorageKeys.append(stval)
return vectorElementStorageKeys
def getTensorElementStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element quadrature dictionary that
need to be stored
"""
tensorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 4): #found quantity and it's a tensor
tensorElementStorageKeys.append(stval)
return tensorElementStorageKeys
def getScalarElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element boundary quadrature dictionary that
need to be stored
"""
scalarElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebq_global': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 2): #found quantity and it's a scalar
scalarElementStorageKeys.append(stval)
return scalarElementStorageKeys
def getVectorElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element boundary quadrature dictionary that
need to be stored
"""
vectorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebq_global': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 3): #found quantity and it's a vector
vectorElementStorageKeys.append(stval)
return vectorElementStorageKeys
def getTensorElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in element boundary quadrature dictionary that
need to be stored
"""
tensorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebq_global': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 4): #found quantity and it's a tensor
tensorElementStorageKeys.append(stval)
return tensorElementStorageKeys
def getScalarExteriorElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in exterior element boundary quadrature dictionary that
need to be stored
"""
scalarElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebqe': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebqe and
len(mlvt.levelModelList[-1].ebqe[stval].shape) == 2): #found quantity and it's a scalar
scalarElementStorageKeys.append(stval)
return scalarElementStorageKeys
def getVectorExteriorElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in exterior element boundary quadrature dictionary that
need to be stored
"""
vectorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebqe': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebqe and
len(mlvt.levelModelList[-1].ebqe[stval].shape) == 3): #found quantity and it's a vector
vectorElementStorageKeys.append(stval)
return vectorElementStorageKeys
def getTensorExteriorElementBoundaryStorageKeys(self,mlvt,tsim):
"""
simple utility to pull out keys for things in exterior element boundary quadrature dictionary that
need to be stored
"""
tensorElementStorageKeys = []
for quant in [a for a in self.flags['storeQuantities'] if a is not None]:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'ebqe': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebqe and
len(mlvt.levelModelList[-1].ebqe[stval].shape) == 4): #found quantity and it's a tensor
tensorElementStorageKeys.append(stval)
return tensorElementStorageKeys
def stepPlotElementQuantitiesEnsight(self,mlvt,tsim):
"""
sort through desired quantities in quadrature dictionaries like m, dm, to plot
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
"""
if self.flags['plotOptions']['ensight']['on'] == False:
return False
p = self.pFile; n = self.nFile
plottedSomething = False
for quant in self.flags['plotQuantities']:
recType = quant.split(':')
if len(recType) > 1 and recType[0] == 'q': #found element quadrature quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 2): #found quantity and it's a scalar
plottedSomething = True
self.plotScalarElementQuantityEnsight(stval,mlvt,tsim)
elif (stval in mlvt.levelModelList[-1].q and
len(mlvt.levelModelList[-1].q[stval].shape) == 3): #found quantity and it's a vector
plottedSomething = True
self.plotVectorElementQuantityEnsight(stval,mlvt,tsim)
elif len(recType) > 1 and recType[0] == 'ebq_global': #found global element boundary quantity
stval = eval(recType[1])
if (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 2): #found quantity and it's a scalar
plottedSomething = True
self.plotScalarGlobalElementBoundaryQuantityEnsight(stval,mlvt,tsim)
elif (stval in mlvt.levelModelList[-1].ebq_global and
len(mlvt.levelModelList[-1].ebq_global[stval].shape) == 3): #found quantity and its a vector
                    plottedSomething = True
                    self.plotVectorGlobalElementBoundaryQuantityEnsight(stval,mlvt,tsim)
                    #ebq_global vector
        #has key
        #ebq_global
        #quantities
        return plottedSomething
def stepStoreQuantities(self,mlvt,tsim):
"""
shelve quantities for a given time instance, if self.storeHeavyData == True
soon to be deprecated and will use Archiver tools instead
Quadrature dictionary quantities are shelved in a dictionary
whose key is the corresponding quadrature dictionary name.
The stored dictionary has fields 'x' 't' and 'vals' which hold
the quadrature points (assumed static for now) as well as
lists of the desired quantities at the requested time
levels. The quantities to be stored are specified in
flags['storeQuantities'] in the format
"q:(%s,%d)",ebq:(%s,%d) etc e.g., "q:('u',0)" will store component 0 solution values from q
dataStorage['q']['x']= [[0.,0.,0.],[...],...] element quadrature points for 1st time called
dataStorage['q']['t']= [0.0,0.1,...,1.0]
dataStorage['q']['vals'] = [subset of q at t=0.0, subset of q at t=0.1, ...]
if multilevelModel is set, stores basically everything
Input
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to store
TODO: add option for storage directory
"""
assert self.dataStorage is not None, "dataStorage None storeTimes= %s " % self.flags['storeTimes']
if self.storeHeavyData == False:
return
p = self.pFile; n = self.nFile
#mwf debug
#print """SimTools entering stepStore t=%s dataFile=%s """ % (tsim,self.flags['dataFile'])
m = mlvt.levelModelList[-1]
q = {}; ebq = {}; ebq_global = {};
solutionData = {};
for quant in self.flags['storeQuantities']:
recType = quant.split(':')
if len(recType) > 1:
quadDict = recType[0]
stval = eval(recType[1])
if recType[0] == 'q' and stval in m.q:
q[stval]=m.q[stval]
#if not q.has_key('x'):
# q['x']= m.q['x']
elif recType[0] == 'ebq' and stval in m.ebq:
ebq[stval] = m.ebq[stval]
#if not ebq.has_key('x'):
# ebq['x']=m.ebq['x']
elif recType[0] == 'ebq_global' and stval in m.ebq_global:
                    ebq_global[stval] = m.ebq_global[stval]
#if not ebq_global.has_key('x'):
# ebq_global['x']=m.ebq_global['x']
            elif quant == 'u_dof':
                for ci in self.flags['components']:
                    solutionData[ci]={}
                    for il,im in enumerate(mlvt.levelModelList):
                        solutionData[ci][il] = {}
                        solutionData[ci][il]['u_dof'] = im.u[ci].dof
                        solutionData[ci][il]['l2g'] = im.u[ci].femSpace.dofMap.l2g
#found quad dict entry for storage
#end quantities
for d in ['q','ebq','ebq_global','solutionData']:
dval = eval(d)
#mwf debug
#print """SimTools stepStore tsim=%s d=%s len(dval)=%d """ % (tsim,d,len(dval))
if len(dval) > 0:
if d in self.dataStorage:
dtmp = self.dataStorage[d]
dtmp['t'].append(tsim)
dtmp['vals'].append(dval)
self.dataStorage[d] = dtmp
#mwf debug
#print """SimTools data Has Key %s """ % d
else:
#mwf debug
#print """SimTools data did not Have Key %s """ %d
self.dataStorage[d]={}
                if d == 'solutionData': #no quadrature points to attach
                    dtmp = {'t':[tsim],'vals':[dval]}
                else:
                    dtmp = {'t':[tsim],'vals':[dval],'x':getattr(m,d)['x']}
                self.dataStorage[d] = dtmp
#not already stored
#end something to store
#end loop through things to store
#def
def computeNodalQuadratureInfo(self,mlvt,t):
"""
if need values of quantities at mesh nodes and don't have them already, use this
only compute values on finest mesh for now
"""
from . import Quadrature
self.nodalQuadratureInfo = {}
vt = mlvt.levelModelList[-1]
nd = vt.nSpace_global; nq = nd+1 ; ne = vt.mesh.nElements_global
quad = Quadrature.SimplexLobattoQuadrature(nd,1)
self.nodalQuadratureInfo['elementQuadraturePoints'] = numpy.array(quad.points,'d')
self.nodalQuadratureInfo['x'] = numpy.zeros((ne,nq,3),'d')
self.nodalQuadratureInfo['J'] = numpy.zeros((ne,nq,nd,nd),'d')
self.nodalQuadratureInfo['inverse(J)'] = numpy.zeros((ne,nq,nd,nd),'d')
self.nodalQuadratureInfo['det(J)'] = numpy.zeros((ne,nq),'d')
vt.u[0].femSpace.elementMaps.getValues(self.nodalQuadratureInfo['elementQuadraturePoints'],
self.nodalQuadratureInfo['x'])
vt.u[0].femSpace.elementMaps.getJacobianValues(self.nodalQuadratureInfo['elementQuadraturePoints'],
self.nodalQuadratureInfo['J'],
self.nodalQuadratureInfo['inverse(J)'],
self.nodalQuadratureInfo['det(J)'])
self.nodalQuadratureInfo['abs(det(J))']=numpy.absolute(self.nodalQuadratureInfo['det(J)'])
for cj in self.flags['components']:
self.nodalQuadratureInfo[('v',cj)] = numpy.zeros((ne,nq,nd+1),'d')
self.nodalQuadratureInfo[('grad(v)',cj)] = numpy.zeros((ne,nq,nd+1,nd),'d')
vt.u[cj].femSpace.getBasisValues(self.nodalQuadratureInfo['elementQuadraturePoints'],
self.nodalQuadratureInfo[('v',cj)])
vt.u[cj].femSpace.getBasisGradientValues(self.nodalQuadratureInfo['elementQuadraturePoints'],
self.nodalQuadratureInfo['inverse(J)'],
self.nodalQuadratureInfo[('grad(v)',cj)])
#cj
#wasteful
for key in list(vt.q.keys()):
if key not in list(self.nodalQuadratureInfo.keys()):
tmp = list(vt.q[key].shape)
if len(tmp) > 1:
tmp[1] = nd+1
self.nodalQuadratureInfo[key] = numpy.zeros(tuple(tmp),'d')
#key not in already
#keys
#mwf debug
#for key in self.nodalQuadratureInfo.keys():
# print """SimTools nodalQuadrature Dict key= %s, shape=%s \n""" % (key,self.nodalQuadratureInfo[key].shape)
#print """SimTools nodalQuadrature x = %s """ % self.nodalQuadratureInfo['x']
#really wasteful, but need to get spatial quantities at new points
import copy
self.nodalQuadratureInfo['coefficients'] = copy.deepcopy(vt.coefficients)
self.nodalQuadratureInfo['coefficients'].initializeElementQuadrature(t,self.nodalQuadratureInfo)
#
def stepPlotEnsight(self,mlvt,tsim):
"""
plot solution and solution 'velocity' for ensight/paraview
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
assumes that initial case,sos, geo files set somewhere else
"""
p = self.pFile; n = self.nFile
if self.flags['plotOptions']['ensight']['on'] == False:
return False
mFinest = mlvt.levelModelList[-1]
for ci in self.flags['components']:
mFinest.u[ci].femSpace.writeFunctionEnsight(mFinest.u[ci],
self.flags['plotOptions']['ensight']['caseFileName'],
append=True,
firstVariable=False)
#ci
if mFinest.coefficients.vectorComponents is not None:
if len(mFinest.coefficients.vectorComponents) == 2:
vcomp = [mFinest.coefficients.vectorComponents[0],
mFinest.coefficients.vectorComponents[1]]
mFinest.u[vcomp[0]].femSpace.writeE2VectorFunctionEnsight(mFinest.u[vcomp[0]],
mFinest.u[vcomp[1]],
self.flags['plotOptions']['ensight']['caseFileName'],
nOutput=mFinest.u[vcomp[0]].femSpace.nOutput-1,
append=True,
firstVariable=False)
elif len(mFinest.coefficients.vectorComponents) == 3:
vcomp = [mFinest.coefficients.vectorComponents[0],
mFinest.coefficients.vectorComponents[1],
mFinest.coefficients.vectorComponents[2]]
mFinest.u[vcomp[0]].femSpace.writeE3VectorFunctionEnsight(mFinest.u[vcomp[0]],
mFinest.u[vcomp[1]],
mFinest.u[vcomp[2]],
self.flags['plotOptions']['ensight']['caseFileName'],
nOutput=mFinest.u[vcomp[0]].femSpace.nOutput-1,
append=True,
firstVariable=False)
#3d
#vector components
self.stepPlotCalled['ensight'] = True
return False
#stepPlotEnsight
def writeEnsightMeshForElementQuantities(self,filename,mlvt,tsim=0.0):
caseOut=open(filename+'.case','a')
caseOut.write('measured: '+filename+'q.geo\n')
caseOut.close()
meshOut=open(filename+'q.geo','w')
meshOut.write('Element quadrature\n')
meshOut.write('particle coordinates\n')
meshOut.write('%8i\n' % (mlvt.levelModelList[-1].mesh.nElements_global*mlvt.levelModelList[-1].nQuadraturePoints_element,))
pN=1
for eN in range(mlvt.levelModelList[-1].mesh.nElements_global):
for k in range(mlvt.levelModelList[-1].nQuadraturePoints_element):
meshOut.write('%8i%12.5E%12.5E%12.5E\n' % (pN,
mlvt.levelModelList[-1].q['x'][eN,k,0],
mlvt.levelModelList[-1].q['x'][eN,k,1],
mlvt.levelModelList[-1].q['x'][eN,k,2]))
pN+=1
meshOut.close()
def writeEnsightMeshForElementBoundaryQuantities(self,filename,mlvt,tsim=0.0):
caseOut=open(filename+'.case','a')
caseOut.write('measured: '+filename+'ebq.geo\n')
caseOut.close()
meshOut=open(filename+'ebq.geo','w')
meshOut.write('Element boundary quadrature\n')
meshOut.write('particle coordinates\n')
meshOut.write('%8i\n' % (mlvt.levelModelList[-1].mesh.nElementBoundaries_global*mlvt.levelModelList[-1].nElementBoundaryQuadraturePoints_elementBoundary,))
pN=1
for ebN in range(mlvt.levelModelList[-1].mesh.nElementBoundaries_global):
for k in range(mlvt.levelModelList[-1].nElementBoundaryQuadraturePoints_elementBoundary):
meshOut.write('%8i%12.5E%12.5E%12.5E\n' % (pN,
mlvt.levelModelList[-1].ebq_global['x'][ebN,k,0],
mlvt.levelModelList[-1].ebq_global['x'][ebN,k,1],
mlvt.levelModelList[-1].ebq_global['x'][ebN,k,2]))
pN+=1
meshOut.close()
def writeScalarElementFunctionHeaderEnsight(self,ckey,filename,append=False,firstVariable=True,case_filename=None):
if case_filename is None:
case_filename = filename
if not append:
caseOut=open(case_filename+'.case','a')
if firstVariable == True:
caseOut.write('VARIABLE\n')
line="scalar per measured node: %s%s %s%s%s.scl****\n" % (ckey[0],ckey[1],filename,
ckey[0],ckey[1]);
caseOut.write(line)
caseOut.close()
#
def writeVectorElementFunctionHeaderEnsight(self,ckey,filename,append=False,firstVariable=True,case_filename=None):
if case_filename is None:
case_filename = filename
if not append:
caseOut=open(case_filename+'.case','a')
if firstVariable == True:
caseOut.write('VARIABLE\n')
line= "vector per measured node: %s%s %s%s%s.vec****\n" % (ckey[0],ckey[1],filename,
ckey[0],ckey[1]);
caseOut.write(line)
caseOut.close()
#
def plotScalarElementQuantityEnsight(self,ckey,mlvt,tsim):
"""
Ensight plotting routine to look at scalar quantity stored in element quad dictionary q
ckey --- what should be plotted
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
assumes that initial case,sos, geo files set somewhere else
"""
p = self.pFile; n = self.nFile
if self.flags['plotOptions']['ensight']['on'] == False:
return False
case_filename = self.flags['plotOptions']['ensight']['caseFileName']
filename_ci = "%s%s%s.scl%4.4i" %(case_filename,ckey[0],ckey[1],mlvt.levelModelList[-1].u[ckey[1]].femSpace.nOutput-1)
uOut = open(filename_ci,'w')
uOut.write("%s%s\n" % (ckey[0],ckey[1]))
n=0
#slow
for eN in range(mlvt.levelModelList[-1].mesh.nElements_global):
for k in range(mlvt.levelModelList[-1].nQuadraturePoints_element):
#has to be 6 per line
uOut.write('%12.5e' % mlvt.levelModelList[-1].q[ckey][eN,k])
n+=1
if n%6==0:
uOut.write('\n')
uOut.write('\n')
uOut.close()
self.stepPlotCalled['ensightElementQuantities']= True
def plotVectorElementQuantityEnsight(self,ckey,mlvt,tsim,scaleOutput=None):
"""
Ensight plotting routine to look at vector quantity stored in element quad dictionary q
ckey --- what should be plotted
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
assumes that initial case,sos, geo files set somewhere else
TODO : check format for 3d
"""
p = self.pFile; n = self.nFile
if self.flags['plotOptions']['ensight']['on'] == False:
return False
case_filename = self.flags['plotOptions']['ensight']['caseFileName']
filename_ci = "%s%s%s.vec%4.4i" %(case_filename,ckey[0],ckey[1],mlvt.levelModelList[-1].u[ckey[1]].femSpace.nOutput-1)
uOut = open(filename_ci,'w')
uOut.write("%s%s\n" % (ckey[0],ckey[1]))
n=0
vmax=1. #no scaling by default
if scaleOutput == 'maxComponent':
            vmax = max(mlvt.levelModelList[-1].q[ckey].flat[:])+1.0e-8
print("WARNING SimTools Ensight_q_%s: Scaling velocity for output by %s" % (ckey,vmax))
for eN in range(mlvt.levelModelList[-1].mesh.nElements_global):
for k in range(mlvt.levelModelList[-1].nQuadraturePoints_element):
for i in range(mlvt.levelModelList[-1].q[ckey].shape[-1]):
uOut.write('%12.5e' % (old_div(mlvt.levelModelList[-1].q[ckey][eN,k,i],vmax)))
for i in range(mlvt.levelModelList[-1].q[ckey].shape[-1],3):
uOut.write('%12.5e' % (0.0))
if n%2==1:
uOut.write('\n')
n+=1
if n%2==1:
uOut.write('\n')
uOut.close()
self.stepPlotCalled['ensightElementQuantities']= True
def plotScalarGlobalElementBoundaryQuantityEnsight(self,ckey,mlvt,tsim):
"""
Ensight plotting routine to look at scalar quantity stored in element quad dictionary ebq_global
ckey --- what should be plotted
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
assumes that initial case,sos, geo files set somewhere else
"""
p = self.pFile; n = self.nFile
if self.flags['plotOptions']['ensight']['on'] == False:
return False
if (self.plottingQuadratureValuesForEnsight['elements'] == True or
self.plottingQuadratureValuesForEnsight['elementBoundaries'] == False):
return False
case_filename = self.flags['plotOptions']['ensight']['caseFileName']
filename_ci = "%s%s%s.scl%4.4i" %(case_filename,ckey[0],ckey[1],mlvt.levelModelList[-1].u[ckey[1]].femSpace.nOutput-1)
uOut = open(filename_ci,'w')
uOut.write("%s%s\n" % (ckey[0],ckey[1]))
n=0
for ebN in range(mlvt.levelModelList[-1].mesh.nElementBoundaries_global):
for k in range(mlvt.levelModelList[-1].nElementBoundaryQuadraturePoints_elementBoundary):
#has to be 6 per line
uOut.write('%12.5e' % mlvt.levelModelList[-1].ebq_global[ckey][ebN,k])
n+=1
if n%6==0:
uOut.write('\n')
uOut.write('\n')
uOut.close()
self.stepPlotCalled['ensightElementQuantities']= True
def plotVectorGlobalElementBoundaryQuantityEnsight(self,ckey,mlvt,tsim,scaleOutput=None):
"""
Ensight plotting routine to look at vector quantity stored in element quad dictionary ebq_global
ckey --- what should be plotted
p --- problem definition
n --- numerics definition
mlvt --- multilevel vector transport that holds the quantities to measure
tsim --- simulation time
assumes this is the correct time to plot
and plotOffSet is set correctly
assumes that initial case,sos, geo files set somewhere else
TODO : check format for 3d
"""
if self.flags['plotOptions']['ensight']['on'] == False:
return False
if (self.plottingQuadratureValuesForEnsight['elements'] == True or
self.plottingQuadratureValuesForEnsight['elementBoundaries'] == False):
return False
p = self.pFile; n = self.nFile
case_filename = self.flags['plotOptions']['ensight']['caseFileName']
filename_ci = "%s%s%s.vec%4.4i" %(case_filename,ckey[0],ckey[1],mlvt.levelModelList[-1].u[ckey[1]].femSpace.nOutput-1)
uOut = open(filename_ci,'w')
uOut.write("%s%s\n" % (ckey[0],ckey[1]))
n=0
vmax=1. #no scaling by default
if scaleOutput == 'maxComponent':
            vmax = max(mlvt.levelModelList[-1].ebq_global[ckey].flat[:])+1.0e-8
print("WARNING SimTools Ensight_ebq_global_%s: Scaling velocity for output by %s" % (ckey,vmax))
for ebN in range(mlvt.levelModelList[-1].mesh.nElementBoundaries_global):
for k in range(mlvt.levelModelList[-1].nElementBoundaryQuadraturePoints_elementBoundary):
for i in range(mlvt.levelModelList[-1].ebq_global[ckey].shape[-1]):
uOut.write('%12.5e' % (old_div(mlvt.levelModelList[-1].ebq_global[ckey][ebN,k,i],vmax)))
for i in range(mlvt.levelModelList[-1].ebq_global[ckey].shape[-1],3):
uOut.write('%12.5e' % (0.0))
if n%2==1:
uOut.write('\n')
n+=1
if n%2==1:
uOut.write('\n')
uOut.close()
self.stepPlotCalled['ensightElementQuantities']= True
#end SimulationProcessor
########################################################################
#project solutions to fine grid for computing error
########################################################################
def projectToFinestLevel(mlTransport,level,tsim=0.0,verbose=0):
"""use multilevel transport prolongation to get fine grid information
starting at level.
returns quadrature dictionary of projected values on fine grid
"""
#TODO
# appears broken (error values not consistent) 1/13/10
# set uproj to be size of level down to mfine rather than full hiearachy
import numpy
nLevels = len(mlTransport.uList)
    assert 0 <= level and level < nLevels, "projectToFinestLevel range= [0,%d]" % (nLevels-1)
coefficients = mlTransport.levelModelList[-1].coefficients
uqprojFine = [numpy.zeros(mlTransport.levelModelList[-1].q[('u',ci)].shape,'d')
for ci in range(coefficients.nc)]
graduqProjFine = [numpy.zeros(mlTransport.levelModelList[-1].q[('grad(u)',ci)].shape,'d')
for ci in range(coefficients.nc)]
uproj = [[FemTools.FiniteElementFunction(m.u[ci].femSpace) for m in mlTransport.levelModelList]
for ci in range(coefficients.nc)]
mFine = mlTransport.levelModelList[-1]
for ci in range(coefficients.nc):
for l in range(nLevels):
uproj[ci][l].dof[:] = 0.0
m = mlTransport.levelModelList[level]
uproj[ci][level].dof[:] = m.u[ci].dof
if level < nLevels-1:
for lf in range(level,nLevels-1):
mlTransport.meshTransfers.prolong_bcListDict[ci][lf+1].matvec(uproj[ci][lf].dof,
uproj[ci][lf+1].dof)
#load Dirichlet conditions in
for dofN,g in mlTransport.levelModelList[lf+1].dirichletConditions[ci].DOFBoundaryConditionsDict.items():
uproj[ci][lf+1].dof[dofN] = g(mlTransport.levelModelList[lf+1].dirichletConditions[ci].DOFBoundaryPointDict[dofN],tsim)
#dirichlet conditions
#lf up to fine
uproj[ci][-1].getValues(mFine.q['v',ci],uqprojFine[ci])
uproj[ci][-1].getGradientValues(mFine.q['grad(v)',ci],graduqProjFine[ci])
#mwf debug
#from proteusGraphical import vtkViewers
#import pdb
#pdb.set_trace()
else: #already on fine
uqprojFine[ci].flat[:] = mFine.q[('u',ci)].flat[:]
graduqProjFine[ci].flat[:] = mFine.q[('grad(u)',ci)].flat[:]
#else
#ci
if verbose > 2:
from . import Viewers
if 'viewerType' in dir(Viewers) and Viewers.viewerType == 'gnuplot' and mFine.nSpace_global == 2:
for ci in range(coefficients.nc):
for eN in range(uqprojFine[ci].shape[0]):
for k in range(uqprojFine[ci].shape[1]):
Viewers.datFile.write("%12.5e %12.5e %12.5e \n" % (mFine.q['x'][eN,k,0],
mFine.q['x'][eN,k,1],
uqprojFine[ci][eN,k]))
#
#eN
Viewers.datFile.write("\n \n#end uqproj ci=%d level=%d \n" % (ci,level))
ggrid = (3-1)*(2**nLevels)
title="uqproj ci=%d level=%d " % (ci,level)
cmd = "set dgrid3d %d,%d,16; set contour base; set term x11 %i; splot \'%s\' index %i with lines title \"%s\" \n" % (ggrid,ggrid,Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
title)
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
input('press return to continue')
#end ci
#end viewer typ
return uqprojFine,graduqProjFine
########################################################################
#try computing solution values directly on fine grid using coarse grid
#for nonconforming approximations
########################################################################
def generateParentInfo(mlMesh):
"""
get array P[l,e] = e_c, where element e_c is the parent of element e
P[0,:] = -1
"""
    #mwf now use new interface: mlMesh.elementParentsArrayList
    import numpy
    P = {}
nLevels = len(mlMesh.meshList)
P[0] = numpy.ones((mlMesh.meshList[0].nElements_global,),'i')
P[0][:] = -1
for l in range(1,nLevels):
P[l] = mlMesh.elementParentsArrayList[l]
return P
def getIntegrationPointsOnCoarseGrid(xf,lf,P,lc):
"""
given array of points N^f_e x n_q x 3 on level lf
generate dictionary on coarse
grid that's N^c_e x n_q^c(e_c) x 3 and holds the integration points
    assigned to the correct coarse grid element. If using uniform refinement,
    each coarse grid element should get the same number of integration points,
    but the number depends on the level of refinement, e.g., n_q*4**(lf-lc) in 2D.
In general the number won't be the same for nonuniform refinement
"""
nEf = len(P[lf])
assert nEf == xf.shape[0], "fine grid mismatch lf=%d nEf=%d xf.shape=%s " % (lf,nEf,xf.shape)
nq = xf.shape[1]
nEc = len(P[lc])
ldiff = lf-lc
#mwf debug
#print """testStuff generateCoarseGridX lf=%d nEf=%d xf.shape=%s lc=%d nEc=%d ldiff=%d """ % (lf,nEf,xf.shape,lc,nEc,
# ldiff)
xc = {}
for ec in range(nEc):
xc[ec] = {}
for ef in range(nEf):
ec = ef
for l in range(ldiff):
ep = P[lf-l][ec]
#mwf debug
#print "genCoarseGridX ef=%d l=%d ec=%d ep=%d " % (ef,l,ec,ep)
ec = ep
for iq in range(nq):
xc[ec][(ef,iq)] = xf[ef,iq,:]
#iq
#ef
#mwf debug
#print """getIntPointsOnCoarseGrid lf=%s lc=%s \n xf=%s""" % (lf,lc,xf)
#for ec in range(nEc):
# print """xc[%s] len=%d x=%s """ % (ec,len(xc[ec]),xc[ec])
return xc
def projectVelocityToFinestLevelNC(mlTransport,level,ci=0,tsim=0.0,verbose=0):
"""
use brute force evaluation to get coarse grid quantities on fine grid
starting at level.
returns quadrature dictionary of projected values on fine grid
"""
import numpy
nLevels = len(mlTransport.levelModelList)
    assert 0 <= level and level < nLevels, "projectVelocityToFinestLevelNC range= [0,%d]" % (nLevels-1)
mFine = mlTransport.levelModelList[-1]
mCoarse= mlTransport.levelModelList[level]
if mCoarse.velocityPostProcessor is None:
return None
P = generateParentInfo(mlTransport.mlMeshSave)
nEf = mFine.q['x'].shape[0]; nqf = mFine.q['x'].shape[1]
lf = nLevels-1 #index into list
ldiff = lf-level
if level == lf:
velciprojFine = mFine.q[('velocity',ci)]
xArray = mFine.q['x']
else:
#mwf debug
#import pdb
#pdb.set_trace()
xc= getIntegrationPointsOnCoarseGrid(mFine.q['x'],lf,P,level)
nEc = len(xc)
#will get extra padding for some coarse cells with nonuniform refinement
#but these points should get skipped in error calculation
nqc = max([len(xc[i]) for i in range(nEc)])
xArray = numpy.zeros((nEc,nqc,3),'d')
for ec in range(nEc):
iqc = 0
for k,x in xc[ec].items():
#mwf debug
#print "ec=%d iqc=%d x=%s " % (ec,iqc,x)
xArray[ec,iqc,:] = x
iqc += 1
#end x
            #now pad xArray using the last value if the number of integration
            #points is less than the max over the domain
if iqc < nqc:
for iqp in range(iqc,nqc):
xArray[ec,iqp,:]=xArray[ec,iqc-1,:]
#end ec
velciprojFine = numpy.zeros(mFine.q[('velocity',ci)].shape,'d')
if mCoarse.velocityPostProcessor.postProcessingTypes[ci] == 'point-eval':
#assume constant solution/potential gradient over coarse grid
print("WARNING projectVelocityToFinestLevelNC type= point-eval assuming constant potential on coarse grid")
for ef in range(nEf):
ec = ef
for l in range(ldiff):
ep = P[lf-l][ec]
ec = ep
for iq in range(nqf):
if ('a',ci,ci) in mFine.q:
velciprojFine[ef,iq,:] = -numpy.dot(mFine.q[('a',ci,ci)][ef,iq,:,:],
mCoarse.q[('grad(phi)',ci)][ec,0,:])
else:
velciprojFine[ef,iq,:] = 0.0
if ('f',ci) in mFine.q:
velciprojFine[ef,iq,:] += mFine.q[('f',ci)][ef,iq,:]
#iq
#ef
else:
velci0 = mCoarse.velocityPostProcessor.evaluateElementVelocityField(xArray,ci)
for ec in range(nEc):
iqc = 0
for k,x in xc[ec].items():
ef = k[0]; iqf = k[1]
velciprojFine[ef,iqf,:] = velci0[ec,iqc,:]
iqc += 1
#x
#ec
#postprocessing type
#else on level
if verbose > 2:
print("""velocityProjNC \n xArray=%s velciprojFine= %s \n""" % (xArray,
velciprojFine))
if verbose > 1:
from . import Viewers
if 'viewerType' in dir(Viewers) and Viewers.viewerType == 'gnuplot' and mFine.nSpace_global == 2:
max_u=max(numpy.absolute(numpy.take(velciprojFine,[0],2).flat))
max_v=max(numpy.absolute(numpy.take(velciprojFine,[1],2).flat))
L = min((max(mFine.mesh.nodeArray[:,0]),max(mFine.mesh.nodeArray[:,1])))
scale =10.0*max((max_u,max_v,1.0e-16))/L
for eN in range(mFine.q['x'].shape[0]):
for iq in range(mFine.q['x'].shape[1]):
x = mFine.q['x'][eN,iq,:]
v = velciprojFine[eN,iq,:]
Viewers.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (x[0],x[1],
old_div(v[0],scale),
old_div(v[1],scale)))
Viewers.datFile.write("\n \n#end velciproj ci=%d level=%d" % (ci,level))
title = "velciproj ci=%d level=%d " % (ci,level)
cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
title)
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
input('press return to continue')
#now try just coarse grid velocity
max_u=max(numpy.absolute(numpy.take(mCoarse.q[('velocity',ci)],[0],2).flat))
max_v=max(numpy.absolute(numpy.take(mCoarse.q[('velocity',ci)],[1],2).flat))
L = min((max(mFine.mesh.nodeArray[:,0]),max(mFine.mesh.nodeArray[:,1])))
scale =10.0*max((max_u,max_v,1.0e-16))/L
for eN in range(mCoarse.q['x'].shape[0]):
for iq in range(mCoarse.q['x'].shape[1]):
x = mCoarse.q['x'][eN,iq,:]
v = mCoarse.q[('velocity',ci)][eN,iq,:]
Viewers.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (x[0],x[1],
old_div(v[0],scale),
old_div(v[1],scale)))
Viewers.datFile.write("\n \n#end coarse velocity ci=%d level=%d" % (ci,level))
title = "coarse velocity ci=%d level=%d " % (ci,level)
cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (Viewers.windowNumber,
Viewers.datFilename,
Viewers.plotNumber,
title)
Viewers.cmdFile.write(cmd)
Viewers.viewerPipe.write(cmd)
Viewers.newPlot()
Viewers.newWindow()
input('press return to continue')
#end gnuplot
#end verbose
return velciprojFine
| mit | 6c5ec4f64abd4b2298f97ee4909153df | 58.278505 | 245 | 0.472693 | 4.217568 | false | false | false | false |
klen/muffin | docs/conf.py | 1 | 6578 | # -*- coding: utf-8 -*-
"""Setup Muffin documentation."""
import sys
import os
import pkg_resources
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx_copybutton']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Muffin'
copyright = u'2015, Kirill Klenov' # noqa
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
try:
release = pkg_resources.get_distribution('Muffin').version
except pkg_resources.DistributionNotFound:
    print('To build the documentation, the distribution information of Muffin')
    print('has to be available. Either install the package into your')
    print('development environment or run "setup.py develop" to set up the')
    print('metadata. A virtualenv is recommended!')
sys.exit(1)
del pkg_resources
version = release
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'pydata_sphinx_theme'
# html_theme = 'sphinx_rtd_theme'
# html_theme = 'aiohttp_theme'
html_logo = 'static/logo.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'logo_link': 'https://github.com/klen/muffin',
'github_url': 'https://github.com/klen/muffin',
'icon_links': [
{
'name': 'PyPI',
'url': 'https://pypi.org/project/muffin',
'icon': 'fas fa-box',
}
],
# 'logo_only': True,
# 'canonical_url': "https://klen.github.io/muffin/",
}
html_sidebars = {
"**": ["search-field.html", "sidebar-nav-bs.html", "custom-sidebar.html"],
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes']
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "muffin-favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['static']
# html_css_files = ['theme.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Muffindoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('latexindex', 'Muffin.tex', u'Muffin Documentation', u'Kirill Klenov', 'manual'),
]
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'\usepackage{flaskstyle}'
}
latex_use_parts = True
latex_additional_files = [
# 'muffinstyle.sty',
'static/logo.png'
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
# epub_title = ''
# epub_author = ''
# epub_publisher = ''
# epub_copyright = ''
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
intersphinx_mapping = {
"python": ("http://docs.python.org/3", None),
"multidict": ("https://multidict.readthedocs.io/en/stable/", None),
"yarl": ("https://yarl.readthedocs.io/en/stable/", None),
"asgi_tools": ("https://klen.github.io/asgi-tools/", None),
}
# pygments_style = 'tango'
autodoc_member_order = 'bysource'
autodoc_typehints = 'description'
| mit | df8004d16a90e5c5debb5671fb60006a | 30.932039 | 86 | 0.688963 | 3.517647 | false | false | false | false |
erdc/proteus | proteus/tests/MeshAdaptPUMI/gauge_compare/dambreak_Colagrossi_2D_rdmc/vof_n.py | 3 | 2900 | from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools)
import vof_p as physics
from proteus.mprans import VOF
from proteus import Context
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
if ct.useHex or ct.structured:
nnx = ct.nnx
nny = ct.nny
# time stepping
runCFL = ct.runCFL
if ct.timeDiscretization=='vbdf':
timeIntegration = TimeIntegration.VBDF
timeOrder=2
stepController = StepControl.Min_dt_cfl_controller
elif ct.timeDiscretization=='flcbdf':
timeIntegration = TimeIntegration.FLCBDF
#stepController = FLCBDF_controller
stepController = StepControl.Min_dt_cfl_controller
time_tol = 10.0*ct.vof_nl_atol_res
atol_u = {0:time_tol}
rtol_u = {0:time_tol}
else:
timeIntegration = TimeIntegration.BackwardEuler_cfl
stepController = StepControl.Min_dt_cfl_controller
# mesh options
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
femSpaces = {0: ct.basis}
massLumping = False
numericalFluxType = VOF.NumericalFlux
conservativeFlux = None
subgridError = VOF.SubgridError(coefficients=physics.coefficients,
nd=ct.domain.nd)
shockCapturing = VOF.ShockCapturing(coefficients=physics.coefficients,
nd=ct.domain.nd,
shockCapturingFactor=ct.vof_shockCapturingFactor,
lag=ct.vof_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = None
linearSmoother = None
matrix = LinearAlgebraTools.SparseMatrix
if ct.useOldPETSc:
multilevelLinearSolver = LinearSolvers.PETSc
levelLinearSolver = LinearSolvers.PETSc
else:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'vof_'
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
linTolFac = 0.0
l_atol_res = 0.1*ct.vof_nl_atol_res
nl_atol_res = ct.vof_nl_atol_res
useEisenstatWalker = False#True
maxNonlinearIts = 50
maxLineSearches = 0
auxiliaryVariables = ct.domain.auxiliaryVariables['vof']
| mit | 50ed8bbf92c92404b6f810316a515793 | 29.851064 | 88 | 0.717586 | 3.360371 | false | false | true | false |
erdc/proteus | scripts/povgen.py | 1 | 1345 | #!/usr/bin/env python
"""
A script for generating povray frames of zero level set
"""
from builtins import range
import argparse
import tables
import numpy as np
from proteus import Comm, Domain, Isosurface
parser = argparse.ArgumentParser()
parser.add_argument("prefix",
help="The prefix of the h5 files")
parser.add_argument("-L", type=float, default=[1.0, 1.0, 1.0], nargs='+',
help="extents of bounding box")
parser.add_argument("-x", type=float, default=[0.0, 0.0, 0.0], nargs='+',
help="lower left front corner")
parser.add_argument("-s", "--steps", type=int, default=0,
help="number of time steps to process")
args = parser.parse_args()
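# Example invocation (hypothetical file prefix):
#   povgen.py floating_bar -s 3 -L 1.0 1.0 1.0 -x 0.0 0.0 0.0
# which expects floating_bar.h5 in the working directory and writes one
# phi_t_*.pov frame per step.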
domain = Domain.RectangularDomain(L=args.L, x=args.x)
comm = Comm.init()
h5 = tables.open_file(args.prefix + ".h5", "r")
isosurface = Isosurface.Isosurface((('phi_t', (0.0,)),),
domain,
writeBoundary=False)
def runSteps():
for i in range(args.steps):
isosurface.attachHDF5(h5, i)
isosurface.calculate(checkTime=False)
import cProfile
cProfile.run('runSteps()','isostats')
import pstats
p = pstats.Stats('isostats')
p.strip_dirs().sort_stats('time').print_stats()
p.strip_dirs().sort_stats('cumulative').print_stats()
h5.close()
| mit | ec6f5a22dee0f55958603d24102835bb | 33.487179 | 73 | 0.627509 | 3.337469 | false | false | false | false |
erdc/proteus | proteus/tests/HotStart_3P/pressureincrement_p.py | 1 | 2823 | from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
from .NS_hotstart import *
#domain = ctx.domain
#nd = ctx.nd
name = "pressureincrement"
from proteus.mprans import PresInc
coefficients=PresInc.Coefficients(rho_f_min = (1.0-1.0e-8)*rho_1,
rho_s_min = (1.0-1.0e-8)*rho_s,
nd = nd,
modelIndex=1,
fluidModelIndex=0,
fixNullSpace=fixNullSpace_PresInc,
INTEGRATE_BY_PARTS_DIV_U=INTEGRATE_BY_PARTS_DIV_U_PresInc)
LevelModelType = PresInc.LevelModel
def getDBC_phi(x,flag):
    return None
def getAdvectiveFlux_qt(x,flag):
if manufactured_solution==1: #u.n!=0
if (flag==1): #left boundary
return lambda x,t: -np.sin(x[0])*np.sin(x[1]+t)
elif (flag==2): # right boundary
return lambda x,t: np.sin(x[0])*np.sin(x[1]+t)
elif (flag==3): # bottom boundary
return lambda x,t: -np.cos(x[0])*np.cos(x[1]+t)
else:
return lambda x,t: np.cos(x[0])*np.cos(x[1]+t)
else: #u.n=0
return lambda x,t: 0.
def getDiffusiveFlux_phi(x,flag):
return lambda x,t: 0.
class getIBC_phi(object):
def __init__(self):
pass
def uOfXT(self,x,t):
return 0.0
initialConditions = {0:getIBC_phi()}
dirichletConditions = {0:getDBC_phi}
advectiveFluxBoundaryConditions = {0:getAdvectiveFlux_qt}
diffusiveFluxBoundaryConditions = {0:{0:getDiffusiveFlux_phi}}
| mit | 80c9273b33f6b07cd46a597cdd566cfa | 59.06383 | 275 | 0.334396 | 5.095668 | false | false | false | false |
erdc/proteus | scripts/pome_gen_poly.py | 1 | 2175 | from __future__ import print_function
from builtins import zip
from builtins import range
import math
n_domain_vertices = 100
domain_vertices =[(0.75*math.sin(2.0*math.pi*float(n)/float(n_domain_vertices))+0.5,0.75*math.cos(2.0*math.pi*float(n)/float(n_domain_vertices))+0.5) for n in range(n_domain_vertices)]
grain_centers = [(0.25,0.25),(0.25,0.75),(0.75,0.75),(0.75,0.25),(0.5,0.5),(0.5+0.5,0.5),(0.5-0.5,0.5),(0.5,0.5-0.5),(0.5,0.5+0.5)]
radii = [0.15,0.15,0.15,0.15,0.125,0.1,0.1,0.1,0.1]
points_on_grain = 50
nvertices = len(domain_vertices) + len(grain_centers)*points_on_grain
poly = open('pome.poly','w')
poly.write('%d %d %d %d \n' % (nvertices,2,0,1))
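# .poly header line: <# vertices> <dimension> <# attributes> <# boundary markers>,
# followed by one "id x y marker" line per vertex (Triangle's .poly format).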
#write vertices
poly.write("#vertices \n")
for v,p in enumerate(domain_vertices):
poly.write('%d %18.12e %18.12e %d \n' % (v+1,p[0],p[1],1))
#write segments
nSegmentGroups = 0
segments=[]
segmentGroups=[]
for sN in range(len(domain_vertices)-1):
segments.append([sN,sN+1])
segmentGroups.append(nSegmentGroups)
segments.append([len(domain_vertices)-1,0])
segmentGroups.append(nSegmentGroups)
vStart = len(domain_vertices)
sStart = len(segments)
nSegmentGroups = nSegmentGroups+1
for g,c in enumerate(grain_centers):
for gb in range(points_on_grain):
pb = (radii[g]*math.sin(float(gb)/float(points_on_grain)*2.0*math.pi),radii[g]*math.cos(float(gb)/float(points_on_grain)*2.0*math.pi))
poly.write('%d %18.12e %18.12e %d \n' % (vStart + gb+1,c[0]+pb[0],c[1]+pb[1],1+g))
for gb in range(points_on_grain-1):
segments.append([sStart+gb,sStart+gb+1])
segmentGroups.append(nSegmentGroups)
segments.append([sStart+points_on_grain-1,sStart])
segmentGroups.append(nSegmentGroups)
vStart = vStart + points_on_grain
sStart = sStart + points_on_grain
nSegmentGroups = nSegmentGroups+1
poly.write('%d %d \n' % (len(segments),1))
print(segments)
poly.write("#segments \n")
for sN,s,sG in zip(list(range(len(segments))),segments,segmentGroups):
poly.write('%d %d %d %d \n' % (sN+1,s[0]+1,s[1]+1,sG))
poly.write('%d \n' % len(grain_centers))
for gN,g in enumerate(grain_centers):
poly.write('%d %18.12e %18.12e \n' % (gN,g[0],g[1]))
poly.close()
| mit | 65770848c01bf60497d60d9c4ed05440 | 43.387755 | 185 | 0.667126 | 2.405973 | false | false | false | false |
erdc/proteus | proteus/tests/levelset/rotation/vof_rotation_2d_p.py | 1 | 2488 | from __future__ import absolute_import
from builtins import object
from proteus import *
from proteus.default_p import *
from proteus.ctransportCoefficients import smoothedHeaviside
try:
from .rotation2D import *
except:
from rotation2D import *
from proteus.mprans import VOF
name=soname+"_vof"
"""
The non-conservative level set description of a bubble in a two-phase flow
"""
LevelModelType = VOF.LevelModel
##\ingroup test
#\file vof_rotation_2d_p.py
#
# \todo finish vof_rotation_2d_p.py
if applyRedistancing:
coefficients = VOF.Coefficients(LS_model=0,V_model=0,RD_model=1,ME_model=2,checkMass=checkMass,
epsFact=epsFact_vof,useMetrics=useMetrics)
elif not onlyVOF:
coefficients = VOF.Coefficients(LS_model=0,V_model=0,RD_model=None,ME_model=1,checkMass=checkMass,
epsFact=epsFact_vof,useMetrics=useMetrics)
else:
coefficients = VOF.Coefficients(RD_model=None,ME_model=0,checkMass=checkMass,
epsFact=epsFact_vof,useMetrics=useMetrics)
def Heaviside(phi):
if phi > 0:
return 1.0
elif phi < 0:
return 0.0
else:
return 0.5
class Rotation_phi(object):
def __init__(self,center=[0.5,0.75,0.5],radius=0.15):
self.radius = radius
self.center = center
def uOfX(self,X):
dx = X[0]-self.center[0]; dy = X[1]-self.center[1];
dBubble = self.radius - sqrt(dx**2 + dy**2)
return smoothedHeaviside(epsFactHeaviside*he,dBubble)#Heaviside(dBubble)
#end
def uOfXT(self,X,t):
return self.uOfX(X)
#end
#end Rotation_phi
class Rotation_phi_cylinder(object):
def __init__(self,center=[0.5,0.75,0.5],radius=0.15):
self.radius = radius
self.center = center
def uOfX(self,X):
dx = X[0]-self.center[0]; dy = X[1]-self.center[1];
dBubble = self.radius - sqrt(dx**2 + dy**2)
return smoothedHeaviside(epsFactHeaviside*he,dBubble)#Heaviside(dBubble)
#end
def uOfXT(self,X,t):
return self.uOfX(X)
#end
#end Rotation_phi
analyticalSolutions = None
def getDBC(x,flag):
pass
dirichletConditions = {0:getDBC}
initialConditions = {0:Rotation_phi(center=[0.0,0.5],radius=0.25)}
fluxBoundaryConditions = {0:'outFlow'}
#cek made no flux since v.n = 0 for this v
def getAFBC(x,flag):
return lambda x,t: 0.0
advectiveFluxBoundaryConditions = {0:getAFBC}
diffusiveFluxBoundaryConditions = {0:{}}
| mit | ad77d0c84c398da3027e5fc31689b8aa | 27.272727 | 102 | 0.655547 | 2.927059 | false | false | false | false |
erdc/proteus | proteus/tests/CLSVOF/with_RANS3PF/pressureInitial_p.py | 1 | 1293 | from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
try:
from .multiphase import *
except:
from multiphase import *
from proteus.mprans import PresInit
name = "pressureInitial"
#LevelModelType = PresInit.LevelModel
coefficients=PresInit.Coefficients(nd=nd,
modelIndex=PINIT_model,
fluidModelIndex=V_model,
pressureModelIndex=PRESSURE_model)
#pressure increment should be zero on any pressure dirichlet boundaries
def getDBC_pInit(x,flag):
if flag == boundaryTags['top']:
return lambda x,t: 0.0
#the advectiveFlux should be zero on any no-flow boundaries
def getAdvectiveFlux_pInit(x,flag):
if flag != boundaryTags['top']:
return lambda x,t: 0.0
def getDiffusiveFlux_pInit(x,flag):
if flag != boundaryTags['top']:
return lambda x,t: 0.0
class getIBC_pInit(object):
def __init__(self):
pass
def uOfXT(self,x,t):
return 0.0
initialConditions = {0:getIBC_pInit()}
dirichletConditions = {0:getDBC_pInit}
advectiveFluxBoundaryConditions = {0:getAdvectiveFlux_pInit}
diffusiveFluxBoundaryConditions = {0:{0:getDiffusiveFlux_pInit}}
| mit | f257330de9c833de5767f618a3e4bf3d | 29.785714 | 71 | 0.675174 | 3.475806 | false | false | false | false |
erdc/proteus | proteus/tests/poisson_2d/poisson_het_2d_dgpk_n.py | 1 | 2470 | from __future__ import absolute_import
from builtins import range
from proteus import *
from proteus.default_n import *
from proteus import defaults
defaults.reset_default_n()
try:
from .poisson_het_2d_p import *
except:
from poisson_het_2d_p import *
parallel = True
direct=False
polynomial_order = 2
numerical_flux_flag = 'SIPG'
timeIntegration = NoIntegration
nDTout = 1
if polynomial_order == 2:
femSpaces = dict((i,DG_AffineQuadraticOnSimplexWithNodalBasis) for i in range(nc))
else:
femSpaces = dict((i,DG_AffineLinearOnSimplexWithNodalBasis) for i in range(nc))
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
nn = 81
nLevels = 1
if parallel:
nLevels = 1
nn = nn*2**(nLevels-1)
nLevels = 1
subgridError = None
shockCapturing = None
if numerical_flux_flag == 'SIPG':
numericalFluxType = Advection_DiagonalUpwind_Diffusion_SIPG
elif numerical_flux_flag == 'IIPG':
numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG
elif numerical_flux_flag == 'LDG':
numericalFluxType = Diffusion_LDG
else:
numericalFluxType = Advection_DiagonalUpwind_Diffusion_NIPG
multilevelNonlinearSolver = NLNI
levelNonlinearSolver = Newton
maxNonlinearIts = 1
fullNewtonFlag = True
tolFac = 0.0
nl_atol_res = 1.0e-8
linTolFac = 0.0
l_atol_res = 1.0e-9
matrix = SparseMatrix
if parallel:
multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
#pick number of layers to use in overlap and type of partition
if numerical_flux_flag == 'LDG':
nLayersOfOverlapForParallel = 2
else:
nLayersOfOverlapForParallel = 1
#type of partition
parallelPartitioningType = MeshParallelPartitioningTypes.element
linearSolverConvergenceTest= 'r-true'
from petsc4py import PETSc
OptDB = PETSc.Options()
OptDB.clear()
if direct:
OptDB.setValue('ksp_type','preonly')
OptDB.setValue('pc_type','lu')
OptDB.setValue('pc_factor_type','superlu_dist')
else:
OptDB.setValue('ksp_type','cg')
OptDB.setValue('pc_asm_type','basic')
OptDB.setValue('pc_asm_overlap',2)
OptDB.setValue('sub_ksp_type','preonly')
OptDB.setValue('sub_pc_type','lu')
OptDB.setValue('sub_pc_factor_type','superlu')
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
linearSolverConvergenceTest= 'r'
conservativeFlux = None
hex = False
quad = False
| mit | 75cd64a0e74e14548d05cccab87131b5 | 25.276596 | 86 | 0.716194 | 3.030675 | false | false | false | false |
erdc/proteus | scripts/gen_eqp_quad.py | 1 | 4663 | #!/bin/env python
import math
import sympy
from sympy.integrals import intpoly
import numpy as np
from sympy.abc import I, J, K, N
from sympy import Sum
from sympy.geometry import Triangle, Point
from sympy.matrices import Matrix
from sympy.printing.cxxcode import cxxcode
x,y,z = sympy.symbols('x,y,z')
n01_x,n01_y,n01_z = sympy.symbols('n01_x,n01_y,n01_z')
n02_x,n02_y,n02_z = sympy.symbols('n02_x,n02_y,n02_z')
n31_x,n31_y,n31_z = sympy.symbols('n31_x,n31_y,n31_z')
n32_x,n32_y,n32_z = sympy.symbols('n32_x,n32_y,n32_z')
f=open("equivalent_polynomials_coefficients_quad.h",'w')
f.write("""#ifndef EQUIVALENT_POLYNOMIALS_COEFFICIENTS_QUAD_H
#define EQUIVALENT_POLYNOMIALS_COEFFICIENTS_QUAD_H
namespace equivalent_polynomials
{
template<int nP>
inline void _calculate_b(double theta01, double theta02, double theta31, double theta32,
double phi0, double phi1, double phi2, double phi3,
double* b_H, double* b_ImH, double* b_D);
""")
use_simplify=True
def simplify(expression):
if use_simplify:
return sympy.simplify(expression)
else:
return expression
nSpace=3
maxOrder = 5
nDOF1D=maxOrder + 1
unit_tet = [[(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 0, 1)],
[1, 2, 3],
[2, 3, 0],
[3, 0, 1],
[0, 1, 2]]
n0=(0,0,0)
n1=(1,0,0)
n2=(0,1,0)
n3=(0,0,1)
theta01,theta02,theta31,theta32 = sympy.symbols('theta01,theta02,theta31,theta32')
phi0,phi1,phi2,phi3 = sympy.symbols('phi0,phi1,phi2,phi3')
n01 = (theta01,0,0)
n02 = (0,theta02,0)
n31 = (theta31, 0, 1-theta31)
n32 = (0, theta32, 1-theta32)
sub_tet0 = [[n1,#0
n01,#1
n02,#2
n31],#3
[3, 2, 1], #n31, n02, n01
[2, 3, 0], #n02, n31, n1
[0, 3, 1], #n1, n31, n01
[0, 1, 2]] #n1, n01, n02
sub_tet1 = [[n2,#0
n02,#1
n32,#2
n31],#3
[1, 3, 2], #n02, n31, n32
[0, 2, 3], #n2, n32, n31
[3, 1, 0], #n31, n02, n2
[0, 1, 2]] #n2, n02, n32
sub_tet2 = [[n1,#0
n2,#1
n02,#2
n31],#3
[2, 3, 1], #n02, n31, n2
[2, 0, 3], #n02, n1, n31
[3, 0, 1], #n31, n1, n2
[1, 0, 2]] #n2, n1, n02
b_H={}
b_1mH={}
b_D={}
for i in range(nDOF1D):
for j in range(nDOF1D-i):
for k in range(nDOF1D-i-j):
basis_ijk = x**i*y**j*z**k
print("basis function ", i, j, k)
T0 = simplify(intpoly.polytope_integrate(sub_tet0, basis_ijk))
T1 = simplify(intpoly.polytope_integrate(sub_tet1, basis_ijk))
T2 = simplify(intpoly.polytope_integrate(sub_tet2, basis_ijk))
frag = simplify(T0 + T1 + T2)
b_H[(i,j,k)] = frag
b_1mH[(i,j,k)] = intpoly.polytope_integrate(unit_tet, basis_ijk) - frag
theta01_expr = 0.5 - 0.5*(phi1 + phi0)/(phi1-phi0)
theta02_expr = 0.5 - 0.5*(phi2 + phi0)/(phi2-phi0)
theta31_expr = 0.5 - 0.5*(phi1 + phi3)/(phi1-phi3)
theta32_expr = 0.5 - 0.5*(phi2 + phi3)/(phi2-phi3)
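            # Each theta is the linear interpolant of the zero level set along
            # a tet edge: 0.5 - 0.5*(phi_a+phi_b)/(phi_b-phi_a) simplifies to
            # phi_a/(phi_a-phi_b), the parametric crossing point on edge a->b.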
b_D[(i,j,k)] = frag.diff(theta01,simplify=use_simplify)*(theta01_expr.diff(phi0,simplify=False) + theta01_expr.diff(phi1,simplify=False)) + \
frag.diff(theta02,simplify=use_simplify)*(theta02_expr.diff(phi0,simplify=False) + theta02_expr.diff(phi2,simplify=False)) + \
frag.diff(theta31,simplify=use_simplify)*(theta31_expr.diff(phi3,simplify=False) + theta31_expr.diff(phi1,simplify=False)) + \
frag.diff(theta32,simplify=use_simplify)*(theta32_expr.diff(phi3,simplify=False) + theta32_expr.diff(phi2,simplify=False))
for order in range(1,maxOrder):
nDOF1D=order + 1
print("Order ", order)
f.write(""" template<>
inline void _calculate_b<{0:d}>(double theta01, double theta02, double theta31, double theta32,
double phi0, double phi1, double phi2, double phi3,
double* b_H, double* b_ImH, double* b_D)
{{
""".format(order))
n=0
for i in range(nDOF1D):
for j in range(nDOF1D-i):
for k in range(nDOF1D-i-j):
f.write(" b_H[{0:d}] = {1:s};\n".format(n,cxxcode(b_H[(i,j,k)])))
f.write(" b_ImH[{0:d}] = {1:s};\n".format(n,cxxcode(b_1mH[(i,j,k)])))
f.write(" b_D[{0:d}] = {1:s};\n".format(n,cxxcode(b_D[(i,j,k)])))
n+=1
f.write(""" }
""")
f.write("""}//equivalent_polynomials
#endif
""")
f.close()
| mit | 430a0db4d803b2d779a0421175921d1c | 35.147287 | 153 | 0.538923 | 2.610862 | false | false | false | false |
caleb531/automata | automata/regex/postfix.py | 1 | 4614 | #!/usr/bin/env python3
"""Classes and methods for converting lists of tokens to postfix ordering."""
import abc
from collections import deque
from itertools import zip_longest
import automata.base.exceptions as exceptions
from automata.regex.lexer import Token
class Operator(Token):
"""Subclass of token defining an operator."""
@abc.abstractmethod
def get_precedence(self):
raise NotImplementedError
class InfixOperator(Operator):
"""Subclass of operator defining an infix operator."""
@abc.abstractmethod
def op(self, left, right):
raise NotImplementedError
class PostfixOperator(Operator):
"""Subclass of operator defining an postfix operator."""
@abc.abstractmethod
def op(self, left):
raise NotImplementedError
class Literal(Token):
"""Subclass of token defining a literal."""
@abc.abstractmethod
def val(self):
raise NotImplementedError
class RightParen(Token):
"""Subclass of token defining a right parenthesis."""
def __repr__(self):
return '<)>'
class LeftParen(Token):
"""Subclass of token defining a left parenthesis."""
def __repr__(self):
return '<(>'
def validate_tokens(token_list):
"""Validate the inputted tokens list (in infix ordering)."""
token_list_prev = [None]
token_list_prev.extend(token_list)
paren_counter = 0
for prev_token, curr_token in zip_longest(token_list_prev, token_list):
# No postfix or infix operators at the beginning
if prev_token is None and isinstance(curr_token, (InfixOperator, PostfixOperator)):
raise exceptions.InvalidRegexError(f"Token '{curr_token}' cannot appear at the start of a statement.")
        # No infix operators at the end of a statement or right before another operator or right paren
elif isinstance(prev_token, InfixOperator):
if curr_token is None:
raise exceptions.InvalidRegexError(f"'{prev_token}' cannot appear at the end of a statement.")
elif isinstance(curr_token, (InfixOperator, PostfixOperator, RightParen)):
raise exceptions.InvalidRegexError(f"'{prev_token}' cannot appear immediately before '{curr_token}'.")
# No left parens right before infix or postfix operators, or right before a right paren
elif isinstance(prev_token, LeftParen):
if isinstance(curr_token, (InfixOperator, PostfixOperator, RightParen)):
raise exceptions.InvalidRegexError(f"'{prev_token}' cannot appear immediately before '{prev_token}'.")
# Track open/closed parens
paren_counter += 1
elif isinstance(prev_token, RightParen):
paren_counter -= 1
if paren_counter < 0:
raise exceptions.InvalidRegexError("Token list has mismatched parethesis.")
if paren_counter != 0:
raise exceptions.InvalidRegexError("Token list has unclosed parethesis.")
def tokens_to_postfix(tokens):
"""Takes in a list of tokens and changes them to postfix ordering."""
stack = deque()
res = []
def comp_precedence(a, b):
"""Compare precedence of operators (two tokens)."""
return a.get_precedence() <= b.get_precedence()
for c in tokens:
if isinstance(c, Literal):
res.append(c)
elif isinstance(c, RightParen):
while len(stack) > 0 and not isinstance(stack[-1], LeftParen):
res.append(stack.pop())
stack.pop()
elif isinstance(c, LeftParen):
stack.append(c)
elif not stack or isinstance(stack[-1], LeftParen) or not comp_precedence(c, stack[-1]):
stack.append(c)
else:
while stack and not isinstance(stack[-1], LeftParen) and comp_precedence(c, stack[-1]):
res.append(stack.pop())
stack.append(c)
while stack:
res.append(stack.pop())
return res
def parse_postfix_tokens(postfix_tokens):
"""Parse list of postfix tokens to produce value of expression."""
stack = deque()
for token in postfix_tokens:
if isinstance(token, InfixOperator):
right = stack.pop()
left = stack.pop()
stack.append(token.op(left, right))
elif isinstance(token, PostfixOperator):
left = stack.pop()
stack.append(token.op(left))
elif isinstance(token, Literal):
stack.append(token.val())
else:
raise exceptions.InvalidRegexError(f"Invalid token type {type(token)}")
return stack[0]
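if __name__ == '__main__':
    # Minimal usage sketch (not part of the library). The token classes
    # below are hypothetical stand-ins defined only for this demo; they
    # deliberately bypass the lexer Token constructor.
    class _Num(Literal):
        def __init__(self, n):
            self.n = n
        def val(self):
            return self.n
    class _Plus(InfixOperator):
        def __init__(self):
            pass
        def get_precedence(self):
            return 1
        def op(self, left, right):
            return left + right
    class _Times(InfixOperator):
        def __init__(self):
            pass
        def get_precedence(self):
            return 2
        def op(self, left, right):
            return left * right
    # Infix 1 + 2 * 3 becomes postfix 1 2 3 * +, which evaluates to 7.
    demo_tokens = [_Num(1), _Plus(), _Num(2), _Times(), _Num(3)]
    validate_tokens(demo_tokens)
    print(parse_postfix_tokens(tokens_to_postfix(demo_tokens)))  # prints 7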
| mit | 1249cbf75dc91e5063ff82d5cf25b32a | 30.82069 | 118 | 0.64326 | 4.356941 | false | false | false | false |
fabric-bolt/fabric-bolt | fabric_bolt/task_runners/base.py | 3 | 11897 | import os
import re
import subprocess
import shutil
from django.apps import apps
from django.utils.text import slugify
from django.conf import settings
from django.core.cache import cache
class BaseTaskRunnerBackend(object):
special_options = ['no_agent', 'forward-agent', 'config', 'disable-known-hosts', 'keepalive', 'password',
'parallel', 'no-pty', 'reject-unknown-hosts', 'skip-bad-hosts', 'timeout', 'command-timeout',
'user', 'warn-only', 'pool-size', 'key_filename']
def get_urls(self):
return []
def get_detail_template(self):
raise NotImplementedError('You must implement get_detail_template()')
def pre_start_task(self, deployment, project, request):
"""
Can be used by child classes to do any pre-task work
"""
pass
def get_task_details(self, project, task_name):
if task_name:
for details in self.get_fabric_tasks(project):
if details[0] == task_name:
return details
return None
def get_special_options(self):
# These options are passed to Fabric as: fab task --abort-on-prompts=True --user=root ...
return self.special_options
def check_output(self, command, shell=False):
# Need to use bash since some of the commands are prefixed with "source"
executable = None
if shell:
executable = getattr(settings, 'SHELL', '/bin/bash')
return subprocess.check_output(command, shell=shell, executable=executable)
def check_output_with_ssh_key(self, command):
if getattr(settings, 'GIT_SSH_KEY_LOCATION', None):
return self.check_output(
'ssh-agent bash -c "ssh-add {};{}"'.format(settings.GIT_SSH_KEY_LOCATION, command),
shell=True
)
else:
return self.check_output([command], shell=True)
def update_project_git(self, project, cache_dir, repo_dir):
if not os.path.exists(repo_dir):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.check_output_with_ssh_key('git clone {} {}'.format(project.repo_url, repo_dir))
else:
self.check_output_with_ssh_key(
'cd {0};git stash;git pull'.format(repo_dir)
)
def clean_obsolete_project_git(self, project):
cache.delete_many(['project_{}_fabfile_tasks'.format(project.id),
'project_{}_fabfile_path'.format(project.id)])
repo_dir = os.path.join(settings.PUBLIC_DIR, '.repo_caches', slugify(project.name))
if os.path.exists(repo_dir):
shutil.rmtree(repo_dir)
def setup_virtual_env_if_needed(self, repo_dir):
env_dir = os.path.join(repo_dir, 'env')
if not os.path.exists(env_dir):
os.makedirs(env_dir)
self.check_output("virtualenv {}".format(env_dir), shell=True)
def update_project_requirements(self, project, repo_dir, activate_loc):
pip_installs = ' '.join(project.fabfile_requirements.splitlines())
self.check_output_with_ssh_key(
'source {} && cd {};pip install {}'.format(activate_loc, repo_dir, pip_installs)
)
def get_fabfile_path(self, project):
if project.use_repo_fabfile:
cache_key = 'project_{}_fabfile_path'.format(project.pk)
cached_result = cache.get(cache_key)
if cached_result:
return cached_result
cache_dir = os.path.join(settings.PUBLIC_DIR, '.repo_caches')
repo_dir = os.path.join(cache_dir, slugify(project.name))
self.update_project_git(project, cache_dir, repo_dir)
self.setup_virtual_env_if_needed(repo_dir)
activate_loc = os.path.join(repo_dir, 'env', 'bin', 'activate')
self.update_project_requirements(project, repo_dir, activate_loc)
result = os.path.join(repo_dir, 'fabfile.py'), activate_loc
cache.set(cache_key, result, settings.FABRIC_TASK_CACHE_TIMEOUT)
return result
else:
return settings.FABFILE_PATH, None
def parse_task_details(self, name, task_output):
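        # Assumed `fab --display=<task>` output shape this parser relies on:
        #   line 0:  "Displaying detailed information for task '<name>':"
        #   line 1:  blank
        #   lines 2..-3: the task docstring (or "No docstring provided")
        #   line -2: "Arguments: arg1, arg2='default'"
        #   line -1: blank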
lines = task_output.splitlines()
docstring = '\n'.join([line.strip() for line in lines[2:-2]]).strip()
arguments_line = lines[-2].strip()
if docstring == 'No docstring provided':
docstring = None
arguments_line = arguments_line[11:].strip()
arguments = []
if arguments_line:
for arg in arguments_line.split(', '):
m = re.match(r"^([^=]+)(=(\'?)([^']*)\3)?$", arg)
if m.group(2): # found argument with default value
if m.group(3) == "'": # default value is a string
arguments.append((m.group(1), m.group(4)))
else: # found an argument with some other default value.
# all fab arguments are translated to strings, so this doesnt make sense. Ignore the default.
arguments.append(m.group(1))
else:
arguments.append(m.group(1))
# Class based Tasks have a 'self' argument - strip it.
if arguments and arguments[0] == 'self':
arguments = arguments[1:]
return name, docstring, arguments
def get_fabric_tasks(self, project):
"""
Generate a list of fabric tasks that are available
"""
cache_key = 'project_{}_fabfile_tasks'.format(project.pk)
cached_result = cache.get(cache_key)
if cached_result:
return cached_result
try:
fabfile_path, activate_loc = self.get_fabfile_path(project)
if activate_loc:
output = self.check_output(
'source {};fab --list --list-format=short --fabfile={}'.format(activate_loc, fabfile_path),
shell=True
)
else:
output = self.check_output(
'fab --list --list-format=short --fabfile={}'.format(fabfile_path),
shell=True
)
lines = output.splitlines()
tasks = []
for line in lines:
name = line.strip()
if activate_loc:
o = self.check_output(
'source {};fab --display={} --fabfile={}'.format(activate_loc, name, fabfile_path),
shell=True
)
else:
o = self.check_output(
['fab', '--display={}'.format(name), '--fabfile={}'.format(fabfile_path)]
)
tasks.append(self.parse_task_details(name, o))
cache.set(cache_key, tasks, settings.FABRIC_TASK_CACHE_TIMEOUT)
        except Exception:
tasks = []
return tasks
def clean_key_string(self, key):
key = key.replace('"', '\\"') # escape double quotes
key = key.replace(',', '\,') # escape commas, that would be adding a new value
key = key.replace('=', '\=') # escape = because that would be setting a new key
return key
def clean_value_string(self, value):
value = value.replace('"', '\\"') # escape double quotes
value = value.replace(',', '\,') # escape commas, that would be adding a new value
value = value.replace('=', '\=') # escape = because that would be setting a new key
return value
def clean_arg_key_string(self, key):
# this has to be a valid python function argument, so we can get pretty strict here
key = re.sub(r'[^0-9a-zA-Z_]', '', key) # remove anything that isn't a number, letter, or underscore
return key
def get_key_value_string(self, key, config):
key = self.clean_key_string(key)
if config.data_type == config.BOOLEAN_TYPE:
return key + ('' if config.get_value() else '=')
elif config.data_type == config.NUMBER_TYPE:
return key + '=' + str(config.get_value())
else:
return '{}={}'.format(key, self.clean_value_string(config.get_value()))
def update_config_values_from_session(self, configs, session):
configs = configs.copy()
for key, config in configs.iteritems():
if session.get('configuration_values', {}).get(key, None) is not None:
config.set_value(session['configuration_values'][key])
del session['configuration_values'][key]
arg_values = session.get('configuration_values', {})
return configs, arg_values
def build_command(self, project, deployment, session, abort_on_prompts=True):
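        # Illustrative shape of a generated command (hypothetical task name
        # and values):
        #   fab deploy:tag="v1.2" --set "debug=,workers=4" --abort-on-prompts \
        #       --hosts=web1,web2 --fabfile=/path/to/fabfile.py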
# Get the dictionary of configurations for this stage
configs = deployment.stage.get_configurations()
configs, arg_values = self.update_config_values_from_session(configs, session)
task_args = [key for key, config in configs.iteritems() if config.task_argument and config.task_name == deployment.task.name]
task_configs = [key for key, config in configs.iteritems() if not config.task_argument]
command_to_config = {x.replace('-', '_'): x for x in self.get_special_options()}
# Take the special env variables out
normal_task_configs = list(set(task_configs) - set(command_to_config.keys()))
# Special ones get set a different way
special_task_configs = list(set(task_configs) & set(command_to_config.keys()))
command = 'fab ' + deployment.task.name
task_details = self.get_task_details(project, deployment.task.name)
task_args = list(set(task_args + [x[0] if isinstance(x, tuple) else x for x in task_details[2]]))
if task_args:
key_value_strings = []
for key in task_args:
if key in configs:
value = unicode(configs[key].get_value())
elif key in arg_values:
value = unicode(arg_values[key])
else:
continue
cleaned_key = self.clean_arg_key_string(key)
value = self.clean_value_string(value)
key_value_strings.append('{}="{}"'.format(cleaned_key, value))
if key_value_strings:
command += ':'
command += ','.join(key_value_strings)
if normal_task_configs:
command += ' --set '
command += '"' + ','.join(self.get_key_value_string(key, configs[key]) for key in normal_task_configs) + '"'
if special_task_configs:
for key in special_task_configs:
if key == 'key_filename':
command += ' -i ' + configs[key].get_value()
else:
command += ' --' + self.get_key_value_string(command_to_config[key], configs[key])
if abort_on_prompts:
command += ' --abort-on-prompts'
hosts = deployment.stage.hosts.values_list('name', flat=True)
if hosts:
command += ' --hosts=' + ','.join(hosts)
if not configs.get('key_filename'):
# Get global SSH
SSHConfig = apps.get_model('hosts', 'SSHConfig')
ssh_config = SSHConfig.objects.first()
if ssh_config:
command += ' -i ' + ssh_config.private_key_file.file.name
if not configs.get('user'):
command += ' -u ' + ssh_config.remote_user
fabfile_path, active_loc = self.get_fabfile_path(project)
command += ' --fabfile={}'.format(fabfile_path)
if active_loc:
return 'source {};'.format(active_loc) + ' ' + command
else:
return command
| mit | 779789a5737a4a814dc1810966e3de43 | 37.626623 | 133 | 0.56199 | 4.063183 | false | true | false | false |
fabric-bolt/fabric-bolt | fabric_bolt/accounts/forms.py | 10 | 3496 | import string
import random
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.forms import PasswordResetForm
from django.utils.translation import ugettext_lazy as _
class UserChangeForm(forms.ModelForm):
"""
A form for updating users.
"""
user_level = forms.ModelChoiceField(queryset=Group.objects.all())
is_active = forms.ChoiceField(choices=((True, 'Active'), (False, 'Disabled')), label='Status')
class Meta:
model = get_user_model()
fields = ['email', 'first_name', 'last_name', 'user_level', 'is_active', 'template']
def __init__(self, *args, **kwargs):
# form instance and initial values
initial = kwargs.get('initial', {})
instance = kwargs.get('instance', {})
user_is_admin = kwargs.pop('user_is_admin', False)
# Set initial values for the non-model questions
if instance:
# Get user's group
groups = instance.groups.all()
initial['user_level'] = groups[0].id if groups.exists() else None
# Map is_active question to model property
initial['is_active'] = instance.is_active
kwargs['initial'] = initial
super(UserChangeForm, self).__init__(*args, **kwargs)
if not user_is_admin:
self.fields.pop('user_level', None)
self.fields.pop('is_active', None)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserChangeForm, self).save(commit=commit)
if commit:
self.set_permissions(instance)
return instance
def set_permissions(self, instance):
# Assign user to selected group
if self.cleaned_data.get('user_level', False):
instance.groups.clear()
instance.groups.add(self.cleaned_data['user_level'])
# Set staff status based on user group
instance.is_staff = instance.user_is_admin()
instance.save()
class UserCreationForm(UserChangeForm):
"""
A form for creating new users. Includes all the required fields, plus a
repeated password.
"""
error_messages = {'duplicate_email': _("A user with that email already exists."), }
def clean_email(self):
"""
Set a nicer error message than the ORM.
"""
email = self.cleaned_data["email"]
try:
get_user_model()._default_manager.get(email=email)
except get_user_model().DoesNotExist:
return email
raise forms.ValidationError(self.error_messages['duplicate_email'])
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserCreationForm, self).save(commit=commit)
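        # Assign a random throwaway password; the user sets a real one via the
        # password-reset ("welcome") email sent below. Note that random.choice
        # is not a CSPRNG; the secrets module would be the stricter choice here.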
random_password = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
instance.set_password(random_password)
instance.save()
email_form = PasswordResetForm({'email': self.cleaned_data['email']})
email_form.is_valid()
email_form.save(email_template_name='accounts/welcome_email.html')
return instance
| mit | c0d6b0612035e5b7553f1fe92e683e21 | 32.941748 | 107 | 0.627002 | 4.222222 | false | false | false | false |
instana/python-sensor | instana/autoprofile/schedule.py | 1 | 1213 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import threading
import time
from ..log import logger
class TimerWraper(object):
def __init__(self):
self.timer = None
self.cancel_lock = threading.Lock()
self.canceled = False
def cancel(self):
with self.cancel_lock:
self.canceled = True
self.timer.cancel()
def delay(timeout, func, *args):
def func_wrapper():
try:
func(*args)
except Exception:
logger.error('Error in delayed function', exc_info=True)
t = threading.Timer(timeout, func_wrapper, ())
t.start()
return t
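# Illustrative usage of delay() (not part of the module):
#   t = delay(2.0, some_callable, arg)  # fire once after ~2 seconds
#   t.cancel()                          # or abort it before it fires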
def schedule(timeout, interval, func, *args):
tw = TimerWraper()
def func_wrapper():
start = time.time()
try:
func(*args)
except Exception:
logger.error('Error in scheduled function', exc_info=True)
with tw.cancel_lock:
if not tw.canceled:
tw.timer = threading.Timer(abs(interval - (time.time() - start)), func_wrapper, ())
tw.timer.start()
tw.timer = threading.Timer(timeout, func_wrapper, ())
tw.timer.start()
return tw | mit | 2a2ba0d8decae588be161147c84cf901 | 21.481481 | 99 | 0.575433 | 3.912903 | false | false | false | false |
instana/python-sensor | instana/agent/aws_lambda.py | 1 | 3564 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
The Instana agent (for AWS Lambda functions) that manages
monitoring state and reporting that data.
"""
import time
from ..log import logger
from ..util import to_json
from .base import BaseAgent
from ..version import VERSION
from ..collector.aws_lambda import AWSLambdaCollector
from ..options import AWSLambdaOptions
class AWSLambdaAgent(BaseAgent):
""" In-process agent for AWS Lambda """
def __init__(self):
super(AWSLambdaAgent, self).__init__()
self.collector = None
self.options = AWSLambdaOptions()
self.report_headers = None
self._can_send = False
# Update log level from what Options detected
self.update_log_level()
logger.info("Stan is on the AWS Lambda scene. Starting Instana instrumentation version: %s", VERSION)
if self._validate_options():
self._can_send = True
self.collector = AWSLambdaCollector(self)
self.collector.start()
else:
logger.warning("Required INSTANA_AGENT_KEY and/or INSTANA_ENDPOINT_URL environment variables not set. "
"We will not be able monitor this function.")
def can_send(self):
"""
Are we in a state where we can send data?
@return: Boolean
"""
return self._can_send
def get_from_structure(self):
"""
Retrieves the From data that is reported alongside monitoring data.
@return: dict()
"""
return {'hl': True, 'cp': 'aws', 'e': self.collector.get_fq_arn()}
def report_data_payload(self, payload):
"""
Used to report metrics and span data to the endpoint URL in self.options.endpoint_url
"""
response = None
try:
if self.report_headers is None:
# Prepare request headers
self.report_headers = dict()
self.report_headers["Content-Type"] = "application/json"
self.report_headers["X-Instana-Host"] = self.collector.get_fq_arn()
self.report_headers["X-Instana-Key"] = self.options.agent_key
self.report_headers["X-Instana-Time"] = str(round(time.time() * 1000))
response = self.client.post(self.__data_bundle_url(),
data=to_json(payload),
headers=self.report_headers,
timeout=self.options.timeout,
verify=self.options.ssl_verify,
proxies=self.options.endpoint_proxy)
if 200 <= response.status_code < 300:
logger.debug("report_data_payload: Instana responded with status code %s", response.status_code)
else:
logger.info("report_data_payload: Instana responded with status code %s", response.status_code)
except Exception as exc:
logger.debug("report_data_payload: connection error (%s)", type(exc))
return response
def _validate_options(self):
"""
Validate that the options used by this Agent are valid. e.g. can we report data?
"""
return self.options.endpoint_url is not None and self.options.agent_key is not None
def __data_bundle_url(self):
"""
URL for posting metrics to the host agent. Only valid when announced.
"""
return "%s/bundle" % self.options.endpoint_url
| mit | 1a56e03ef443e43318521bf269ee7fdd | 36.515789 | 116 | 0.5867 | 4.330498 | false | false | false | false |
instana/python-sensor | instana/collector/aws_lambda.py | 1 | 2060 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
AWS Lambda Collector: Manages the periodic collection of metrics & snapshot data
"""
from ..log import logger
from .base import BaseCollector
from ..util import DictionaryOfStan
from ..util.aws import normalize_aws_lambda_arn
class AWSLambdaCollector(BaseCollector):
""" Collector for AWS Lambda """
def __init__(self, agent):
super(AWSLambdaCollector, self).__init__(agent)
logger.debug("Loading AWS Lambda Collector")
self.context = None
self.event = None
self._fq_arn = None
# How often to report data
self.report_interval = 5
self.snapshot_data = DictionaryOfStan()
self.snapshot_data_sent = False
def collect_snapshot(self, event, context):
self.context = context
self.event = event
try:
plugin_data = dict()
plugin_data["name"] = "com.instana.plugin.aws.lambda"
plugin_data["entityId"] = self.get_fq_arn()
self.snapshot_data["plugins"] = [plugin_data]
except Exception:
logger.debug("collect_snapshot error", exc_info=True)
return self.snapshot_data
def should_send_snapshot_data(self):
return self.snapshot_data and self.snapshot_data_sent is False
def prepare_payload(self):
payload = DictionaryOfStan()
payload["spans"] = None
payload["metrics"] = None
if not self.span_queue.empty():
payload["spans"] = self.queued_spans()
if self.should_send_snapshot_data():
payload["metrics"] = self.snapshot_data
self.snapshot_data_sent = True
return payload
def get_fq_arn(self):
if self._fq_arn is not None:
return self._fq_arn
if self.context is None:
logger.debug("Attempt to get qualified ARN before the context object is available")
return ''
self._fq_arn = normalize_aws_lambda_arn(self.context)
return self._fq_arn
| mit | e5c615d6047b7d37107dbf9e3be8b713 | 29.746269 | 95 | 0.618932 | 4.071146 | false | false | false | false |
instana/python-sensor | instana/instrumentation/wsgi.py | 1 | 2301 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
Instana WSGI Middleware
"""
import opentracing as ot
import opentracing.ext.tags as tags
from ..singletons import agent, tracer
from ..util.secrets import strip_secrets_from_query
class InstanaWSGIMiddleware(object):
""" Instana WSGI middleware """
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
env = environ
def new_start_response(status, headers, exc_info=None):
"""Modified start response with additional headers."""
tracer.inject(self.scope.span.context, ot.Format.HTTP_HEADERS, headers)
headers.append(('Server-Timing', "intid;desc=%s" % self.scope.span.context.trace_id))
res = start_response(status, headers, exc_info)
sc = status.split(' ')[0]
if 500 <= int(sc) <= 511:
self.scope.span.mark_as_errored()
self.scope.span.set_tag(tags.HTTP_STATUS_CODE, sc)
self.scope.close()
return res
ctx = tracer.extract(ot.Format.HTTP_HEADERS, env)
self.scope = tracer.start_active_span("wsgi", child_of=ctx)
if agent.options.extra_http_headers is not None:
for custom_header in agent.options.extra_http_headers:
# Headers are available in this format: HTTP_X_CAPTURE_THIS
wsgi_header = ('HTTP_' + custom_header.upper()).replace('-', '_')
if wsgi_header in env:
self.scope.span.set_tag("http.header.%s" % custom_header, env[wsgi_header])
if 'PATH_INFO' in env:
self.scope.span.set_tag('http.path', env['PATH_INFO'])
if 'QUERY_STRING' in env and len(env['QUERY_STRING']):
scrubbed_params = strip_secrets_from_query(env['QUERY_STRING'], agent.options.secrets_matcher,
agent.options.secrets_list)
self.scope.span.set_tag("http.params", scrubbed_params)
if 'REQUEST_METHOD' in env:
self.scope.span.set_tag(tags.HTTP_METHOD, env['REQUEST_METHOD'])
if 'HTTP_HOST' in env:
self.scope.span.set_tag("http.host", env['HTTP_HOST'])
return self.app(environ, new_start_response)
| mit | 013871e18dcaf6fb5c6e38cda72af90a | 38 | 106 | 0.598435 | 3.723301 | false | false | false | false |
instana/python-sensor | tests/autoprofile/test_runtime.py | 1 | 1120 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import unittest
import signal
import os
from instana.autoprofile.profiler import Profiler
from instana.autoprofile.runtime import runtime_info, register_signal
class RuntimeTestCase(unittest.TestCase):
def test_register_signal(self):
if runtime_info.OS_WIN:
return
result = {'handler': 0}
def _handler(signum, frame):
result['handler'] += 1
register_signal(signal.SIGUSR1, _handler)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGUSR1)
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
self.assertEqual(result['handler'], 2)
'''def test_register_signal_default(self):
result = {'handler': 0}
def _handler(signum, frame):
result['handler'] += 1
register_signal(signal.SIGUSR1, _handler, once = True)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGUSR1)
self.assertEqual(result['handler'], 1)'''
if __name__ == '__main__':
unittest.main()
| mit | 9586f982cd1ede086436310ed848e90c | 21.857143 | 69 | 0.624107 | 3.648208 | false | true | false | false |
instana/python-sensor | instana/tracer.py | 1 | 6536 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2016
from __future__ import absolute_import
import os
import re
import time
import traceback
import opentracing as ot
from basictracer import BasicTracer
from .util.ids import generate_id
from .span_context import SpanContext
from .span import InstanaSpan, RegisteredSpan
from .recorder import StanRecorder, InstanaSampler
from .propagators.http_propagator import HTTPPropagator
from .propagators.text_propagator import TextPropagator
from .propagators.binary_propagator import BinaryPropagator
class InstanaTracer(BasicTracer):
def __init__(self, scope_manager=None, recorder=None):
if recorder is None:
recorder = StanRecorder()
super(InstanaTracer, self).__init__(
recorder, InstanaSampler(), scope_manager)
self._propagators[ot.Format.HTTP_HEADERS] = HTTPPropagator()
self._propagators[ot.Format.TEXT_MAP] = TextPropagator()
self._propagators[ot.Format.BINARY] = BinaryPropagator()
def start_active_span(self,
operation_name,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True):
# create a new Span
span = self.start_span(
operation_name=operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
)
return self.scope_manager.activate(span, finish_on_close)
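    # Typical OpenTracing-style use of start_active_span above (illustrative):
    #   with tracer.start_active_span('work') as scope:
    #       scope.span.set_tag('key', 'value')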
def start_span(self,
operation_name=None,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False):
"Taken from BasicTracer so we can override generate_id calls to ours"
start_time = time.time() if start_time is None else start_time
# See if we have a parent_ctx in `references`
parent_ctx = None
if child_of is not None:
parent_ctx = (
child_of if isinstance(child_of, SpanContext)
else child_of.context)
elif references is not None and len(references) > 0:
# TODO only the first reference is currently used
parent_ctx = references[0].referenced_context
# retrieve the active SpanContext
if not ignore_active_span and parent_ctx is None:
scope = self.scope_manager.active
if scope is not None:
parent_ctx = scope.span.context
# Assemble the child ctx
gid = generate_id()
ctx = SpanContext(span_id=gid)
if parent_ctx is not None and parent_ctx.trace_id is not None:
if hasattr(parent_ctx, '_baggage') and parent_ctx._baggage is not None:
ctx._baggage = parent_ctx._baggage.copy()
ctx.trace_id = parent_ctx.trace_id
ctx.sampled = parent_ctx.sampled
ctx.long_trace_id = parent_ctx.long_trace_id
ctx.trace_parent = parent_ctx.trace_parent
ctx.instana_ancestor = parent_ctx.instana_ancestor
ctx.level = parent_ctx.level
ctx.correlation_type = parent_ctx.correlation_type
ctx.correlation_id = parent_ctx.correlation_id
ctx.traceparent = parent_ctx.traceparent
ctx.tracestate = parent_ctx.tracestate
else:
ctx.trace_id = gid
ctx.sampled = self.sampler.sampled(ctx.trace_id)
if parent_ctx is not None:
ctx.level = parent_ctx.level
ctx.correlation_type = parent_ctx.correlation_type
ctx.correlation_id = parent_ctx.correlation_id
ctx.traceparent = parent_ctx.traceparent
ctx.tracestate = parent_ctx.tracestate
# Tie it all together
span = InstanaSpan(self,
operation_name=operation_name,
context=ctx,
parent_id=(None if parent_ctx is None else parent_ctx.span_id),
tags=tags,
start_time=start_time)
if parent_ctx is not None:
span.synthetic = parent_ctx.synthetic
if operation_name in RegisteredSpan.EXIT_SPANS:
self.__add_stack(span)
return span
def inject(self, span_context, format, carrier, disable_w3c_trace_context=False):
if format in self._propagators:
return self._propagators[format].inject(span_context, carrier, disable_w3c_trace_context)
raise ot.UnsupportedFormatException()
def extract(self, format, carrier, disable_w3c_trace_context=False):
if format in self._propagators:
return self._propagators[format].extract(carrier, disable_w3c_trace_context)
raise ot.UnsupportedFormatException()
def __add_stack(self, span, limit=30):
"""
Adds a backtrace to <span>. The default length limit for
stack traces is 30 frames. A hard limit of 40 frames is enforced.
"""
try:
sanitized_stack = []
if limit > 40:
limit = 40
trace_back = traceback.extract_stack()
trace_back.reverse()
for frame in trace_back:
# Exclude Instana frames unless we're in dev mode
if "INSTANA_DEBUG" not in os.environ:
if re_tracer_frame.search(frame[0]) is not None:
continue
if re_with_stan_frame.search(frame[2]) is not None:
continue
sanitized_stack.append({
"c": frame[0],
"n": frame[1],
"m": frame[2]
})
if len(sanitized_stack) > limit:
# (limit * -1) gives us negative form of <limit> used for
# slicing from the end of the list. e.g. stack[-30:]
span.stack = sanitized_stack[(limit*-1):]
else:
span.stack = sanitized_stack
except Exception:
# No fail
pass
# Used by __add_stack
re_tracer_frame = re.compile(r"/instana/.*\.py$")
re_with_stan_frame = re.compile('with_instana')
| mit | 5e22cfd2341dfb88025da06d5a765a20 | 35.719101 | 101 | 0.570532 | 4.173691 | false | false | false | false |
instana/python-sensor | tests/frameworks/test_pyramid.py | 1 | 9240 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import unittest
import urllib3
import tests.apps.pyramid_app
from ..helpers import testenv
from instana.singletons import tracer
class TestPyramid(unittest.TestCase):
def setUp(self):
""" Clear all spans before a test run """
self.http = urllib3.PoolManager()
self.recorder = tracer.recorder
self.recorder.clear_spans()
def tearDown(self):
""" Do nothing for now """
return None
def test_vanilla_requests(self):
r = self.http.request('GET', testenv["pyramid_server"] + '/')
self.assertEqual(r.status, 200)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
def test_get_request(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["pyramid_server"] + '/')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
pyramid_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
assert('X-INSTANA-T' in response.headers)
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], pyramid_span.t)
assert('X-INSTANA-S' in response.headers)
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], pyramid_span.s)
assert('X-INSTANA-L' in response.headers)
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert('Server-Timing' in response.headers)
server_timing_value = "intid;desc=%s" % pyramid_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, pyramid_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(pyramid_span.p, urllib3_span.s)
# Synthetic
self.assertIsNone(pyramid_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(pyramid_span.ec)
# HTTP SDK span
self.assertEqual("sdk", pyramid_span.n)
assert(pyramid_span.data["sdk"])
self.assertEqual('http', pyramid_span.data["sdk"]["name"])
self.assertEqual('entry', pyramid_span.data["sdk"]["type"])
sdk_data = pyramid_span.data["sdk"]["custom"]
self.assertEqual('127.0.0.1:' + str(testenv['pyramid_port']), sdk_data["tags"]["http.host"])
self.assertEqual('/', sdk_data["tags"]["http.url"])
self.assertEqual('GET', sdk_data["tags"]["http.method"])
self.assertEqual(200, sdk_data["tags"]["http.status"])
self.assertNotIn("message", sdk_data["tags"])
self.assertNotIn("http.path_tpl", sdk_data["tags"])
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(200, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["pyramid_server"] + '/', urllib3_span.data["http"]["url"])
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_synthetic_request(self):
headers = {
'X-INSTANA-SYNTHETIC': '1'
}
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["pyramid_server"] + '/', headers=headers)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
pyramid_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
self.assertTrue(pyramid_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
def test_500(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["pyramid_server"] + '/500')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
pyramid_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(500, response.status)
assert('X-INSTANA-T' in response.headers)
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], pyramid_span.t)
assert('X-INSTANA-S' in response.headers)
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], pyramid_span.s)
assert('X-INSTANA-L' in response.headers)
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert('Server-Timing' in response.headers)
server_timing_value = "intid;desc=%s" % pyramid_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(test_span.t, pyramid_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(pyramid_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertEqual(1, urllib3_span.ec)
self.assertEqual(1, pyramid_span.ec)
# wsgi
self.assertEqual("sdk", pyramid_span.n)
self.assertEqual('http', pyramid_span.data["sdk"]["name"])
self.assertEqual('entry', pyramid_span.data["sdk"]["type"])
sdk_data = pyramid_span.data["sdk"]["custom"]
self.assertEqual('127.0.0.1:' + str(testenv['pyramid_port']), sdk_data["tags"]["http.host"])
self.assertEqual('/500', sdk_data["tags"]["http.url"])
self.assertEqual('GET', sdk_data["tags"]["http.method"])
self.assertEqual(500, sdk_data["tags"]["http.status"])
self.assertEqual("internal error", sdk_data["tags"]["message"])
self.assertNotIn("http.path_tpl", sdk_data["tags"])
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(500, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["pyramid_server"] + '/500', urllib3_span.data["http"]["url"])
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_exception(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["pyramid_server"] + '/exception')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
pyramid_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(500, response.status)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(test_span.t, pyramid_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(pyramid_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertEqual(1, urllib3_span.ec)
self.assertEqual(1, pyramid_span.ec)
# HTTP SDK span
self.assertEqual("sdk", pyramid_span.n)
self.assertEqual('http', pyramid_span.data["sdk"]["name"])
self.assertEqual('entry', pyramid_span.data["sdk"]["type"])
sdk_data = pyramid_span.data["sdk"]["custom"]
self.assertEqual('127.0.0.1:' + str(testenv['pyramid_port']), sdk_data["tags"]["http.host"])
self.assertEqual('/exception', sdk_data["tags"]["http.url"])
self.assertEqual('GET', sdk_data["tags"]["http.method"])
self.assertEqual(500, sdk_data["tags"]["http.status"])
self.assertEqual("fake exception", sdk_data["tags"]["message"])
self.assertNotIn("http.path_tpl", sdk_data["tags"])
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(500, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["pyramid_server"] + '/exception', urllib3_span.data["http"]["url"])
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
| mit | 69852bc40f2b08ae754e2b4fdf503f34 | 36.408907 | 100 | 0.622727 | 3.593932 | false | true | false | false |
instana/python-sensor | instana/propagators/base_propagator.py | 1 | 11634 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import sys
from instana.log import logger
from instana.util.ids import header_to_id, header_to_long_id
from instana.span_context import SpanContext
from instana.w3c_trace_context.traceparent import Traceparent
from instana.w3c_trace_context.tracestate import Tracestate
import os
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# The carrier can be a dict or a list.
# Using the trace header as an example, it can be in the following forms
# for extraction:
# X-Instana-T
# HTTP_X_INSTANA_T
#
# The second form above is found in places like Django middleware for
# incoming requests.
#
# For injection, we only support the standard format:
# X-Instana-T
class BasePropagator(object):
HEADER_KEY_T = 'X-INSTANA-T'
HEADER_KEY_S = 'X-INSTANA-S'
HEADER_KEY_L = 'X-INSTANA-L'
HEADER_KEY_SYNTHETIC = 'X-INSTANA-SYNTHETIC'
HEADER_KEY_TRACEPARENT = "traceparent"
HEADER_KEY_TRACESTATE = "tracestate"
LC_HEADER_KEY_T = 'x-instana-t'
LC_HEADER_KEY_S = 'x-instana-s'
LC_HEADER_KEY_L = 'x-instana-l'
LC_HEADER_KEY_SYNTHETIC = 'x-instana-synthetic'
ALT_LC_HEADER_KEY_T = 'http_x_instana_t'
ALT_LC_HEADER_KEY_S = 'http_x_instana_s'
ALT_LC_HEADER_KEY_L = 'http_x_instana_l'
ALT_LC_HEADER_KEY_SYNTHETIC = 'http_x_instana_synthetic'
ALT_HEADER_KEY_TRACEPARENT = "http_traceparent"
ALT_HEADER_KEY_TRACESTATE = "http_tracestate"
# ByteArray variations
B_HEADER_KEY_T = b'x-instana-t'
B_HEADER_KEY_S = b'x-instana-s'
B_HEADER_KEY_L = b'x-instana-l'
B_HEADER_KEY_SYNTHETIC = b'x-instana-synthetic'
B_HEADER_SERVER_TIMING = b'server-timing'
B_HEADER_KEY_TRACEPARENT = b'traceparent'
B_HEADER_KEY_TRACESTATE = b'tracestate'
B_ALT_LC_HEADER_KEY_T = b'http_x_instana_t'
B_ALT_LC_HEADER_KEY_S = b'http_x_instana_s'
B_ALT_LC_HEADER_KEY_L = b'http_x_instana_l'
B_ALT_LC_HEADER_KEY_SYNTHETIC = b'http_x_instana_synthetic'
B_ALT_HEADER_KEY_TRACEPARENT = b'http_traceparent'
B_ALT_HEADER_KEY_TRACESTATE = b'http_tracestate'
def __init__(self):
self._tp = Traceparent()
self._ts = Tracestate()
@staticmethod
def _extract_headers_dict(carrier):
"""
This method converts the incoming carrier into a dict
:param carrier:
:return: dc dictionary
"""
try:
if isinstance(carrier, dict):
dc = carrier
elif hasattr(carrier, "__dict__"):
dc = carrier.__dict__
else:
dc = dict(carrier)
except Exception:
logger.debug("extract: Couldn't convert %s", carrier)
dc = None
return dc
@staticmethod
def _get_ctx_level(level):
"""
Extract the level value and return it, as it may include correlation values
:param level:
:return:
"""
try:
ctx_level = int(level.split(",")[0]) if level else 1
except Exception:
ctx_level = 1
return ctx_level
@staticmethod
def _set_correlation_properties(level, ctx):
"""
Set the correlation values if they are present
:param level:
:param ctx:
:return:
"""
try:
ctx.correlation_type = level.split(",")[1].split("correlationType=")[1].split(";")[0]
if "correlationId" in level:
ctx.correlation_id = level.split(",")[1].split("correlationId=")[1].split(";")[0]
except Exception:
logger.debug("extract instana correlation type/id error:", exc_info=True)
def _get_participating_trace_context(self, span_context):
"""
This method is called for getting the updated traceparent and tracestate values
:param span_context:
:return: traceparent, tracestate
"""
if span_context.long_trace_id and not span_context.trace_parent:
tp_trace_id = span_context.long_trace_id
else:
tp_trace_id = span_context.trace_id
traceparent = span_context.traceparent
tracestate = span_context.tracestate
traceparent = self._tp.update_traceparent(traceparent, tp_trace_id, span_context.span_id, span_context.level)
# In suppression mode do not update the tracestate and
# do not add the 'in=' key-value pair to the incoming tracestate
# Just propagate the incoming tracestate (if any) unchanged.
if span_context.suppression:
return traceparent, tracestate
tracestate = self._ts.update_tracestate(tracestate, span_context.trace_id, span_context.span_id)
return traceparent, tracestate
def __determine_span_context(self, trace_id, span_id, level, synthetic, traceparent, tracestate,
disable_w3c_trace_context):
"""
This method determines the span context depending on a set of conditions being met
Detailed description of the conditions can be found in the instana internal technical-documentation,
under section http-processing-for-instana-tracers
:param trace_id: instana trace id
:param span_id: instana span id
:param level: instana level
:param synthetic: instana synthetic
:param traceparent:
:param tracestate:
:param disable_w3c_trace_context: flag used to enable w3c trace context only on HTTP requests
:return: ctx
"""
correlation = False
disable_traceparent = os.environ.get("INSTANA_DISABLE_W3C_TRACE_CORRELATION", "")
instana_ancestor = None
ctx = SpanContext()
if level and "correlationType" in level:
trace_id, span_id = [None] * 2
correlation = True
ctx_level = self._get_ctx_level(level)
if ctx_level == 0 or level == '0':
trace_id = ctx.trace_id = None
span_id = ctx.span_id = None
ctx.correlation_type = None
ctx.correlation_id = None
if trace_id and span_id:
ctx.trace_id = trace_id[-16:] # only the last 16 chars
ctx.span_id = span_id[-16:] # only the last 16 chars
ctx.synthetic = synthetic is not None
if len(trace_id) > 16:
ctx.long_trace_id = trace_id
elif not disable_w3c_trace_context and traceparent and trace_id is None and span_id is None:
_, tp_trace_id, tp_parent_id, _ = self._tp.get_traceparent_fields(traceparent)
if tracestate and "in=" in tracestate:
instana_ancestor = self._ts.get_instana_ancestor(tracestate)
if disable_traceparent == "":
ctx.trace_id = tp_trace_id[-16:]
ctx.span_id = tp_parent_id
ctx.synthetic = synthetic is not None
ctx.trace_parent = True
ctx.instana_ancestor = instana_ancestor
ctx.long_trace_id = tp_trace_id
else:
if instana_ancestor:
ctx.trace_id = instana_ancestor.t
ctx.span_id = instana_ancestor.p
ctx.synthetic = synthetic is not None
elif synthetic:
ctx.synthetic = synthetic
if correlation:
self._set_correlation_properties(level, ctx)
if traceparent:
ctx.traceparent = traceparent
ctx.tracestate = tracestate
ctx.level = ctx_level
return ctx
def __extract_instana_headers(self, dc):
"""
Search carrier for the *HEADER* keys and return the tracing key-values
:param dc: The dict or list potentially containing context
:return: trace_id, span_id, level, synthetic
"""
trace_id, span_id, level, synthetic = [None] * 4
# Headers can exist in the standard X-Instana-T/S format or the alternate HTTP_X_INSTANA_T/S style
try:
trace_id = dc.get(self.LC_HEADER_KEY_T) or dc.get(self.ALT_LC_HEADER_KEY_T) or dc.get(
self.B_HEADER_KEY_T) or dc.get(self.B_ALT_LC_HEADER_KEY_T)
if trace_id:
trace_id = header_to_long_id(trace_id)
span_id = dc.get(self.LC_HEADER_KEY_S) or dc.get(self.ALT_LC_HEADER_KEY_S) or dc.get(
self.B_HEADER_KEY_S) or dc.get(self.B_ALT_LC_HEADER_KEY_S)
if span_id:
span_id = header_to_id(span_id)
level = dc.get(self.LC_HEADER_KEY_L) or dc.get(self.ALT_LC_HEADER_KEY_L) or dc.get(
self.B_HEADER_KEY_L) or dc.get(self.B_ALT_LC_HEADER_KEY_L)
if level and PY3 is True and isinstance(level, bytes):
level = level.decode("utf-8")
synthetic = dc.get(self.LC_HEADER_KEY_SYNTHETIC) or dc.get(self.ALT_LC_HEADER_KEY_SYNTHETIC) or dc.get(
self.B_HEADER_KEY_SYNTHETIC) or dc.get(self.B_ALT_LC_HEADER_KEY_SYNTHETIC)
if synthetic:
synthetic = synthetic in ['1', b'1']
except Exception:
logger.debug("extract error:", exc_info=True)
return trace_id, span_id, level, synthetic
def __extract_w3c_trace_context_headers(self, dc):
"""
Search carrier for the *HEADER* keys and return the tracing key-values
:param dc: The dict or list potentially containing context
:return: traceparent, tracestate
"""
traceparent, tracestate = [None] * 2
try:
traceparent = dc.get(self.HEADER_KEY_TRACEPARENT) or dc.get(self.ALT_HEADER_KEY_TRACEPARENT) or dc.get(
self.B_HEADER_KEY_TRACEPARENT) or dc.get(self.B_ALT_HEADER_KEY_TRACEPARENT)
if traceparent and PY3 is True and isinstance(traceparent, bytes):
traceparent = traceparent.decode("utf-8")
tracestate = dc.get(self.HEADER_KEY_TRACESTATE) or dc.get(self.ALT_HEADER_KEY_TRACESTATE) or dc.get(
self.B_HEADER_KEY_TRACESTATE) or dc.get(self.B_ALT_HEADER_KEY_TRACESTATE)
if tracestate and PY3 is True and isinstance(tracestate, bytes):
tracestate = tracestate.decode("utf-8")
except Exception:
logger.debug("extract error:", exc_info=True)
return traceparent, tracestate
def extract(self, carrier, disable_w3c_trace_context=False):
"""
This method overrides one of the Baseclasses as with the introduction of W3C trace context for the HTTP
requests more extracting steps and logic was required
:param disable_w3c_trace_context:
:param carrier:
:return: the context or None
"""
try:
traceparent, tracestate = [None] * 2
headers = self._extract_headers_dict(carrier=carrier)
if headers is None:
return None
headers = {k.lower(): v for k, v in headers.items()}
trace_id, span_id, level, synthetic = self.__extract_instana_headers(dc=headers)
if not disable_w3c_trace_context:
traceparent, tracestate = self.__extract_w3c_trace_context_headers(dc=headers)
if traceparent:
traceparent = self._tp.validate(traceparent)
ctx = self.__determine_span_context(trace_id, span_id, level, synthetic, traceparent, tracestate,
disable_w3c_trace_context)
return ctx
except Exception:
logger.debug("extract error:", exc_info=True)
| mit | 0db7778a2b3bd4f415858d0e4c4a7cd3 | 37.523179 | 117 | 0.606928 | 3.581897 | false | false | false | false |
instana/python-sensor | instana/autoprofile/profile.py | 1 | 3900 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import math
import os
import uuid
import time
class Profile(object):
CATEGORY_CPU = 'cpu'
CATEGORY_MEMORY = 'memory'
CATEGORY_TIME = 'time'
TYPE_CPU_USAGE = 'cpu-usage'
TYPE_MEMORY_ALLOCATION_RATE = 'memory-allocation-rate'
TYPE_BLOCKING_CALLS = 'blocking-calls'
UNIT_NONE = ''
UNIT_MILLISECOND = 'millisecond'
UNIT_MICROSECOND = 'microsecond'
UNIT_NANOSECOND = 'nanosecond'
UNIT_BYTE = 'byte'
UNIT_KILOBYTE = 'kilobyte'
UNIT_PERCENT = 'percent'
UNIT_SAMPLE = 'sample'
RUNTIME_PYTHON = 'python'
def __init__(self, category, typ, unit, roots, duration, timespan):
self.process_id = str(os.getpid())
self.id = generate_uuid()
self.runtime = Profile.RUNTIME_PYTHON
self.category = category
self.type = typ
self.unit = unit
self.roots = roots
self.duration = duration
self.timespan = timespan
self.timestamp = millis()
def to_dict(self):
profile_dict = {
'pid': self.process_id,
'id': self.id,
'runtime': self.runtime,
'category': self.category,
'type': self.type,
'unit': self.unit,
'roots': [root.to_dict() for root in self.roots],
'duration': self.duration,
'timespan': self.timespan,
'timestamp': self.timestamp
}
return profile_dict
class CallSite(object):
__slots__ = [
'method_name',
'file_name',
'file_line',
'measurement',
'num_samples',
'children'
]
def __init__(self, method_name, file_name, file_line):
self.method_name = method_name
self.file_name = file_name
self.file_line = file_line
self.measurement = 0
self.num_samples = 0
self.children = dict()
def create_key(self, method_name, file_name, file_line):
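        # e.g. create_key('main', 'app.py', 10) -> 'main (app.py:10)'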
return '{0} ({1}:{2})'.format(method_name, file_name, file_line)
def find_child(self, method_name, file_name, file_line):
key = self.create_key(method_name, file_name, file_line)
if key in self.children:
return self.children[key]
return None
def add_child(self, child):
self.children[self.create_key(child.method_name, child.file_name, child.file_line)] = child
def remove_child(self, child):
del self.children[self.create_key(child.method_name, child.file_name, child.file_line)]
def find_or_add_child(self, method_name, file_name, file_line):
child = self.find_child(method_name, file_name, file_line)
        if child is None:
child = CallSite(method_name, file_name, file_line)
self.add_child(child)
return child
def increment(self, value, count):
self.measurement += value
self.num_samples += count
def normalize(self, factor):
self.measurement = self.measurement / factor
self.num_samples = int(math.ceil(self.num_samples / factor))
for child in self.children.values():
child.normalize(factor)
def floor(self):
self.measurement = int(self.measurement)
for child in self.children.values():
child.floor()
def to_dict(self):
children_dicts = []
for child in self.children.values():
children_dicts.append(child.to_dict())
call_site_dict = {
'method_name': self.method_name,
'file_name': self.file_name,
'file_line': self.file_line,
'measurement': self.measurement,
'num_samples': self.num_samples,
'children': children_dicts
}
return call_site_dict
def millis():
return int(round(time.time() * 1000))
def generate_uuid():
return str(uuid.uuid4())
| mit | 6f349996fb8b566d9b98e2abda5421fb | 27.467153 | 99 | 0.584103 | 3.696682 | false | false | false | false |
instana/python-sensor | instana/util/__init__.py | 1 | 4319 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import json
import sys
import time
from collections import defaultdict
import pkg_resources
try:
from urllib import parse
except ImportError:
import urlparse as parse
import urllib
from ..log import logger
if sys.version_info.major == 2:
string_types = basestring
else:
string_types = str
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def nested_dictionary():
return defaultdict(DictionaryOfStan)
# Simple implementation of a nested dictionary.
DictionaryOfStan = nested_dictionary
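# e.g. d = DictionaryOfStan(); d['a']['b'] = 1 creates the nested level
# automatically (defaultdicts all the way down).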
def to_json(obj):
"""
Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested
dicts (or something better)
:param obj: the object to serialize to json
:return: json string
"""
try:
def extractor(o):
if not hasattr(o, '__dict__'):
logger.debug("Couldn't serialize non dict type: %s", type(o))
return {}
else:
return {k.lower(): v for k, v in o.__dict__.items() if v is not None}
return json.dumps(obj, default=extractor, sort_keys=False, separators=(',', ':')).encode()
except Exception:
logger.debug("to_json non-fatal encoding issue: ", exc_info=True)
def to_pretty_json(obj):
"""
Convert obj to pretty json. Used mostly in logging/debugging.
:param obj: the object to serialize to json
:return: json string
"""
try:
def extractor(o):
if not hasattr(o, '__dict__'):
logger.debug("Couldn't serialize non dict type: %s", type(o))
return {}
else:
return {k.lower(): v for k, v in o.__dict__.items() if v is not None}
return json.dumps(obj, default=extractor, sort_keys=True, indent=4, separators=(',', ':'))
except Exception:
logger.debug("to_pretty_json non-fatal encoding issue: ", exc_info=True)
def package_version():
"""
Determine the version of this package.
:return: String representing known version
"""
version = ""
try:
version = pkg_resources.get_distribution('instana').version
except pkg_resources.DistributionNotFound:
version = 'unknown'
return version
def get_default_gateway():
"""
Attempts to read /proc/self/net/route to determine the default gateway in use.
:return: String - the ip address of the default gateway or None if not found/possible/non-existant
"""
try:
hip = None
# The first line is the header line
# We look for the line where the Destination is 00000000 - that is the default route
# The Gateway IP is encoded backwards in hex.
with open("/proc/self/net/route") as routes:
for line in routes:
parts = line.split('\t')
if parts[1] == '00000000':
hip = parts[2]
if hip is not None and len(hip) == 8:
# Reverse order, convert hex to int
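            # e.g. hip == '0101A8C0' decodes to '192.168.1.1'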
return "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16), int(hip[2:4], 16), int(hip[0:2], 16))
except Exception:
logger.warning("get_default_gateway: ", exc_info=True)
def every(delay, task, name):
"""
Executes a task every `delay` seconds
:param delay: the delay in seconds
:param task: the method to run. The method should return False if you want the loop to stop.
:return: None
"""
next_time = time.time() + delay
while True:
time.sleep(max(0, next_time - time.time()))
try:
if task() is False:
break
except Exception:
logger.debug("Problem while executing repetitive task: %s", name, exc_info=True)
# skip tasks if we are behind schedule:
next_time += (time.time() - next_time) // delay * delay + delay
def validate_url(url):
"""
Validate if <url> is a valid url
Examples:
- "http://localhost:5000" - valid
- "http://localhost:5000/path" - valid
- "sandwich" - invalid
@param url: string
@return: Boolean
"""
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except Exception:
pass
return False
| mit | 27faeb1e93f371c9dfe8d04c4a70d6a4 | 27.045455 | 111 | 0.601991 | 3.908597 | false | false | false | false |
veekun/pokedex | pokedex/tests/test_database_sanity.py | 1 | 6762 | import pytest
parametrize = pytest.mark.parametrize
from collections import Counter
import re
from sqlalchemy.orm import aliased, joinedload, lazyload
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from pokedex.db import tables, util
def test_encounter_slots(session):
"""Encounters have a version, which has a version group; encounters also
have an encounter_slot, which has a version group. The two version
groups should match, universally.
"""
version_group_a = aliased(tables.VersionGroup)
version_group_b = aliased(tables.VersionGroup)
sanity_q = session.query(tables.Encounter) \
.join((tables.EncounterSlot, tables.Encounter.slot)) \
.join((version_group_a, tables.EncounterSlot.version_group)) \
.join((tables.Version, tables.Encounter.version)) \
.join((version_group_b, tables.Version.version_group)) \
.filter(version_group_a.id != version_group_b.id)
# Encounter slots all match the encounters they belong to
assert sanity_q.count() == 0
def test_encounter_regions(session):
"""Check that encounter locations match the region of the game they're from.
"""
sanity_q = session.query(tables.Encounter) \
.join((tables.Version, tables.Encounter.version)) \
.join((tables.VersionGroup, tables.Version.version_group)) \
.join((tables.LocationArea, tables.Encounter.location_area)) \
.join((tables.Location, tables.LocationArea.location)) \
.join((tables.Region, tables.Location.region)) \
.filter(~tables.VersionGroup.version_group_regions.any(tables.VersionGroupRegion.region_id == tables.Region.id))
for e in sanity_q.limit(20):
acceptable_regions = " or ".join(r.identifier for r in e.version.version_group.regions)
if e.location_area.location.region is not None:
print("{e} ({e.pokemon.identifier}, {e.slot.method.identifier}, {e.version.identifier}) is in {e.location_area.location.region.identifier} ({e.location_area.location.identifier}) but should be in {acceptable_regions} ({e.version.identifier})".format(e=e, acceptable_regions=acceptable_regions))
else:
print("{e} ({e.pokemon.identifier}, {e.slot.method.identifier}, {e.version.identifier}) is in a pseudo-location ({e.location_area.location.identifier}) that is not part of any region, but should be in {acceptable_regions} ({e.version.identifier})".format(e=e, acceptable_regions=acceptable_regions))
# Encounter regions match the games they belong to
assert sanity_q.count() == 0
@parametrize('cls', tables.mapped_classes)
def test_nonzero_autoincrement_ids(session, cls):
"""Check that autoincrementing ids don't contain zeroes
MySQL doesn't like these, see e.g. bug #580
"""
if 'id' not in cls.__table__.c:
return
if not cls.__table__.c.id.autoincrement:
return
try:
util.get(session, cls, id=0)
except NoResultFound:
pass
else:
pytest.fail("No zero id in %s" % cls.__name__)
def test_unique_form_order(session):
"""Check that one PokemonForm.order value isn't used for more species"""
species_by_form_order = {}
query = session.query(tables.PokemonForm)
query = query.options(joinedload('pokemon.species'))
for form in query:
try:
previous_species = species_by_form_order[form.order]
except KeyError:
species_by_form_order[form.order] = form.species
else:
assert previous_species == form.species, (
"PokemonForm.order == %s is used for %s and %s" % (
form.order,
species_by_form_order[form.order].name,
form.species.name))
def test_pokedex_numbers(session):
"""Check that pokedex numbers are contiguous (there are no gaps)"""
dex_query = session.query(tables.Pokedex).order_by(tables.Pokedex.id)
failed = False
for dex in dex_query:
query = session.query(tables.PokemonDexNumber.pokedex_number).filter_by(pokedex_id=dex.id)
numbers = set([x[0] for x in query.all()])
for i in range(1, max(numbers)):
if i not in numbers:
print("number {n} is missing from the {dex.name} pokedex".format(n=i, dex=dex))
failed = True
assert not failed, "missing pokedex numbers"
def test_default_forms(session):
"""Check that each pokemon has one default form and each species has one
default pokemon."""
q = session.query(tables.Pokemon)
# TODO: could use table.Pokemon.forms.and_: https://docs.sqlalchemy.org/en/14/orm/queryguide.html#orm-queryguide-join-on-augmented
q = q.outerjoin(tables.PokemonForm, (tables.PokemonForm.pokemon_id == tables.Pokemon.id) & (tables.PokemonForm.is_default==True))
q = q.options(lazyload('*'))
q = q.group_by(tables.Pokemon)
q = q.add_columns(func.count(tables.PokemonForm.id))
for pokemon, num_default_forms in q:
if num_default_forms == 0:
pytest.fail("pokemon %s has no default forms" % pokemon.name)
elif num_default_forms > 1:
pytest.fail("pokemon %s has %d default forms" % (pokemon.name, num_default_forms))
q = session.query(tables.PokemonSpecies)
q = q.outerjoin(tables.Pokemon, (tables.Pokemon.species_id == tables.PokemonSpecies.id) & (tables.Pokemon.is_default==True))
q = q.options(lazyload('*'))
q = q.group_by(tables.PokemonSpecies)
q = q.add_columns(func.count(tables.Pokemon.id))
for species, num_default_pokemon in q:
if num_default_pokemon == 0:
pytest.fail("species %s has no default pokemon" % species.name)
elif num_default_pokemon > 1:
pytest.fail("species %s has %d default pokemon" % (species.name, num_default_pokemon))
ROUTE_RE = re.compile(u'route-\\d+')
def test_location_identifiers(session):
"""Check that location identifiers for some common locations are prefixed
with the region name, ala kalos-route-2"""
q = session.query(tables.Location)
q = q.join(tables.Region)
q = q.options(lazyload('*'))
for loc in q:
if (loc.identifier in [u'victory-road', u'pokemon-league', u'safari-zone']
or ROUTE_RE.match(loc.identifier)):
if loc.region:
region = loc.region.identifier.lower()
suggested_identifier = region + "-" + loc.identifier
pytest.fail("location %d: identifier %s should be prefixed with its region (e.g. %s)" % (loc.id, loc.identifier, suggested_identifier))
else:
pytest.fail("location %d: identifier %s should be prefixed with its region" % (loc.id, loc.identifier))
| mit | 2aeae56cccbb8e96dee9b9db300a6e75 | 42.909091 | 311 | 0.660603 | 3.616043 | false | true | false | false |
veekun/pokedex | pokedex/util/media.py | 1 | 20051 |
"""Media accessors
All media accessor __init__s take a `root` argument, which should be a path
to the root of the media directory.
Alternatively, `root` can be a custom MediaFile subclass.
Most __init__s take an ORM object as a second argument.
Their various methods take a number of arguments specifying exactly which
file you want (such as the female sprite, backsprite, etc.).
ValueError is raised when the specified file cannot be found.
The accessors use fallbacks: for example Bulbasaur's males and females look the
same, so if you request Bulbasaur's female sprite, it will give you the common
image. Or for a Pokemon without individual form sprites, you will get the
common base sprite. Or for versions without shiny Pokemon, you will always
get the non-shiny version (that's how shiny Pokemon looked there!).
However arguments such as `animated` don't use fallbacks.
You can set `strict` to True to disable these fallbacks and cause ValueError
to be raised when the exact specific file you asked for is not found. This is
useful for listing non-duplicate sprites, for example.
Use keyword arguments when calling the media-getting methods, unless noted
otherwise.
The returned "file" objects have useful attributes like relative_path,
path, and open().
All images are in the PNG format, except animations (GIF). All sounds are OGGs.
"""
import os
from functools import partial
import six
class MediaFile(object):
"""Represents a file: picture, sound, etc.
Attributes:
path_elements: List of directory/file names that make up relative_path
relative_path: Filesystem path relative to the root
path: Absolute path to the file
exists: True if the file exists
media_available: false if no media is available at the given root.
open(): Open the file
"""
def __init__(self, root, *path_elements):
self.path_elements = path_elements
self.root = root
@property
def relative_path(self):
return os.path.join(*self.path_elements)
@property
def path(self):
return os.path.join(self.root, *self.path_elements)
def open(self):
"""Open this file for reading, in the appropriate mode (i.e. binary)
"""
return open(self.path, 'rb')
@property
def exists(self):
return os.path.exists(self.path)
@property
def media_available(self):
return os.path.isdir(self.root)
def __eq__(self, other):
return self.path == other.path
def __ne__(self, other):
return self.path != other.path
def __str__(self):
return '<Pokedex file %s>' % self.relative_path
class BaseMedia(object):
def __init__(self, root):
if isinstance(root, six.string_types):
self.file_class = partial(MediaFile, root)
else:
self.file_class = root
@property
def available(self):
return self.file_class().media_available
def from_path_elements(self, path_elements, basename, extension,
surely_exists=False):
filename = basename + extension
path_elements = [self.toplevel_dir] + path_elements + [filename]
mfile = self.file_class(*path_elements)
if surely_exists or mfile.exists:
return mfile
else:
raise ValueError('File %s not found' % mfile.path)
class _BasePokemonMedia(BaseMedia):
toplevel_dir = 'pokemon'
has_gender_differences = False
is_species = False
is_proper = False
introduced_in = 0
    # Info about what's inside the pokemon main sprite directories, so we
# don't have to check directory existence all the time.
_pokemon_sprite_info = {
'red-blue': (1, set('back gray'.split())),
'red-green': (1, set('back gray'.split())),
'yellow': (1, set('back gray gbc'.split())),
'gold': (2, set('back shiny'.split())),
'silver': (2, set('back shiny'.split())),
'crystal': (2, set('animated back shiny'.split())),
'ruby-sapphire': (3, set('back shiny'.split())),
'emerald': (3, set('animated back shiny frame2'.split())),
'firered-leafgreen': (3, set('back shiny'.split())),
'diamond-pearl': (4, set('back shiny female frame2'.split())),
'platinum': (4, set('back shiny female frame2'.split())),
'heartgold-soulsilver': (4, set('back shiny female frame2'.split())),
'black-white': (5, set('back shiny female'.split())),
}
def __init__(self, root, species_id, form_postfix=None):
BaseMedia.__init__(self, root)
self.species_id = str(species_id)
self.form_postfix = form_postfix
def _get_file(self, path_elements, extension, strict, surely_exists=False):
basename = str(self.species_id)
if self.form_postfix:
fullname = basename + self.form_postfix
try:
return self.from_path_elements(
path_elements, fullname, extension,
surely_exists=surely_exists)
except ValueError:
if strict:
raise
return self.from_path_elements(path_elements, basename, extension,
surely_exists=surely_exists)
def sprite(self,
version='black-white',
# The media directories are in this order:
animated=False,
back=False,
color=None,
shiny=False,
female=False,
frame=None,
strict=False,
):
"""Get a main sprite sprite for a pokemon.
Everything except version should be given as a keyword argument.
Either specify version as an ORM object, or give the version path as
a string (which is the only way to get 'red-green'). Leave the default
for the latest version.
animated: get a GIF animation (currently Crystal & Emerald only)
back: get a backsprite instead of a front one
color: can be 'color' (RGBY only) or 'gbc' (Yellow only)
shiny: get a shiny sprite. In old versions, gives a normal sprite unless
`strict` is set
female: get a female sprite instead of male. For pokemon with no sexual
dimorphism, gets the common sprite unless `strict` is set.
frame: set to 2 to get the second frame of the animation
(Emerald, DPP, and HG/SS only)
If the sprite is not found, raise a ValueError.
"""
if isinstance(version, six.string_types):
version_dir = version
try:
generation, info = self._pokemon_sprite_info[version_dir]
except KeyError:
                raise ValueError('Version directory %s not found' % version_dir)
else:
version_dir = version.identifier
try:
generation, info = self._pokemon_sprite_info[version_dir]
except KeyError:
version_group = version.version_group
version_dir = '-'.join(
v.identifier for v in version_group.versions)
try:
generation, info = self._pokemon_sprite_info[version_dir]
except KeyError:
                    raise ValueError('Version directory %s not found' % version_dir)
if generation < self.introduced_in:
raise ValueError("Pokemon %s didn't exist in %s" % (
self.species_id, version_dir))
path_elements = ['main-sprites', version_dir]
if animated:
if 'animated' not in info:
raise ValueError("No animated sprites for %s" % version_dir)
path_elements.append('animated')
extension = '.gif'
else:
extension = '.png'
if back:
if version_dir == 'emerald':
# Emerald backsprites are the same as ruby/sapphire
if strict:
raise ValueError("Emerald uses R/S backsprites")
if animated:
raise ValueError("No animated backsprites for Emerald")
path_elements[1] = version_dir = 'ruby-sapphire'
if version_dir == 'crystal' and animated:
raise ValueError("No animated backsprites for Crystal")
path_elements.append('back')
if color == 'gray':
if 'gray' not in info:
raise ValueError("No grayscale sprites for %s" % version_dir)
path_elements.append('gray')
elif color == 'gbc':
if 'gbc' not in info:
raise ValueError("No GBC sprites for %s" % version_dir)
path_elements.append('gbc')
elif color:
raise ValueError("Unknown color scheme: %s" % color)
if shiny:
if 'shiny' in info:
path_elements.append('shiny')
elif strict:
raise ValueError("No shiny sprites for %s" % version_dir)
if female:
female_sprite = self.has_gender_differences
# Chimecho's female back frame 2 sprite has one hand in
# a slightly different pose, in Platinum and HGSS
            # (we have duplicate sprites for frame 1, for convenience)
if self.species_id == '358' and back and version_dir in (
'platinum', 'heartgold-soulsilver'):
female_sprite = True
female_sprite = female_sprite and 'female' in info
if female_sprite:
path_elements.append('female')
elif strict:
raise ValueError(
'Pokemon %s has no gender differences' % self.species_id)
if not frame or frame == 1:
pass
elif frame == 2:
if 'frame2' in info:
path_elements.append('frame%s' % frame)
else:
raise ValueError("No frame 2 for %s" % version_dir)
else:
raise ValueError("Bad frame %s" % frame)
return self._get_file(path_elements, extension, strict=strict,
# Avoid a stat in the common case
surely_exists=(self.is_species and version_dir == 'black-white'
and not back and not female))
def _maybe_female(self, path_elements, female, strict):
if female:
if self.has_gender_differences:
elements = path_elements + ['female']
try:
return self._get_file(elements, '.png', strict=strict)
except ValueError:
if strict:
raise
elif strict:
raise ValueError(
'Pokemon %s has no gender differences' % self.species_id)
return self._get_file(path_elements, '.png', strict=strict)
def icon(self, female=False, strict=False):
"""Get the Pokemon's menu icon"""
return self._maybe_female(['icons'], female, strict)
def sugimori(self, female=False, strict=False):
"""Get the Pokemon's official art, drawn by Ken Sugimori"""
return self._maybe_female(['sugimori'], female, strict)
def overworld(self,
direction='down',
shiny=False,
female=False,
frame=1,
strict=False,
):
"""Get an overworld sprite
direction: 'up', 'down', 'left', or 'right'
shiny: true for a shiny sprite
female: true for female sprite (or the common one for both M & F)
frame: 2 for the second animation frame
strict: disable fallback for `female`
"""
path_elements = ['overworld']
if shiny:
path_elements.append('shiny')
if female:
if self.has_gender_differences:
path_elements.append('female')
elif strict:
raise ValueError('No female overworld sprite')
else:
female = False
path_elements.append(direction)
if frame and frame > 1:
path_elements.append('frame%s' % frame)
try:
return self._get_file(path_elements, '.png', strict=strict)
except ValueError:
if female and not strict:
path_elements.remove('female')
return self._get_file(path_elements, '.png', strict=strict)
else:
raise
def footprint(self, strict=False):
"""Get the Pokemon's footprint"""
return self._get_file(['footprints'], '.png', strict=strict)
def trozei(self, strict=False):
"""Get the Pokemon's animated Trozei sprite"""
return self._get_file(['trozei'], '.gif', strict=strict)
def cry(self, strict=False):
"""Get the Pokemon's cry"""
return self._get_file(['cries'], '.ogg', strict=strict)
def cropped_sprite(self, strict=False):
"""Get the Pokemon's cropped sprite"""
return self._get_file(['cropped'], '.png', strict=strict)
class PokemonFormMedia(_BasePokemonMedia):
"""Media related to a PokemonForm
"""
is_proper = True
def __init__(self, root, pokemon_form):
species_id = pokemon_form.species.id
if pokemon_form.form_identifier:
form_postfix = '-' + pokemon_form.form_identifier
else:
form_postfix = None
_BasePokemonMedia.__init__(self, root, species_id, form_postfix)
self.form = pokemon_form
species = pokemon_form.species
self.has_gender_differences = species.has_gender_differences
self.introduced_in = pokemon_form.version_group.generation_id
class PokemonSpeciesMedia(_BasePokemonMedia):
"""Media related to a PokemonSpecies
"""
is_species = True
is_proper = True
def __init__(self, root, species):
_BasePokemonMedia.__init__(self, root, species.id)
self.has_gender_differences = species.has_gender_differences
self.introduced_in = species.generation_id
class UnknownPokemonMedia(_BasePokemonMedia):
"""Media related to the unknown Pokemon ("?")
Note that not a lot of files are available for it.
"""
def __init__(self, root):
_BasePokemonMedia.__init__(self, root, '0')
class EggMedia(_BasePokemonMedia):
"""Media related to a pokemon egg
Note that not a lot of files are available for these.
Give a Manaphy as `species` to get the Manaphy egg.
"""
def __init__(self, root, species=None):
if species and species.identifier == 'manaphy':
postfix = '-manaphy'
else:
postfix = None
_BasePokemonMedia.__init__(self, root, 'egg', postfix)
class SubstituteMedia(_BasePokemonMedia):
"""Media related to the Substitute sprite
Note that not a lot of files are available for Substitute.
"""
def __init__(self, root):
_BasePokemonMedia.__init__(self, root, 'substitute')
class _BaseItemMedia(BaseMedia):
toplevel_dir = 'items'
def underground(self, rotation=0):
"""Get the item's sprite as it appears in the Sinnoh underground
Rotation can be 0, 90, 180, or 270.
"""
if rotation:
basename = self.identifier + '-%s' % rotation
else:
basename = self.identifier
return self.from_path_elements(['underground'], basename, '.png')
class ItemMedia(_BaseItemMedia):
"""Media related to an item
"""
def __init__(self, root, item):
_BaseItemMedia.__init__(self, root)
self.item = item
self.identifier = item.identifier
def sprite(self, version=None):
"""Get the item's sprite
If version is not given, use the latest version.
"""
identifier = self.identifier
# Handle machines
# We check the identifier, so that we don't query the machine
# information for any item.
if identifier.startswith(('tm', 'hm')):
try:
int(identifier[2:])
except ValueError:
# Not really a TM/HM
pass
else:
machines = self.item.machines
if version:
try:
machine = [
m for m in machines
if m.version_group == version.version_group
][0]
except IndexError:
raise ValueError("%s doesn't exist in %s" % (
identifier, version.identifier))
else:
# They're ordered, so get the last one
machine = machines[-1]
type_identifier = machine.move.type.identifier
identifier = identifier[:2] + '-' + type_identifier
elif identifier.startswith('data-card-'):
try:
int(identifier[10:])
except ValueError:
# Not a real data card???
pass
else:
identifier = 'data-card'
if version is not None:
generation_id = version.generation.id
if generation_id <= 3 and identifier == 'dowsing-mchn':
identifier = 'itemfinder'
try:
gen = 'gen%s' % generation_id
return self.from_path_elements([gen], identifier, '.png')
except ValueError:
pass
return self.from_path_elements([], identifier, '.png',
surely_exists=True)
def underground(self, rotation=0):
"""Get the item's sprite as it appears in the Sinnoh underground
Rotation can be 0, 90, 180, or 270.
"""
if not self.item.appears_underground:
raise ValueError("%s doesn't appear underground" % self.identifier)
return super(ItemMedia, self).underground(rotation=rotation)
def berry_image(self):
"""Get a berry's big sprite
"""
if not self.item.berry:
raise ValueError("%s is not a berry" % self.identifier)
return self.from_path_elements(['berries'], self.identifier, '.png')
class UndergroundRockMedia(_BaseItemMedia):
"""Media related to a rock in the Sinnoh underground
rock_type can be one of: i, ii, o, o-big, s, t, z
"""
def __init__(self, root, rock_type):
_BaseItemMedia.__init__(self, root)
self.identifier = 'rock-%s' % rock_type
class UndergroundSphereMedia(_BaseItemMedia):
"""Media related to a sphere in the Sinnoh underground
color can be one of: red, blue, green, pale, prism
"""
def __init__(self, root, color, big=False):
_BaseItemMedia.__init__(self, root)
self.identifier = '%s-sphere' % color
if big:
self.identifier += '-big'
class _SimpleIconMedia(BaseMedia):
def __init__(self, root, thing):
BaseMedia.__init__(self, root)
self.identifier = thing.identifier
def icon(self):
return self.from_path_elements([], self.identifier, '.png')
class DamageClassMedia(_SimpleIconMedia):
toplevel_dir = 'damage-classes'
class HabitatMedia(_SimpleIconMedia):
toplevel_dir = 'habitats'
class ShapeMedia(_SimpleIconMedia):
toplevel_dir = 'shapes'
class ItemPocketMedia(_SimpleIconMedia):
toplevel_dir = 'item-pockets'
def icon(self, selected=False):
if selected:
return self.from_path_elements(
['selected'], self.identifier, '.png')
else:
return self.from_path_elements([], self.identifier, '.png')
class _LanguageIconMedia(_SimpleIconMedia):
def icon(self, lang='en'):
return self.from_path_elements([lang], self.identifier, '.png')
class ContestTypeMedia(_LanguageIconMedia):
toplevel_dir = 'contest-types'
class TypeMedia(_LanguageIconMedia):
toplevel_dir = 'types'
''' XXX: No accessors for:
chrome
fonts
ribbons
'''
| mit | 0cf814dfa089ae4477200e1f251994e5 | 35.589416 | 83 | 0.583761 | 4.13082 | false | false | false | false |
veekun/pokedex | scripts/xd-tutors.py | 5 | 5004 | # Encoding: UTF-8
"""Add XD tutors to the database
This is an unmaintained one-shot script, only included in the repo for reference.
"""
from pokedex.db import connect, tables, util
session = connect()
emerald = util.get(session, tables.Version, 'emerald')
fire_red = util.get(session, tables.Version, 'firered')
emerald_version_group = emerald.version_group
xd_version_group = util.get(session, tables.Version, 'xd').version_group
colo_version_group = util.get(session, tables.Version, 'colosseum').version_group
tutor = util.get(session, tables.PokemonMoveMethod, 'tutor')
level_up = util.get(session, tables.PokemonMoveMethod, 'level-up')
# According to every source I could find, the following can be taught to
# exactly the same set of Pokémon which learn it from the FR/LG/E tutor: --ete
for move_identifier in '''
body-slam
double-edge
dream-eater
icy-wind
mimic
seismic-toss
substitute
swagger
thunder-wave
'''.split():
move = util.get(session, tables.Move, move_identifier)
print move
query = session.query(tables.PokemonMove.pokemon_id)
query = query.filter_by(method=tutor)
query = query.filter_by(move=move)
em = set(p for (p, ) in query.filter_by(version_group=emerald.version_group).all())
fr = set(p for (p, ) in query.filter_by(version_group=fire_red.version_group).all())
assert not fr or not em.symmetric_difference(fr)
for pokemon_id in em:
pokemon_move = tables.PokemonMove()
pokemon_move.pokemon_id = pokemon_id
pokemon_move.move = move
pokemon_move.method = tutor
pokemon_move.level = 0
pokemon_move.version_group = xd_version_group
session.add(pokemon_move)
# These are only found in XD:
xd_tutor_data = {
'nightmare': 'butterfree clefairy clefable jigglypuff wigglytuff meowth '
'persian abra kadabra alakazam slowpoke slowbro gastly haunter gengar '
'drowzee hypno exeggcute exeggutor lickitung starmie mr-mime jynx '
'lapras porygon mewtwo mew hoothoot noctowl cleffa igglybuff natu xatu '
'aipom espeon umbreon murkrow slowking misdreavus girafarig dunsparce '
'sneasel houndour houndoom porygon2 stantler smoochum tyranitar lugia '
'ho-oh celebi ralts kirlia gardevoir masquerain shedinja sableye '
'roselia gulpin swalot spinda shuppet banette duskull dusclops '
'chimecho absol jirachi deoxys '.split(),
'selfdestruct': 'geodude graveler golem grimer muk shellder cloyster '
'gastly haunter gengar onix voltorb electrode exeggcute exeggutor '
'koffing weezing snorlax mewtwo mew sudowoodo pineco forretress '
'steelix qwilfish slugma magcargo corsola seedot nuzleaf shiftry '
'nosepass gulpin swalot wailmer wailord camerupt torkoal lunatone '
'solrock baltoy claydol glalie metang metagross regirock regice '
'registeel'.split(),
'sky-attack': 'pidgey pidgeotto pidgeot spearow fearow doduo dodrio '
'aerodactyl articuno zapdos moltres mew hoothoot noctowl togetic '
'natu xatu murkrow delibird skarmory ho-oh taillow swellow wingull '
'pelipper swablu altaria'.split(),
'faint-attack': ['mew'],
'fake-out': ['mew'],
'hypnosis': ['mew'],
'night-shade': ['mew'],
'role-play': ['mew'],
'zap-cannon': ['mew'],
}
for move_identifier, pokemon_identifiers in xd_tutor_data.items():
move = util.get(session, tables.Move, move_identifier)
for pokemon_identifier in pokemon_identifiers:
species = util.get(session, tables.PokemonSpecies, pokemon_identifier)
try:
pokemon, = species.pokemon
except ValueError:
assert pokemon_identifier == 'deoxys'
pokemon = species.default_pokemon
print move, pokemon
pokemon_move = tables.PokemonMove()
pokemon_move.pokemon = pokemon
pokemon_move.move = move
pokemon_move.method = tutor
pokemon_move.level = 0
pokemon_move.version_group = xd_version_group
session.add(pokemon_move)
# And unfortunately, we have to copy level-up moves. To both XD and Colosseum.
for pokemon_id, move_id, level, order in set(
session.query(
tables.PokemonMove.pokemon_id,
tables.PokemonMove.move_id,
tables.PokemonMove.level,
tables.PokemonMove.order,
)
.filter_by(method=level_up)
.filter_by(version_group=emerald_version_group)
):
for version_group in xd_version_group, colo_version_group:
print pokemon_id, move_id
pokemon_move = tables.PokemonMove()
pokemon_move.pokemon_id = pokemon_id
pokemon_move.move_id = move_id
pokemon_move.method = level_up
pokemon_move.level = level
pokemon_move.order = order
pokemon_move.version_group = version_group
session.add(pokemon_move)
session.commit()
| mit | 64dec07f093bc9f9af804d85fed39ec8 | 39.674797 | 88 | 0.672197 | 3.160455 | false | false | false | false |
veekun/pokedex | scripts/markdown-identifiers.py | 5 | 5441 | # Encoding: UTF-8
"""Rewrite markdown links from [Label]{category:thing} to just {category:thing}
There was a version of this script that rewrote stuff from an even earlier
format. Git log should find it without problems.
This is an unmaintained one-shot script, only included in the repo for
reference.
"""
from functools import partial
import sys
import re
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.sql.expression import func
from pokedex.db import connect, tables, util
sanity_re = re.compile(ur"^[-A-Za-z0-9 é\[\]{}.%':;,×/()\"|–`—!*♂♀\\]$")
# RE that matches anything that might look like a link
fuzzy_link_re = re.compile(r"""
\[
[^]]+
\]?
\{
[^}]+
\}""", re.VERBOSE)
# Very specific RE that matches links that appear in source Markdown strings
strict_link_re = re.compile(r"""
\[
(?P<label>
[-A-Za-z 0-9'.]{,30}
)
\]
\{
(?P<category>
[a-z]{,20}
)
:
(?P<target>
[-a-z 0-9]{,40}
)
\}
""", re.VERBOSE)
# Format of the resulting links
result_link_re = re.compile(r"""
^
\[
(?P<label>
[^]]*
)
\]
\{
(?P<category>
[a-z]+
)
:
(?P<target>
[-a-z0-9]+
)
\}
$
""", re.VERBOSE)
english_id = 9
manual_replacements = {
'[Pewter Museum of Science]{location:pewter-city}':
'the Museum of Science in {location:pewter-city}',
'[Oreburgh Mining Museum]{location:mining-museum}':
'{location:mining-museum} in {location:oreburgh-city}',
}
def is_md_col(column):
return column.info.get('format') == 'markdown'
def get_replacement(session, entire_text, context, matchobj):
label = matchobj.group('label')
category = matchobj.group('category')
target = matchobj.group('target') or label
try:
result = manual_replacements[matchobj.group(0)]
except KeyError:
if category == 'mechanic':
target = target.lower()
target = target.replace(' ', '-')
wanted_label = ''
else:
query = None
if category == 'item':
table = tables.Item
elif category == 'ability':
table = tables.Ability
elif category == 'move':
table = tables.Move
elif category == 'type':
table = tables.Type
elif category == 'pokemon':
table = tables.Pokemon
elif category == 'location':
table = tables.Location
else:
print
print repr(entire_text)
print repr(matchobj.group(0))
raise ValueError('Category %s not implemented' % category)
try:
thingy = util.get(session, table, target)
wanted_label = thingy.name
except:
print
print repr(entire_text)
print repr(matchobj.group(0))
raise
if wanted_label.lower() == label.lower():
result = "[]{%s:%s}" % (category, target)
else:
result = "[%s]{%s:%s}" % (label, category, target)
if wanted_label:
print
print context
print "%-40s" % matchobj.group(0),
print '%s != %s' % (label, wanted_label)
assert result_link_re.match(result), result
return result
def main(argv):
session = connect()
for cls in tables.mapped_classes:
for translation_class in cls.translation_classes:
columns = translation_class.__table__.c
md_columns = [c for c in columns if c.info.get('format') == 'markdown']
if not md_columns:
continue
for row in session.query(translation_class):
if row.local_language_id != english_id:
continue
for column in md_columns:
markdown = getattr(row, column.name)
if not markdown:
continue
text = unicode(markdown)
# Make sure everything that remotely looks like a link is one
links = fuzzy_link_re.findall(text)
if not links:
continue
for link in links:
assert strict_link_re.findall(link), (strict_link_re.findall(link), [link])
# Do the replacement
context = '%s %s %s' % (translation_class.__name__, row.foreign_id, column.name)
replaced = strict_link_re.sub(
partial(get_replacement, session, text, context),
text,
)
setattr(row, column.name, replaced)
if argv and argv[0] == '--commit':
session.commit()
print 'Committed'
else:
print 'Run with --commit to commit changes'
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 235f2ca1f65493e8d649df01470b4311 | 31.136095 | 100 | 0.482968 | 4.2663 | false | false | false | false |
veekun/pokedex | scripts/pokemon_species.py | 5 | 5715 | # Encoding: UTF-8
"""Reorganize Pokemon, PokemonForm, etc. to Species, Pokemon, etc.
This is an unmaintained one-shot script, only included in the repo for
reference.
"""
import csv
import os
from pokedex import defaults
number_of_species = 649
high_id_start = 10000
csv_dir = defaults.get_default_csv_dir()
def to_dict(filename):
fullname = os.path.join(csv_dir, filename)
reader = csv.reader(open(fullname))
column_names = reader.next()
entries = dict()
for row in reader:
row_dict = dict(zip(column_names, row))
entries[row_dict.get('id', row_dict.get('pokemon_id'))] = row_dict
return entries, column_names
pokemon, pokemon_columns = to_dict('pokemon.csv')
forms, form_columns = to_dict('pokemon_forms.csv')
form_groups, form_group_columns = to_dict('pokemon_form_groups.csv')
evolution_chains, evolution_chain_columns = to_dict('evolution_chains.csv')
result_columns = dict(
species='''id identifier generation_id evolves_from_species_id
evolution_chain_id color_id shape_id habitat_id
growth_rate_id gender_rate capture_rate base_happiness is_baby
hatch_counter has_gender_differences forms_switchable'''.split(),
pokemon='''id species_id height weight base_experience order'''.split(),
form='''id form_identifier pokemon_id introduced_in_version_group_id
is_default is_battle_only order'''.split(),
chain='''id baby_trigger_item_id'''.split(),
)
def normalize_id(id):
id = int(id)
if id > number_of_species:
id = id - high_id_start + number_of_species
return id
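# For example, with number_of_species == 649 and high_id_start == 10000,
# normalize_id(10001) == 650: the high "alternate form" ids are renumbered
# to follow the species ids contiguously.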
def put(dct, entry):
"""Put entry in dct. If already there, check it's the same.
"""
id = int(entry['id'])
if id in dct:
if entry == dct[id]:
pass
else:
print entry
print dct[id]
assert False
else:
dct[id] = entry
forms_switchable = dict(
castform=True,
unown=False,
darmanitan=True,
basculin=False,
rotom=True,
shaymin=True,
deerling=True,
sawsbuck=True,
arceus=True,
pichu=False,
giratina=True,
burmy=True,
wormadam=False,
deoxys=True,
genesect=True,
meloetta=True,
gastrodon=False,
cherrim=True,
shellos=False,
)
result_species = dict()
result_pokemon = dict()
result_forms = dict()
result_chains = dict()
for form_id, source_form in forms.items():
pokemon_id = source_form['unique_pokemon_id'] or source_form['form_base_pokemon_id']
species_id = source_form['form_base_pokemon_id']
source_pokemon = pokemon[pokemon_id]
source_evolution_chain = evolution_chains[source_pokemon['evolution_chain_id']]
try:
source_group = form_groups[species_id]
except KeyError:
source_group = dict(is_battle_only=0)
all_fields = dict(source_form)
all_fields.update(source_group)
all_fields.update(source_pokemon)
all_fields.update(source_evolution_chain)
del all_fields['id']
new_species = dict()
for column_name in result_columns['species']:
if column_name == 'id':
new_species[column_name] = normalize_id(species_id)
elif column_name == 'evolves_from_species_id':
new_species[column_name] = pokemon[species_id]['evolves_from_pokemon_id']
elif column_name == 'shape_id':
new_species[column_name] = all_fields['pokemon_shape_id']
elif column_name == 'forms_switchable':
if species_id in form_groups:
new_species[column_name] = forms_switchable[source_pokemon['identifier']]
else:
new_species[column_name] = 0
else:
new_species[column_name] = all_fields[column_name]
put(result_species, new_species)
new_pokemon = dict()
for column_name in result_columns['pokemon']:
if column_name == 'id':
new_pokemon[column_name] = normalize_id(pokemon_id)
elif column_name == 'species_id':
new_pokemon[column_name] = species_id
else:
new_pokemon[column_name] = all_fields[column_name]
put(result_pokemon, new_pokemon)
new_form = dict()
for column_name in result_columns['form']:
if column_name == 'id':
new_form[column_name] = normalize_id(form_id)
elif column_name == 'pokemon_id':
new_form[column_name] = normalize_id(pokemon_id)
elif column_name == 'form_identifier':
new_form[column_name] = source_form['identifier']
elif column_name == 'is_battle_only':
if source_form['unique_pokemon_id'] == source_form['form_base_pokemon_id']:
                # Default form, therefore not battle-only
new_form[column_name] = '0'
else:
# Keep
new_form[column_name] = all_fields[column_name]
else:
new_form[column_name] = all_fields[column_name]
put(result_forms, new_form)
new_chain = dict(source_evolution_chain)
del new_chain['growth_rate_id']
put(result_chains, new_chain)
def write_csv(dct, fieldnames, filename):
fullname = os.path.join(csv_dir, filename)
    writer = csv.DictWriter(open(fullname, 'w'), fieldnames)
    writer.writerow(dict((n, n) for n in fieldnames))
    for id, row in sorted(dct.items()):
        writer.writerow(row)
write_csv(result_species, result_columns['species'], 'pokemon_species.csv')
write_csv(result_pokemon, result_columns['pokemon'], 'pokemon.csv')
write_csv(result_forms, result_columns['form'], 'pokemon_forms.csv')
write_csv(result_chains, result_columns['chain'], 'evolution_chains.csv')
| mit | efd94223d06b6374775985076087ba01 | 33.847561 | 89 | 0.626422 | 3.426259 | false | false | false | false |
beetbox/beets | beetsplug/mbsync.py | 2 | 7421 | # This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Update library's tags using MusicBrainz.
"""
from beets.plugins import BeetsPlugin, apply_item_changes
from beets import autotag, library, ui, util
from beets.autotag import hooks
from collections import defaultdict
import re
MBID_REGEX = r"(\d|\w){8}-(\d|\w){4}-(\d|\w){4}-(\d|\w){4}-(\d|\w){12}"
class MBSyncPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
def commands(self):
cmd = ui.Subcommand('mbsync',
help='update metadata from musicbrainz')
cmd.parser.add_option(
'-p', '--pretend', action='store_true',
help='show all changes but do nothing')
cmd.parser.add_option(
'-m', '--move', action='store_true', dest='move',
help="move files in the library directory")
cmd.parser.add_option(
'-M', '--nomove', action='store_false', dest='move',
help="don't move files in library")
cmd.parser.add_option(
'-W', '--nowrite', action='store_false',
default=None, dest='write',
help="don't write updated metadata to files")
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
def func(self, lib, opts, args):
"""Command handler for the mbsync function.
"""
move = ui.should_move(opts.move)
pretend = opts.pretend
write = ui.should_write(opts.write)
query = ui.decargs(args)
self.singletons(lib, query, move, pretend, write)
self.albums(lib, query, move, pretend, write)
def singletons(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ['singleton:true']):
item_formatted = format(item)
if not item.mb_trackid:
self._log.info('Skipping singleton with no mb_trackid: {0}',
item_formatted)
continue
# Do we have a valid MusicBrainz track ID?
if not re.match(MBID_REGEX, item.mb_trackid):
self._log.info('Skipping singleton with invalid mb_trackid:' +
' {0}', item_formatted)
continue
# Get the MusicBrainz recording info.
track_info = hooks.track_for_mbid(item.mb_trackid)
if not track_info:
                self._log.info('Recording ID not found: {0} for track {1}',
item.mb_trackid,
item_formatted)
continue
# Apply.
with lib.transaction():
autotag.apply_item_metadata(item, track_info)
apply_item_changes(lib, item, move, pretend, write)
def albums(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for albums matched by
query and their items.
"""
# Process matching albums.
for a in lib.albums(query):
album_formatted = format(a)
if not a.mb_albumid:
self._log.info('Skipping album with no mb_albumid: {0}',
album_formatted)
continue
items = list(a.items())
# Do we have a valid MusicBrainz album ID?
if not re.match(MBID_REGEX, a.mb_albumid):
self._log.info('Skipping album with invalid mb_albumid: {0}',
album_formatted)
continue
# Get the MusicBrainz album information.
album_info = hooks.album_for_mbid(a.mb_albumid)
if not album_info:
self._log.info('Release ID {0} not found for album {1}',
a.mb_albumid,
album_formatted)
continue
# Map release track and recording MBIDs to their information.
# Recordings can appear multiple times on a release, so each MBID
# maps to a list of TrackInfo objects.
releasetrack_index = {}
track_index = defaultdict(list)
for track_info in album_info.tracks:
releasetrack_index[track_info.release_track_id] = track_info
track_index[track_info.track_id].append(track_info)
# Construct a track mapping according to MBIDs (release track MBIDs
# first, if available, and recording MBIDs otherwise). This should
# work for albums that have missing or extra tracks.
mapping = {}
for item in items:
if item.mb_releasetrackid and \
item.mb_releasetrackid in releasetrack_index:
mapping[item] = releasetrack_index[item.mb_releasetrackid]
else:
candidates = track_index[item.mb_trackid]
if len(candidates) == 1:
mapping[item] = candidates[0]
else:
# If there are multiple copies of a recording, they are
# disambiguated using their disc and track number.
for c in candidates:
if (c.medium_index == item.track and
c.medium == item.disc):
mapping[item] = c
break
# Apply.
self._log.debug('applying changes to {}', album_formatted)
with lib.transaction():
autotag.apply_metadata(album_info, mapping)
changed = False
# Find any changed item to apply MusicBrainz changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if not changed:
# No change to any item.
continue
if not pretend:
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
a[key] = any_changed_item[key]
a.store()
# Move album art (and any inconsistent items).
if move and lib.directory in util.ancestry(items[0].path):
self._log.debug('moving album {0}', album_formatted)
a.move()
| mit | db18977fae98d5e4421a83ff92ebf454 | 40.691011 | 79 | 0.537798 | 4.41201 | false | false | false | false |
beetbox/beets | test/testall.py | 1 | 1255 | #!/usr/bin/env python
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import re
import sys
import unittest
pkgpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) or '..'
sys.path.insert(0, pkgpath)
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(os.path.join(pkgpath, 'test')):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit | 7406ef166c813d05206fa7531debea34 | 30.375 | 78 | 0.694821 | 3.83792 | false | true | false | false |
compas-dev/compas | src/compas_rhino/artists/polyhedronartist.py | 1 | 1681 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas.artists import ShapeArtist
from compas.colors import Color
from .artist import RhinoArtist
class PolyhedronArtist(RhinoArtist, ShapeArtist):
"""Artist for drawing polyhedron shapes.
Parameters
----------
polyhedron : :class:`~compas.geometry.Polyhedron`
A COMPAS polyhedron.
layer : str, optional
The layer that should contain the drawing.
**kwargs : dict, optional
Additional keyword arguments.
For more info, see :class:`RhinoArtist` and :class:`ShapeArtist`.
"""
def __init__(self, polyhedron, layer=None, **kwargs):
super(PolyhedronArtist, self).__init__(shape=polyhedron, layer=layer, **kwargs)
def draw(self, color=None):
"""Draw the polyhedron associated with the artist.
Parameters
----------
color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
The RGB color of the polyhedron.
Default is :attr:`compas.artists.ShapeArtist.color`.
Returns
-------
list[System.Guid]
The GUIDs of the objects created in Rhino.
"""
color = Color.coerce(color) or self.color
vertices = [list(vertex) for vertex in self.shape.vertices]
faces = self.shape.faces
guid = compas_rhino.draw_mesh(
vertices,
faces,
layer=self.layer,
name=self.shape.name,
color=color.rgb255,
disjoint=True,
)
return [guid]
| mit | 4cbab2820394c9d91c2d8831fe5fff7f | 29.563636 | 107 | 0.614515 | 4.070218 | false | false | false | false |
compas-dev/compas | src/compas/files/gltf/gltf_children.py | 1 | 1656 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
class GLTFChildren(object):
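    """List-like wrapper for child node keys that validates every key
    against the nodes of the owning :class:`GLTFContent` (``context``).
    """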
def __init__(self, context, values):
self._values = list(values)
self._context = context
for v in values:
self.check_node_context(v)
def __repr__(self):
return repr(self._values)
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
def __bool__(self):
return bool(self._values)
def check_node_context(self, v):
if v not in self._context.nodes:
raise Exception("Cannot find Node {}.".format(v))
def append(self, value):
self.check_node_context(value)
self._values.append(value)
def extend(self, values):
for value in values:
self.check_node_context(value)
self._values.extend(values)
def insert(self, index, value):
self.check_node_context(value)
self._values.insert(index, value)
def remove(self, value):
self._values.remove(value)
    def pop(self, index=None):
        if index is None:
            index = len(self._values) - 1
        return self._values.pop(index)
    def clear(self):
        del self._values[:]
    def index(self, value, start=None, end=None):
        if start is None:
            start = 0
        if end is None:
            end = len(self._values)
        return self._values.index(value, start, end)
    def count(self, value):
        return self._values.count(value)
def sort(self, key=None, reverse=False):
self._values.sort(key=key, reverse=reverse)
def reverse(self):
self._values.reverse()
    def copy(self):
        return list(self._values)  # list() rather than .copy() for Python 2/IronPython
| mit | cc58557896deb5a2ea6e8ed670614f49 | 24.875 | 77 | 0.596618 | 3.763636 | false | false | false | false |
widdowquinn/pyani | pyani/fastani.py | 1 | 14832 | # -*- coding: utf-8 -*-
# (c) The University of Strathclyde 2021–Present
# Author: Bailey Harrington
#
# Contact: bailey.harrington@strath.ac.uk
#
# Bailey Harrington,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021–Present University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the fastANI average nucleotide identity method."""
import logging
import platform
import os
import re
import shutil
import subprocess
from logging import Logger
from pathlib import Path
from typing import NamedTuple, Callable, Dict, List, Optional, Tuple
# import pandas as pd
from Bio import SeqIO
from . import pyani_config
from . import pyani_files
from . import pyani_jobs
from . import PyaniException
from .pyani_tools import ANIResults, BLASTcmds, BLASTexes, BLASTfunctions
class PyaniFastANIException(PyaniException):
"""Exception raised when there is a problem with fastANI"""
class ComparisonResult(NamedTuple):
reference: Path
query: Path
ani: float
matches: int
fragments: int
def get_version(fastani_exe: Path = pyani_config.FASTANI_DEFAULT) -> str:
"""Return FastANI package version as a string.
:param fastani_exe: path to FastANI executable
We expect fastANI to return a string on STDOUT as
.. code-block:: bash
$ ./fastANI -v
version 1.32
we concatenate this with the OS name.
The following circumstances are explicitly reported as strings:
- no executable at passed path
- non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
- no version info returned
"""
try:
fastani_path = Path(shutil.which(fastani_exe)) # type:ignore
except TypeError:
return f"{fastani_exe} is not found in $PATH"
if fastani_path is None:
return f"{fastani_exe} is not found in $PATH"
if not fastani_path.is_file(): # no executable
return f"No fastANI executable at {fastani_path}"
# This should catch cases when the file can't be executed by the user
if not os.access(fastani_path, os.X_OK): # file exists but not executable
return f"fastANI exists at {fastani_path} but not executable"
cmdline = [fastani_exe, "-v"] # type: List
result = subprocess.run(
cmdline,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
) # type CompletedProcess
match = re.search(
r"(?<=version\s)[0-9\.]*", str(result.stderr + result.stdout, "utf-8")
)
version = match.group() # type: ignore
if 0 == len(version.strip()):
return f"fastANI exists at {fastani_path} but could not retrieve version"
return f"{platform.system()}_{version} ({fastani_path})"
# Generate list of Job objects, one per fastANI run -> this is maybe not necessary
def generate_fastani_jobs(
filenames: List[Path],
outdir: Path = Path("."),
fastani_exe: Path = pyani_config.FASTANI_DEFAULT,
fragLen: int = 3000,
kmerSize: int = 16,
minFraction: float = 0.2,
jobprefix: str = "fastANI",
): # should this have a hint for return type?
"""Return list of Jobs describing fastANI command lines.
:param filenames: a list of paths to input FASTA files
:param outdir: path to output directory
:param fastani_exe: location of the fastANI binary
:param fragLen: fragment length to use
:param kmerSize: kmer size to use
:param minFraction: minimum portion of the genomes that must match to trust ANI
    :param jobprefix: prefix used to name the generated jobs
Loop over all FASTA files, generating Jobs describing fastANI command lines
for each pairwise comparison.
"""
fastcmds = generate_fastani_commands(
filenames, outdir, fastani_exe, fragLen, kmerSize, minFraction
)
joblist = []
for idx, fastcmd in enumerate(fastcmds):
fastjob = pyani_jobs.Job(f"{jobprefix}_{idx:06d}", fastcmd)
joblist.append(fastjob)
return joblist
def generate_fastani_commands(
filenames: List[Path],
outdir: Path = Path("."),
fastani_exe: Path = pyani_config.FASTANI_DEFAULT,
fragLen: int = 3000,
kmerSize: int = 16,
minFraction: float = 0.2,
) -> List[str]:
"""Return list of fastANI command lines.
:param filenames: a list of paths to input FASTA files
:param outdir: path to output directory
:param fastani_exe: location of the fastANI binary
:param fragLen: fragment length to use
:param kmerSize: kmer size to use
:param minFraction: minimum portion of the genomes that must match to trust ANI
Loop over all FASTA files generating fastANI command lines for each pairwise comparison.
"""
fastani_cmdlines = []
filenames = sorted(filenames) # enforce ordering of filenames
for idx, query in enumerate(filenames):
for index, ref in enumerate(filenames):
            if idx == index:
                # NB: `pass` falls through, so a self-vs-self command line is
                # still generated below; use `continue` here to skip
                # self-comparisons
                pass
fastcmd = construct_fastani_cmdline(
query, ref, outdir, fastani_exe, fragLen, kmerSize, minFraction
)
fastani_cmdlines.append(fastcmd)
return fastani_cmdlines
# ¶ If int type is specified here, can a value of None be passed?
# ¶ I have added default values because I am not sure.
# ¶ This currently does not use the list option of fastANI
def construct_fastani_cmdline(
query: Path,
ref: Path,
outdir: Path = Path("."),
fastani_exe: Path = pyani_config.FASTANI_DEFAULT,
fragLen: int = 3000,
kmerSize: int = 16,
minFraction: float = 0.2,
) -> str:
"""Will return a fastcmd item
:param query: path to query file
:param ref: path to reference file
:param outdir: path to output directory
:param fastani_exe: path to fastANI executable
:param fragLen: fragment length to use
:param kmerSize: kmer size to use
:param minFraction: minimum portion of the genomes that must match to trust ANI
"""
logger = logging.getLogger(__name__)
# Cast path strings to pathlib.Path for safety
query, ref = Path(query), Path(ref)
# Compile commands
outsubdir = outdir / pyani_config.ALIGNDIR["fastANI"]
outsubdir.mkdir(exist_ok=True)
outfile = outsubdir / f"{query.stem}_vs_{ref.stem}.fastani"
fastcmd = f"{fastani_exe} -q {query} -r {ref} -o {outfile} --fragLen {fragLen} -k {kmerSize} --minFraction {minFraction}"
logger.debug("Compiled command: %s", fastcmd)
return fastcmd
def parse_fastani_file(filename: Path) -> ComparisonResult:
"""
Return (ref genome, query genome, ANI estimate, orthologous matches,
sequence fragments) tuple.
:param filename: Path, path to the input file
Extracts the ANI estimate, the number of orthologous matches, and the
number of sequence fragments considered from the fastANI output file.
We assume that all fastANI comparisons are pairwise: one query and
one reference file. The fastANI file should contain a single line.
    fastANI *can* produce multi-line output, if a list of query/reference
files is given to it.
"""
# ¶ Example code from a different project
# def add_snp(holder, type, key, *value):
# holder[key] = type(*value)
# Create some sort of holder:
# ¶ The following is for an output file with multiple lines
# results = []
# for line in [_.strip().split() for _ in open(filename, "r").readlines()]:
# if len(line) == 5:
# # Convert types from string to numeric
# line[2] = float(line[2]) / 100 # ANI value
# line[3] = int(line[3]) # number of matching fragments
# line[4] = int(line[4]) # total number of fragments
# results.append(ComparisonResult(*line))
# else:
# raise ValueError(f"Line contains too many/too few items: {line}")
# continue
# return results
line = open(filename, "r").readline().strip().split()
if not line: # No file content; either run failed or no detectable similarity
raise PyaniFastANIException(f"Input file {filename} is empty")
return ComparisonResult(
line[0], line[1], 0.01 * float(line[2]), int(line[3]), int(line[4])
)
def process_files(outdir: Path, org_lengths: Dict) -> ANIResults:
"""Return tuple of fastANI results for files in passed directory.
:param outdir: Path, path to the directory containing output files
:param org_lengths: dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, reference sequences are columns:
- alignment_lengths - asymmetrical: total length of alignment
- percentage_identity - asymmetrical: percentage identity of alignment
- alignment_coverage - asymmetrical: coverage of query and reference
- similarity_errors - asymmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more fastANI runs failed, or a
very distant sequence was included in the analysis.
"""
logger = logging.getLogger(__name__)
    # Process directory to identify input files; construct_fastani_cmdline()
    # above writes comparison results with a .fastani extension
    outfiles = pyani_files.get_input_files(outdir, ".fastani")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), "fastANI")
# Fill diagonal NA values for alignment_length with org_lengths
# ¶ Is this necessary? Or can ANIm not do org X org comparisons? Will the ANIResults object always have NAs on the diagonal?
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
    # Process .fastani files assuming that the filename format holds:
    # org1_vs_org2.fastani
    for outfile in outfiles:
        qname, rname = outfile.stem.split("_vs_")
        # We may have .fastani files from other analyses in the same directory
        # If this occurs, we raise a warning and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input sequence list, skipping %s",
qname,
outfile,
)
continue
if rname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Reference name %s not in input sequence list, skipping %s",
rname,
outfile,
)
continue
        # parse_fastani_file() returns a single ComparisonResult for a
        # pairwise comparison (see the commented-out list parser inside it
        # for the multi-line case)
        result = parse_fastani_file(outfile)
        if result.matches == 0 and logger is not None:
            logger.warning(
                "Total alignment length reported in %s is zero!", outfile
            )
        tot_length = result.matches
        sim_errors = result.fragments - tot_length
        query_cover = float(tot_length) / org_lengths[qname]
        # Calculate percentage ID of aligned length. This may fail if
        # total length is zero.
        # The ZeroDivisionError that would arise should be handled.
        # Common causes are that a fastANI run failed, or that a very
        # distant sequence was included in the analysis.
        try:
            perc_id = 1 - float(sim_errors) / tot_length
        except ZeroDivisionError:
            perc_id = 0  # set arbitrary value of zero identity
            results.zero_error = True
        results.add_tot_length(qname, rname, tot_length)
        results.add_sim_errors(qname, rname, sim_errors)
        results.add_pid(qname, rname, perc_id)
        results.add_coverage(qname, rname, query_cover, None)
return results
# """
# class ComparisonResult(NamedTuple):
# reference: Path
# query: Path
# ani: float
# matches: int
# fragments: int
# fastANI is a fast alignment-free implementation for computing whole-genome
# Average Nucleotide Identity (ANI) between genomes
# -----------------
# Example usage:
# $ fastANI -q genome1.fa -r genome2.fa -o output.txt
# $ fastANI -q genome1.fa --rl genome_list.txt -o output.txt
#
# Available options
# -----------------
# -h, --help
# Print this help page
#
# -r <value>, --ref <value>
# reference genome (fasta/fastq)[.gz]
#
# --refList <value>, --rl <value>
# a file containing list of reference genome files, one genome per line
#
# -q <value>, --query <value>
# query genome (fasta/fastq)[.gz]
#
# --ql <value>, --queryList <value>
# a file containing list of query genome files, one genome per line
#
# -k <value>, --kmer <value>
# kmer size <= 16 [default : 16]
#
# -t <value>, --threads <value>
# thread count for parallel execution [default : 1]
#
# --fragLen <value>
# fragment length [default : 3,000]
#
# --minFraction <value>
# minimum fraction of genome that must be shared for trusting ANI. If
# reference and query genome size differ, smaller one among the two is
# considered. [default : 0.2]
#
# --visualize
# output mappings for visualization, can be enabled for single genome to
# single genome comparison only [disabled by default]
#
# --matrix
# also output ANI values as lower triangular matrix (format inspired from
# phylip). If enabled, you should expect an output file with .matrix
# extension [disabled by default]
#
# -o <value>, --output <value> [required]
# output file name
#
# -v, --version
# Show version
#
# """
| mit | f7f73397afc6f615e62599549cfc20bd | 34.715663 | 128 | 0.658953 | 3.845874 | false | false | false | false |
compas-dev/compas | src/compas/files/gltf/gltf_content.py | 1 | 21043 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.files.gltf.data_classes import TextureInfoData
from compas.files.gltf.gltf_mesh import GLTFMesh
from compas.files.gltf.gltf_node import GLTFNode
from compas.files.gltf.gltf_scene import GLTFScene
from compas.files.gltf.helpers import get_weighted_mesh_vertices
from compas.geometry import multiply_matrices
from compas.geometry import transform_points
class GLTFContent(object):
"""
Class for managing the content of a glTF file.
Attributes
----------
scenes : dict
Dictionary containing (int, :class:`~compas.files.GLTFScene`) pairs.
default_scene_key : int or None
Key of the scene to be displayed on loading the glTF.
nodes : dict
Dictionary containing (int, :class:`~compas.files.GLTFNode`) pairs.
meshes : dict
Dictionary containing (int, :class:`~compas.files.GLTFMesh`) pairs.
cameras : dict
Dictionary containing (int, :class:`~compas.files.data_classes.CameraData`) pairs.
animations : dict
Dictionary containing (int, :class:`~compas.files.data_classes.AnimationData`) pairs.
skins : dict
Dictionary containing (int, :class:`~compas.files.data_classes.SkinData`) pairs.
materials : dict
Dictionary containing (int, :class:`~compas.files.data_classes.MaterialData`) pairs.
textures : dict
Dictionary containing (int, :class:`~compas.files.data_classes.TextureData`) pairs.
samplers : dict
Dictionary containing (int, :class:`~compas.files.data_classes.SamplerData`) pairs.
images : dict
Dictionary containing (int, :class:`~compas.files.data_classes.ImageData`) pairs.
extras : object
extensions : object
"""
def __init__(self):
self.scenes = {}
self.default_scene_key = None
self.nodes = {}
self.meshes = {}
self.cameras = {}
self.animations = {}
self.skins = {}
self.materials = {}
self.textures = {}
self.samplers = {}
self.images = {}
self.extras = None
self.extensions = None
self.extensions_used = None
@property
def default_or_first_scene(self):
key = self.default_scene_key or 0
return self.scenes[key]
def check_if_forest(self):
"""Raises an exception if :attr:`compas.files.GLTFContent.nodes` is not a disjoint
union of rooted trees.
Returns
-------
"""
visited_nodes = set()
def visit(key):
node = self.nodes[key]
if key in visited_nodes:
raise Exception("Nodes do not form a rooted forest.")
visited_nodes.add(key)
for child_key in node.children:
visit(child_key)
for scene in self.scenes.values():
for node_key in scene.children:
visit(node_key)
def remove_orphans(self):
"""Removes orphaned objects.
Returns
-------
"""
node_visit_log = {key: False for key in self.nodes}
mesh_visit_log = {key: False for key in self.meshes}
camera_visit_log = {key: False for key in self.cameras}
material_visit_log = {key: False for key in self.materials}
texture_visit_log = {key: False for key in self.textures}
sampler_visit_log = {key: False for key in self.samplers}
image_visit_log = {key: False for key in self.images}
def visit_node(key):
node = self.nodes[key]
node_visit_log[key] = True
if node.mesh_key is not None:
mesh_visit_log[node.mesh_key] = True
if node.camera is not None:
camera_visit_log[node.camera] = True
for child_key in node.children:
visit_node(child_key)
# walk through scenes and update visit logs of nodes, meshes, and cameras.
for scene in self.scenes.values():
for node_key in scene.children:
visit_node(node_key)
# remove unvisited nodes
self._remove_unvisited(node_visit_log, self.nodes)
# remove unvisited meshes
self._remove_unvisited(mesh_visit_log, self.meshes)
# remove unvisited cameras
self._remove_unvisited(camera_visit_log, self.cameras)
# remove animations referencing no existing nodes
# (iterate over a copy, since entries may be deleted while looping)
for animation_key, animation in list(self.animations.items()):
visited_sampler_keys = []
for channel in list(animation.channels):  # copy: channels may be removed while looping
if not node_visit_log[channel.target.node]:
animation.channels.remove(channel)
else:
visited_sampler_keys.append(channel.sampler)
animation.samplers_dict = {
key: animation.samplers_dict[key] for key in animation.samplers_dict if key in visited_sampler_keys
}
if not animation.samplers_dict:
del self.animations[animation_key]
# remove skins referencing no existing nodes
# (again iterate over copies, since items may be removed while looping)
for key, skin_data in list(self.skins.items()):
for joint_key in list(skin_data.joints):
if not node_visit_log[joint_key]:
skin_data.joints.remove(joint_key)
if not skin_data.joints:
del self.skins[key]
# walk through existing meshes and update materials visit log
for mesh in self.meshes.values():
for primitive in mesh.primitive_data_list:
if primitive.material is not None:
material_visit_log[primitive.material] = True
# remove unvisited materials
self._remove_unvisited(material_visit_log, self.materials)
# walk through existing materials and update textures visit log
def check_extensions_texture_recursively(item):
# get the extensions that are in the attributes
for a in dir(item):
if not a.startswith("__") and not callable(getattr(item, a)):
# ipy does not like this one: if isinstance(getattr(item, a), TextureInfoData):
if getattr(getattr(item, a), "IS_TEXTURE_INFO_DATA", False):
texture_visit_log[getattr(item, a).index] = True
# ipy does not like this one: elif isinstance(getattr(item, a), BaseGLTFDataClass):
elif getattr(getattr(item, a), "IS_BASE_GLTF_DATA", False):
check_extensions_texture_recursively(getattr(item, a))
if item.extensions is not None:
for _, e in item.extensions.items():
check_extensions_texture_recursively(e)
for material in self.materials.values():
check_extensions_texture_recursively(material)
# remove unvisited textures
self._remove_unvisited(texture_visit_log, self.textures)
# walk through existing textures and update visit logs of samplers and images
for texture in self.textures.values():
if texture.sampler is not None:
sampler_visit_log[texture.sampler] = True
if texture.source is not None:
image_visit_log[texture.source] = True
# remove unvisited samplers
self._remove_unvisited(sampler_visit_log, self.samplers)
# remove unvisited images
self._remove_unvisited(image_visit_log, self.images)
def _remove_unvisited(self, log, dictionary):
for key, visited in log.items():
if not visited:
del dictionary[key]
def update_node_transforms_and_positions(self):
"""Walks through all nodes and updates their transforms and positions. To be used when
scenes or nodes have been added or the nodes' matrices or TRS attributes have been set or updated.
Returns
-------
"""
for scene in self.scenes.values():
self.update_scene_transforms_and_positions(scene)
def update_scene_transforms_and_positions(self, scene):
"""Walks through the scene tree and updates transforms and positions. To be used when
nodes have been added or the nodes' matrices or TRS attributes have been set or updated.
Parameters
----------
scene : :class:`~compas.files.GLTFScene`
Returns
-------
"""
origin = [0, 0, 0]
for node_key in scene.children:
node = self.nodes[node_key]
node.transform = node.matrix or node.get_matrix_from_trs()
node.position = transform_points([origin], node.transform)[0]
queue = [node_key]
while queue:
cur_key = queue.pop(0)
cur = self.nodes[cur_key]
for child_key in cur.children:
child = self.nodes[child_key]
child.transform = multiply_matrices(cur.transform, child.matrix or child.get_matrix_from_trs())
child.position = transform_points([origin], child.transform)[0]
queue.append(child_key)
def get_node_faces(self, node):
"""Returns the faces of the mesh at ``node``, if any.
Parameters
----------
node : :class:`~compas.files.GLTFNode`
Returns
-------
list
"""
mesh_data = self.meshes.get(node.mesh_key)
if mesh_data is None:
return None
return mesh_data.faces
def get_node_vertices(self, node):
"""Returns the vertices of the mesh at ``node``, if any.
Parameters
----------
node : :class:`~compas.files.GLTFNode`
Returns
-------
list
"""
mesh_data = self.meshes.get(node.mesh_key)
if mesh_data is None:
return None
if node.weights is None:
return mesh_data.vertices
return get_weighted_mesh_vertices(mesh_data, node.weights)
def get_node_by_name(self, name):
"""Returns the node with a specific name.
Parameters
----------
name : str
The name of the node
Returns
-------
node : :class:`compas.files.GLTFNode` or `None`
"""
for key in self.nodes:
if self.nodes[key].name == name:
return self.nodes[key]
return None
@classmethod
def _get_next_available_key(cls, adict):
key = len(adict)
while key in adict:
key += 1
return key
def add_material(self, material):
"""Adds a material to the content.
Parameters
----------
material : :class:`compas.files.data_classes.MaterialData`
The material to add
Returns
-------
int
"""
key = self._get_next_available_key(self.materials)
self.materials[key] = material
return key
def add_texture(self, texture):
"""Adds a texture to the content.
Parameters
----------
texture : :class:`compas.files.data_classes.TextureData`
The texture to add
Returns
-------
int
"""
key = self._get_next_available_key(self.textures)
self.textures[key] = texture
return key
def add_image(self, image):
"""Adds an image to the content.
Parameters
----------
image : :class:`compas.files.data_classes.ImageData`
The image to add
Returns
-------
int
"""
key = self._get_next_available_key(self.images)
self.images[key] = image
return key
def get_material_index_by_name(self, name):
"""Returns the index of the material.
Parameters
----------
name : str
The name of the material
Returns
-------
int or None
"""
for key, material in self.materials.items():
if material.name == name:
return key
return None
def add_scene(self, name=None, extras=None):
"""Adds a scene to the content.
Parameters
----------
name : str
extras : object
Returns
-------
:class:`~compas.files.GLTFScene`
"""
return GLTFScene(self, name=name, extras=extras)
def add_node_to_scene(self, scene, node_name=None, node_extras=None):
"""Creates a :class:`~compas.files.GLTFNode` and adds this node to the children of ``scene``.
Parameters
----------
scene : :class:`~compas.files.GLTFScene`
node_name : str
node_extras : object
Returns
-------
:class:`~compas.files.GLTFNode`
"""
if scene not in self.scenes.values():
raise Exception("Cannot find scene.")
node = GLTFNode(self, node_name, node_extras)
scene.children.append(node.key)
return node
def add_child_to_node(self, parent_node, child_name=None, child_extras=None):
"""Creates a :class:`~compas.files.GLTFNode` and adds this node to the children of ``parent_node``.
Parameters
----------
parent_node : :class:`~compas.files.GLTFNode`
child_name : str
child_extras : object
Returns
-------
:class:`~compas.files.GLTFNode`
"""
child_node = GLTFNode(self, child_name, child_extras)
parent_node.children.append(child_node.key)
return child_node
def add_mesh(self, mesh):
"""Creates a :class:`~compas.files.GLTFMesh` object from a compas mesh, and adds this
to the content.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
Returns
-------
:class:`~compas.files.GLTFMesh`
"""
return GLTFMesh.from_mesh(self, mesh)
def add_mesh_to_node(self, node, mesh):
"""Adds an existing mesh to ``node`` if ``mesh`` is a valid mesh key, or through ``add_mesh`` creates and adds a
mesh to ``node``.
Parameters
----------
node : :class:`~compas.files.GLTFNode`
mesh : Union[:class:`~compas.datastructures.Mesh`, int]
Returns
-------
:class:`~compas.files.GLTFMesh`
"""
if isinstance(mesh, int):
mesh_data = self.meshes[mesh]
else:
mesh_data = self.add_mesh(mesh)
node.mesh_key = mesh_data.key
return mesh_data
def get_nodes_from_scene(self, scene):
"""Returns dictionary of nodes in the given scene, without a specified root.
Parameters
----------
scene : :class:`~compas.files.GLTFScene`
Returns
-------
dict
"""
node_dict = {}
def visit(key):
node_dict[key] = self.nodes[key]
for child in self.nodes[key].children:
visit(child)
for child_key in scene.children:
visit(child_key)
return node_dict
def get_scene_positions_and_edges(self, scene):
"""Returns a tuple containing a dictionary of positions and a list of tuples representing edges.
Parameters
----------
scene : :class:`~compas.files.GLTFScene`
Returns
-------
tuple
"""
positions_dict = {"root": [0, 0, 0]}
edges_list = []
def visit(node, key):
for child_key in node.children:
positions_dict[child_key] = self.nodes[child_key].position
edges_list.append((key, child_key))
visit(self.nodes[child_key], child_key)
visit(scene, "root")
return positions_dict, edges_list
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import os
import urllib
import compas
from compas.datastructures import Mesh
from compas.files.gltf.data_classes import ImageData
from compas.files.gltf.data_classes import MaterialData
from compas.files.gltf.data_classes import MineType
from compas.files.gltf.data_classes import PBRMetallicRoughnessData
from compas.files.gltf.data_classes import TextureData
from compas.files.gltf.extensions import KHR_materials_pbrSpecularGlossiness
from compas.files.gltf.extensions import KHR_Texture_Transform
from compas.files.gltf.gltf import GLTF
from compas.geometry import Box
from compas.geometry import Frame
from compas.utilities import download_file_from_remote
dirname = os.path.join(compas.APPDATA, "data", "gltfs")
gltf_filepath = os.path.join(dirname, "compas.gltf")
image_uri = "compas_icon_white.png"
image_file = os.path.join(dirname, image_uri)
try:
download_file_from_remote("https://compas.dev/images/compas_icon_white.png", image_file)
except urllib.error.HTTPError:
pass
cnt = GLTFContent()
scene = cnt.add_scene()
# image's uri should be the relative path to the image from the filepath given at the time of export,
# so if the image will sit in the same directory as the resultant gltf, the uri is just the name of the file.
# it's the exporter's job to manage how things are stored in the buffer, and it would only store image data
# in the buffer if it is exporting as glb. otherwise the uri will just stay the relative path to the image
# and the gltf only makes sense when bundled with these external files.
image_data = ImageData(
name=image_uri,
mime_type=MineType.PNG,
uri=image_file,
)
image_idx = cnt.add_image(image_data)
# TextureData.source takes the key of the ImageData that it should use as 'source'
texture = TextureData(source=image_idx)
texture_idx = cnt.add_texture(texture)
texture = TextureData(source=image_idx)
texture_idx2 = cnt.add_texture(texture)
material = MaterialData()
material.name = "Texture"
material.pbr_metallic_roughness = PBRMetallicRoughnessData()
material.pbr_metallic_roughness.metallic_factor = 0.0
material.pbr_metallic_roughness.base_color_texture = TextureInfoData(index=texture_idx)
material_key = cnt.add_material(material)
# add extension
pbr_specular_glossiness = KHR_materials_pbrSpecularGlossiness()
pbr_specular_glossiness.diffuse_factor = [
0.980392158,
0.980392158,
0.980392158,
1.0,
]
pbr_specular_glossiness.specular_factor = [0.0, 0.0, 0.0]
pbr_specular_glossiness.glossiness_factor = 0.0
texture_transform = KHR_Texture_Transform()
texture_transform.rotation = 0.0
texture_transform.scale = [2.0, 2.0]
# same here, TextureInfoData uses the key of the TextureData
pbr_specular_glossiness.diffuse_texture = TextureInfoData(texture_idx2)
pbr_specular_glossiness.diffuse_texture.add_extension(texture_transform)
material.add_extension(pbr_specular_glossiness)
# add box
box = Box(Frame.worldXY(), 1, 1, 1)
mesh = Mesh.from_shape(box)
mesh.quads_to_triangles()
node = scene.add_child()
mesh_data = node.add_mesh(mesh)
normals = [mesh.vertex_normal(k) for k in mesh.vertices()]
texcoord_0 = [(0, 0) for _ in mesh.vertices()]
"""
for fkey in mesh.faces():
vkeys = mesh.face_vertices(fkey)
plane = mesh.face_plane(fkey)
frame = Frame.from_plane(plane)
coords = mesh.face_coordinates(fkey)
for vkey, xyz in zip(vkeys, coords):
u, v, _ = frame.to_local_coordinates(Point(*xyz))
texcoord_0[vkey] = (u, v) # not ideal, gets overwritten
"""
# here is the tricky part... for this material to be valid and applied to this mesh,
# each of the primitives must have within the attribute `attributes` a key of the form `TEXCOORD_{some integer}`.
# the value of this thing should be a list of pairs of floats representing the UV texture coordinates for each vertex.
# if `{some integer}` is 0 then there's nothing else to do. but if a primitive has multiple `TEXCOORD_{some integer}`s,
# then the various `TextureInfoData.tex_coord` associated to this material have to be updated with the appropriate `{some integer}`.
# would work better if each vertex could have 4 different texture coordinates
texcoord_0 = [
(0.0, 1.0),
(0.0, 0.0),
(1.0, 0.0),
(1.0, 1.0),
(0.0, 0.0),
(1.0, 0.0),
(1.0, 1.0),
(0.0, 1.0),
]
pd = node.mesh_data.primitive_data_list[0]
pd.material = material_key
pd.attributes["TEXCOORD_0"] = texcoord_0
pd.attributes["NORMAL"] = normals
gltf = GLTF(gltf_filepath)
gltf.content = cnt
gltf.export(embed_data=False)
| mit | 535b5c543af273229139d4130ab22bdd | 33.050162 | 136 | 0.590553 | 4.003615 | false | false | false | false |
compas-dev/compas | src/compas/topology/traversal.py | 1 | 18798 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
from queue import PriorityQueue
except ImportError:
from Queue import PriorityQueue
from collections import deque
from compas.geometry import distance_point_point
# ==============================================================================
# DFS
# ==============================================================================
def depth_first_ordering(adjacency, root):
"""Compute a depth-first ordering of the nodes of a graph, starting from a root node.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
The node from which to start the depth-first search.
Returns
-------
list[hashable]
A depth-first ordering of all nodes of the graph.
Notes
-----
Return all nodes of a connected component containing `root` of a network
represented by an adjacency dictionary.
This implementation uses a "to visit" stack. The principle of a stack
is LIFO. In Python, a list is a stack.
Initially only the root element is on the stack. While there are still
elements on the stack, the node on top of the stack is "popped off" and if
this node was not already visited, its neighbors are added to the stack if
they hadn't already been visited themselves.
Since the last element on top of the stack is always popped off, the
algorithm goes deeper and deeper in the datastructure, until it reaches a
node without (unvisited) neighbors and then backtracks. Once a new node
with unvisited neighbors is found, there too it will go as deep as possible
before backtracking again, and so on. Once there are no more nodes on the
stack, the entire structure has been traversed.
Note that this returns a depth-first spanning tree of a connected component
of the network.
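Examples
--------
A small sketch; the exact order of siblings depends on set iteration:
>>> adjacency = {0: [1, 2], 1: [0], 2: [0]}
>>> ordering = depth_first_ordering(adjacency, 0)
>>> ordering[0], sorted(ordering)
(0, [0, 1, 2])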
"""
adjacency = {key: set(nbrs) for key, nbrs in iter(adjacency.items())}
tovisit = [root]
visited = set()
ordering = []
while tovisit:
# pop the last added element from the stack
node = tovisit.pop()
if node not in visited:
# mark the node as visited
visited.add(node)
ordering.append(node)
# add the unvisited nbrs to the stack
tovisit.extend(adjacency[node] - visited)
return ordering
# def depth_first_tree(adjacency, root):
# """Construct a spanning tree using a depth-first search.
# Parameters
# ----------
# adjacency : dict
# An adjacency dictionary.
# root : hashable
# The identifier of the root node.
# Returns
# -------
# list
# List of nodes in depth-first order.
# dict
# Dictionary of predecessors for each of the nodes.
# list
# The depth-first paths.
# Examples
# --------
# >>>
# """
# adjacency = {key: set(nbrs) for key, nbrs in iter(adjacency.items())}
# tovisit = [root]
# visited = set()
# ordering = []
# predecessors = {}
# paths = [[root]]
# while tovisit:
# # pop the last added element from the stack
# node = tovisit.pop()
# if node not in visited:
# paths[-1].append(node)
# # mark the node as visited
# visited.add(node)
# ordering.append(node)
# # add the unvisited nbrs to the stack
# nodes = adjacency[node] - visited
# if nodes:
# for child in nodes:
# predecessors[child] = node
# else:
# paths.append([])
# tovisit.extend(nodes)
# if not len(paths[-1]):
# del paths[-1]
# return ordering, predecessors, paths
# ==============================================================================
# BFS
# ==============================================================================
def breadth_first_ordering(adjacency, root):
"""Compute a breadth-first ordering of the nodes of a graph, starting from a root node.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
The node from which to start the breadth-first search.
Returns
-------
list[hashable]
A breadth-first ordering of all nodes of the graph.
Notes
-----
This implementation uses a double-ended queue (deque) to keep track of nodes to visit.
The principle of a queue is FIFO. In Python, a deque is ideal for removing elements
from the beginning, i.e. from the 'left'.
In a breadth-first search, all unvisited neighbors of a node are visited
first. When a neighbor is visited, its univisited neighbors are added to
the list of nodes to visit.
By appending the neighbors to the end of the list of nodes to visit,
and by visiting the nodes at the start of the list first, the network is
traversed in *breadth-first* order.
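Examples
--------
A small sketch on a four-node graph:
>>> adjacency = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1]}
>>> breadth_first_ordering(adjacency, 0)
[0, 1, 2, 3]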
"""
tovisit = deque([root])
visited = set([root])
ordering = [root]
while tovisit:
node = tovisit.popleft()
for nbr in adjacency[node]:
if nbr not in visited:
tovisit.append(nbr)
visited.add(nbr)
ordering.append(nbr)
return ordering
def breadth_first_traverse(adjacency, root, callback=None):
"""Traverse an adjacency dict in "breadth-first" order.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
The identifier of the starting node.
callback : callable, optional
A callback function applied to every traversed node and its current neighbour.
Returns
-------
set[hashable]
The visited nodes.
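Examples
--------
A sketch without a callback; the return value is the set of visited nodes:
>>> visited = breadth_first_traverse({0: [1], 1: [0]}, 0)
>>> sorted(visited)
[0, 1]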
"""
tovisit = deque([root])
visited = set([root])
while tovisit:
node = tovisit.popleft()
for nbr in adjacency[node]:
if nbr not in visited:
tovisit.append(nbr)
visited.add(nbr)
if callback:
callback(node, nbr)
return visited
def breadth_first_paths(adjacency, root, goal):
"""Return all paths from root to goal.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
The identifier of the starting node.
goal : hashable
The identifier of the ending node.
Yields
------
list[hashable]
A path from root to goal.
Notes
-----
Due to the nature of the search, the first path returned is the shortest.
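Examples
--------
A sketch on a four-node cycle; both paths have length three, and the
order of equal-length paths may vary:
>>> adjacency = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
>>> sorted(breadth_first_paths(adjacency, 0, 3))
[[0, 1, 3], [0, 2, 3]]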
"""
adjacency = {key: set(nbrs) for key, nbrs in iter(adjacency.items())}
tovisit = deque([(root, [root])])
while tovisit:
node, path = tovisit.popleft()
for nbr in adjacency[node] - set(path):
if nbr == goal:
yield path + [nbr]
else:
tovisit.append((nbr, path + [nbr]))
def breadth_first_tree(adjacency, root):
"""Compute a BFS tree, starting from a root node.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
Identifier of the root node.
Returns
-------
list[hashable]
BFS ordering of all nodes.
dict[hashable, hashable]
A dict mapping each node to its direct predecessor in the tree.
list[list[hashable]]
A traversal path for every node in the graph.
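Examples
--------
A sketch on a small star graph:
>>> ordering, predecessors, paths = breadth_first_tree({0: [1, 2], 1: [0], 2: [0]}, 0)
>>> ordering
[0, 1, 2]
>>> predecessors[1]
0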
"""
tovisit = deque([root])
visited = set([root])
ordering = [root]
predecessors = {}
paths = []
while tovisit:
node = tovisit.popleft()
for nbr in adjacency[node]:
if nbr not in visited:
predecessors[nbr] = node
tovisit.append(nbr)
visited.add(nbr)
ordering.append(nbr)
else:
path = [node]
while path[-1] in predecessors:
path.append(predecessors[path[-1]])
paths.append(list(reversed(path)))
return ordering, predecessors, paths
# ==============================================================================
# shortest
# ==============================================================================
def shortest_path(adjacency, root, goal):
"""Find the shortest path between two vertices of a network.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
root : hashable
The identifier of the starting node.
goal : hashable
The identifier of the ending node.
Returns
-------
list[hashable] | None
The path from root to goal, or None, if no path exists between the vertices.
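Examples
--------
A sketch on a three-node chain:
>>> adjacency = {0: [1], 1: [0, 2], 2: [1]}
>>> shortest_path(adjacency, 0, 2)
[0, 1, 2]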
"""
try:
return next(breadth_first_paths(adjacency, root, goal))
except StopIteration:
return None
# ==============================================================================
# A*
# ==============================================================================
def reconstruct_path(came_from, current):
if current not in came_from:
return None
total_path = [current]
while current in came_from:
current = came_from[current]
total_path.append(current)
total_path.reverse()
return total_path
def astar_lightest_path(adjacency, weights, heuristic, root, goal):
"""Find the path of least weight between two vertices of a graph using the A* search algorithm.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
weights : dict[tuple[hashable, hashable], float]
A dictionary of edge weights.
heuristic : dict[hashable, float]
A dictionary of guesses of weights of paths from a node to the goal.
root : hashable
The start vertex.
goal : hashable
The end vertex.
Returns
-------
list[hashable] | None
The path from root to goal, or None, if no path exists between the vertices.
References
----------
https://en.wikipedia.org/wiki/A*_search_algorithm
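Examples
--------
A sketch on a weighted triangle; with a zero heuristic, A* degenerates to
Dijkstra and the cheaper two-step route wins over the direct edge:
>>> adjacency = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
>>> weights = {(0, 1): 5, (1, 0): 5, (1, 2): 1, (2, 1): 1, (0, 2): 1, (2, 0): 1}
>>> heuristic = {0: 0, 1: 0, 2: 0}
>>> astar_lightest_path(adjacency, weights, heuristic, 0, 1)
[0, 2, 1]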
"""
visited_set = set()
candidates_set = {root}
best_candidate_heap = PriorityQueue()
best_candidate_heap.put((heuristic[root], root))
came_from = dict()
g_score = dict()
for v in adjacency:
g_score[v] = float("inf")
g_score[root] = 0
while not best_candidate_heap.empty():
_, current = best_candidate_heap.get()
if current == goal:
break
visited_set.add(current)
for neighbor in adjacency[current]:
if neighbor in visited_set:
continue
tentative_g_score = g_score[current] + weights[(current, neighbor)]
if neighbor not in candidates_set:
candidates_set.add(neighbor)
elif tentative_g_score >= g_score[neighbor]:
continue
came_from[neighbor] = current
g_score[neighbor] = tentative_g_score
new_f_score = g_score[neighbor] + heuristic[neighbor]
best_candidate_heap.put((new_f_score, neighbor))
return reconstruct_path(came_from, goal)
def _get_coordinates(key, structure):
if hasattr(structure, "node_attributes"):
return structure.node_attributes(key, "xyz")
if hasattr(structure, "vertex_coordinates"):
return structure.vertex_coordinates(key)
raise Exception("Coordinates cannot be found for object of type {}".format(type(structure)))
def _get_points(structure):
if hasattr(structure, "nodes"):
return structure.nodes()
if hasattr(structure, "vertices"):
return structure.vertices()
raise Exception("Points cannot be found for object of type {}".format(type(structure)))
def astar_shortest_path(graph, root, goal):
"""Find the shortest path between two vertices of a graph or mesh using the A* search algorithm.
Parameters
----------
graph : :class:`~compas.datastructures.Network` | :class:`~compas.datastructures.Mesh`
A network or mesh data structure.
root : hashable
The identifier of the starting node.
goal : hashable
The identifier of the ending node.
Returns
-------
list[hashable] | None
The path from root to goal, or None, if no path exists between the vertices.
References
----------
https://en.wikipedia.org/wiki/A*_search_algorithm
"""
adjacency = graph.adjacency
weights = {}
for u, v in graph.edges():
u_coords = _get_coordinates(u, graph)
v_coords = _get_coordinates(v, graph)
distance = distance_point_point(u_coords, v_coords)
weights[(u, v)] = distance
weights[(v, u)] = distance
heuristic = {}
goal_coords = _get_coordinates(goal, graph)
points = _get_points(graph)
for u in points:
u_coords = _get_coordinates(u, graph)
heuristic[u] = distance_point_point(u_coords, goal_coords)
return astar_lightest_path(adjacency, weights, heuristic, root, goal)
def dijkstra_distances(adjacency, weight, target):
"""Compute Dijkstra distances from all nodes in a graph to one target node.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
weight : dict[tuple[hashable, hashable], float]
A dictionary of edge weights.
target : hashable
The key of the vertex to which the distances are computed.
Returns
-------
dict[hashable, float]
A dictionary of distances to the target.
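Examples
--------
A sketch on a three-node chain:
>>> adjacency = {0: [1], 1: [0, 2], 2: [1]}
>>> weight = {(0, 1): 2.0, (1, 0): 2.0, (1, 2): 3.0, (2, 1): 3.0}
>>> dijkstra_distances(adjacency, weight, 2)[0]
5.0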
"""
adjacency = {key: set(nbrs) for key, nbrs in adjacency.items()}
distance = {key: (0 if key == target else 1e17) for key in adjacency}
tovisit = set(adjacency.keys())
visited = set()
while tovisit:
u = min(tovisit, key=lambda k: distance[k])
tovisit.remove(u)
visited.add(u)
for v in adjacency[u] - visited:
d = distance[u] + weight[(u, v)]
if d < distance[v]:
distance[v] = d
return distance
def dijkstra_path(adjacency, weight, source, target, dist=None):
"""Find the shortest path between two nodes of a graph if the weights of the connecting edges are not all the same.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] | dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
weight : dict[tuple[hashable, hashable], float]
A dictionary of edge weights.
source : hashable
The start vertex.
target : hashable
The end vertex.
Returns
-------
list[hashable]
The shortest path.
Notes
-----
The edge weights should all be positive.
For a directed graph, set the weights of the reversed edges to ``+inf``.
For an undirected graph, add the same weight for an edge in both directions.
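Examples
--------
A sketch where the direct edge is more expensive than the detour:
>>> adjacency = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
>>> weight = {(0, 1): 1.0, (1, 0): 1.0, (1, 2): 1.0, (2, 1): 1.0, (0, 2): 5.0, (2, 0): 5.0}
>>> dijkstra_path(adjacency, weight, 0, 2)
[0, 1, 2]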
"""
if not dist:
dist = dijkstra_distances(adjacency, weight, target)
path = [source]
node = source
# step greedily towards the target: each move goes to the neighbour that
# minimises the remaining Dijkstra distance plus the cost of the step
while node != target:
node = min(adjacency[node], key=lambda nbr: dist[nbr] + weight[(node, nbr)])
path.append(node)
return path
| mit | 64f611426e89c3dc328198f02297ff09 | 32.09507 | 119 | 0.58086 | 4.009812 | false | false | false | false |
compas-dev/compas | src/compas/robots/model/joint.py | 1 | 24178 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.data import Data
from compas.files import URDFElement
from compas.files import URDFParser
from compas.geometry import Frame
from compas.geometry import Rotation
from compas.geometry import Transformation
from compas.geometry import Translation
from compas.geometry import Vector
from compas.geometry import transform_vectors
from compas.robots.model.base import FrameProxy
from compas.robots.model.base import _attr_from_data
from compas.robots.model.base import _attr_to_data
from compas.robots.model.base import _parse_floats
__all__ = [
"Joint",
"ParentLink",
"ChildLink",
"Calibration",
"Dynamics",
"Limit",
"Axis",
"Mimic",
"SafetyController",
]
class ParentLink(Data):
"""Describes a parent relation between a joint its parent link."""
def __init__(self, link):
super(ParentLink, self).__init__()
self.link = link
def __str__(self):
return str(self.link)
def get_urdf_element(self):
return URDFElement("parent", {"link": self.link})
@property
def data(self):
return {
"link": self.link,
}
@data.setter
def data(self, data):
self.link = data["link"]
@classmethod
def from_data(cls, data):
return cls(data["link"])
class ChildLink(Data):
"""Describes a child relation between a joint and its child link."""
def __init__(self, link):
super(ChildLink, self).__init__()
self.link = link
def __str__(self):
return str(self.link)
def get_urdf_element(self):
return URDFElement("child", {"link": self.link})
@property
def data(self):
return {
"link": self.link,
}
@data.setter
def data(self, data):
self.link = data["link"]
@classmethod
def from_data(cls, data):
return cls(data["link"])
class Calibration(Data):
"""Reference positions of the joint, used to calibrate the absolute position."""
def __init__(self, rising=0.0, falling=0.0, reference_position=0.0):
super(Calibration, self).__init__()
self.rising = float(rising)
self.falling = float(falling)
self.reference_position = float(reference_position)
def get_urdf_element(self):
attributes = {
"rising": self.rising,
"falling": self.falling,
"reference_position": self.reference_position,
}
attributes = dict(filter(lambda x: x[1], attributes.items()))
return URDFElement("calibration", attributes)
@property
def data(self):
return {
"rising": self.rising,
"falling": self.falling,
"reference_position": self.reference_position,
}
@data.setter
def data(self, data):
self.rising = data["rising"]
self.falling = data["falling"]
self.reference_position = data["reference_position"]
class Dynamics(Data):
"""Physical properties of the joint used for simulation of dynamics."""
def __init__(self, damping=0.0, friction=0.0, **kwargs):
super(Dynamics, self).__init__()
self.damping = float(damping)
self.friction = float(friction)
self.attr = kwargs
def get_urdf_element(self):
attributes = {
"damping": self.damping,
"friction": self.friction,
}
attributes.update(self.attr)
return URDFElement("dynamics", attributes)
@property
def data(self):
return {
"damping": self.damping,
"friction": self.friction,
"attr": _attr_to_data(self.attr),
}
@data.setter
def data(self, data):
self.damping = data["damping"]
self.friction = data["friction"]
self.attr = _attr_from_data(data["attr"])
class Limit(Data):
"""Joint limit properties.
Attributes
----------
effort : float
Maximum joint effort.
velocity : float
Maximum joint velocity.
lower : float
Lower joint limit (radians for revolute joints, meter for prismatic joints).
upper : float
Upper joint limit (radians for revolute joints, meter for prismatic joints).
"""
def __init__(self, effort=0.0, velocity=0.0, lower=0.0, upper=0.0, **kwargs):
super(Limit, self).__init__()
self.effort = float(effort)
self.velocity = float(velocity)
self.lower = float(lower)
self.upper = float(upper)
self.attr = kwargs
def get_urdf_element(self):
attributes = {
"lower": self.lower,
"upper": self.upper,
}
attributes = dict(filter(lambda x: x[1], attributes.items()))
attributes["effort"] = self.effort
attributes["velocity"] = self.velocity
attributes.update(self.attr)
return URDFElement("limit", attributes)
@property
def data(self):
return {
"effort": self.effort,
"velocity": self.velocity,
"lower": self.lower,
"upper": self.upper,
"attr": _attr_to_data(self.attr),
}
@data.setter
def data(self, data):
self.effort = data["effort"]
self.velocity = data["velocity"]
self.lower = data["lower"]
self.upper = data["upper"]
self.attr = _attr_from_data(data["attr"])
def scale(self, factor):
"""Scale the upper and lower limits by a given factor.
Parameters
----------
factor : float
Scale factor.
Returns
-------
None
"""
self.lower *= factor
self.upper *= factor
class Mimic(Data):
"""Description of joint mimic."""
def __init__(self, joint, multiplier=1.0, offset=0.0):
super(Mimic, self).__init__()
self.joint = joint # == joint name
self.multiplier = float(multiplier)
self.offset = float(offset)
def get_urdf_element(self):
attributes = {"joint": self.joint}
if self.multiplier != 1.0:
attributes["multiplier"] = self.multiplier
if self.offset != 0.0:
attributes["offset"] = self.offset
return URDFElement("mimic", attributes)
@property
def data(self):
return {
"joint": self.joint,
"multiplier": self.multiplier,
"offset": self.offset,
}
@data.setter
def data(self, data):
self.joint = data["joint"]
self.multiplier = data["multiplier"]
self.offset = data["offset"]
@classmethod
def from_data(cls, data):
mimic = cls(data["joint"])
mimic.data = data
return mimic
def calculate_position(self, mimicked_joint_position):
return self.multiplier * mimicked_joint_position + self.offset
class SafetyController(Data):
"""Safety controller properties."""
def __init__(self, k_velocity, k_position=0.0, soft_lower_limit=0.0, soft_upper_limit=0.0):
super(SafetyController, self).__init__()
self.k_velocity = float(k_velocity)
self.k_position = float(k_position)
self.soft_lower_limit = float(soft_lower_limit)
self.soft_upper_limit = float(soft_upper_limit)
def get_urdf_element(self):
attributes = {
"k_position": self.k_position,
"soft_lower_limit": self.soft_lower_limit,
"soft_upper_limit": self.soft_upper_limit,
}
attributes = dict(filter(lambda x: x[1], attributes.items()))
attributes["k_velocity"] = self.k_velocity
return URDFElement("safety_controller", attributes)
@property
def data(self):
return {
"k_velocity": self.k_velocity,
"k_position": self.k_position,
"soft_lower_limit": self.soft_lower_limit,
"soft_upper_limit": self.soft_upper_limit,
}
@data.setter
def data(self, data):
self.k_velocity = data["k_velocity"]
self.k_position = data["k_position"]
self.soft_lower_limit = data["soft_lower_limit"]
self.soft_upper_limit = data["soft_upper_limit"]
@classmethod
def from_data(cls, data):
sc = cls(data["k_velocity"])
sc.data = data
return sc
class Axis(Data):
"""Representation of an axis or vector.
Attributes
----------
x : float
X coordinate.
y : float
Y coordinate.
z : float
Z coordinate.
attr : dict
Additional axis attributes.
"""
def __init__(self, xyz="1 0 0", **kwargs):
# We are not using Vector here because we
# cannot attach _urdf_source to it due to __slots__
super(Axis, self).__init__()
xyz = _parse_floats(xyz)
xyz = Vector(*xyz)
if xyz.length != 0:
xyz.unitize()
self.x = xyz[0]
self.y = xyz[1]
self.z = xyz[2]
self.attr = kwargs
def get_urdf_element(self):
attributes = {"xyz": "{} {} {}".format(self.x, self.y, self.z)}
attributes.update(self.attr)
return URDFElement("axis", attributes)
@property
def data(self):
return {
"x": self.x,
"y": self.y,
"z": self.z,
"attr": _attr_to_data(self.attr),
}
@data.setter
def data(self, data):
self.x = data["x"]
self.y = data["y"]
self.z = data["z"]
self.attr = _attr_from_data(data["attr"])
def copy(self):
"""Create a copy of the axis instance."""
cls = type(self)
return cls("%f %f %f" % (self.x, self.y, self.z))
def transform(self, transformation):
"""Transform the axis in place.
Parameters
----------
transformation : :class:`~compas.geometry.Transformation`
The transformation used to transform the axis.
"""
xyz = transform_vectors([[self.x, self.y, self.z]], transformation.matrix)
self.x = xyz[0][0]
self.y = xyz[0][1]
self.z = xyz[0][2]
def transformed(self, transformation):
"""Return a transformed copy of the axis.
Parameters
----------
transformation : :class:`~compas.geometry.Transformation`
The transformation used to transform the axis.
Returns
-------
:class:`~compas.geometry.Vector`
The transformed axis vector.
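Examples
--------
A sketch; a quarter turn about Z maps the X axis onto the Y axis:
>>> import math
>>> from compas.geometry import Rotation
>>> axis = Axis("1 0 0")
>>> v = axis.transformed(Rotation.from_axis_and_angle([0, 0, 1], math.pi / 2))
>>> [round(c, 3) for c in v]
[0.0, 1.0, 0.0]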
"""
xyz = transform_vectors([[self.x, self.y, self.z]], transformation.matrix)
return Vector(xyz[0][0], xyz[0][1], xyz[0][2])
@property
def vector(self):
"""Vector of the axis."""
return Vector(self.x, self.y, self.z)
def __str__(self):
return "[%.3f, %.3f, %.3f]" % (self.x, self.y, self.z)
class Joint(Data):
"""Representation of the kinematics and dynamics of a joint and its safety limits.
Attributes
----------
name : str
Unique name for the joint.
type : str | int
Joint type either as a string or an index number. See class attributes for named constants and supported types.
origin : :class:`Frame`
Frame defining the transformation from the parent link to the child link frame.
parent : :class:`ParentLink` | str
Parent link instance or parent link name.
child : :class:`ChildLink` | str
Child link instance or name of child link.
axis : :class:`Axis`
Joint axis specified in the joint frame. Represents the axis of
rotation for revolute joints, the axis of translation for prismatic
joints, and the surface normal for planar joints. The axis is
specified in the joint frame of reference.
calibration : :class:`Calibration`
Reference positions of the joint, used to calibrate the absolute position of the joint.
dynamics : :class:`Dynamics`
Physical properties of the joint. These values are used to
specify modeling properties of the joint, particularly useful for
simulation.
limit : :class:`Limit`
Joint limit properties.
safety_controller : :class:`SafetyController`
Safety controller properties.
mimic : :class:`Mimic`
Used to specify that the defined joint mimics another existing joint.
attr : dict
Non-standard attributes.
child_link : :class:`Link`
Joint's child link
position : float
The current position of the joint. This depends on the
joint type, i.e. for revolute joints, it will be the rotation angle
in radians, for prismatic joints the translation in meters.
Class Attributes
----------------
REVOLUTE : int
Revolute joint type.
CONTINUOUS : int
Continuous joint type.
PRISMATIC : int
Prismatic joint type.
FIXED : int
Fixed joint type.
FLOATING : int
Floating joint type.
PLANAR : int
Planar joint type.
SUPPORTED_TYPES : list[str]
String representations of the supported joint types.
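Examples
--------
A minimal revolute joint, sketched with illustrative names and limits
(assumes ``Joint``, ``Limit`` and ``Axis`` are exposed at ``compas.robots``):
>>> from compas.robots import Axis, Joint, Limit
>>> limit = Limit(effort=10.0, velocity=1.0, lower=-3.14, upper=3.14)
>>> joint = Joint("joint1", "revolute", "base_link", "link1", axis=Axis("0 0 1"), limit=limit)
>>> T = joint.calculate_transformation(0.5)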
"""
REVOLUTE = 0
CONTINUOUS = 1
PRISMATIC = 2
FIXED = 3
FLOATING = 4
PLANAR = 5
SUPPORTED_TYPES = (
"revolute",
"continuous",
"prismatic",
"fixed",
"floating",
"planar",
)
def __init__(
self,
name,
type,
parent,
child,
origin=None,
axis=None,
calibration=None,
dynamics=None,
limit=None,
safety_controller=None,
mimic=None,
**kwargs
):
type_idx = type
if isinstance(type_idx, str) and type_idx in Joint.SUPPORTED_TYPES:
type_idx = Joint.SUPPORTED_TYPES.index(type_idx)
if type_idx not in range(len(Joint.SUPPORTED_TYPES)):
raise ValueError("Unsupported joint type: %s" % type)
super(Joint, self).__init__()
self.name = name
self.type = type_idx
self.parent = parent if isinstance(parent, ParentLink) else ParentLink(parent)
self.child = child if isinstance(child, ChildLink) else ChildLink(child)
self.origin = origin or Frame.from_euler_angles([0.0, 0.0, 0.0])
self.axis = axis or Axis()
self.calibration = calibration
self.dynamics = dynamics
self.limit = limit
self.safety_controller = safety_controller
self.mimic = mimic
self.attr = kwargs
self.child_link = None
self.position = 0
# The following are world-relative frames representing the origin and the axis, which change with
# the joint state, while `origin` and `axis` above are parent-relative and static.
self.current_origin = self.origin.copy()
self.current_axis = self.axis.copy()
@property
def origin(self):
return self._origin
@origin.setter
def origin(self, value):
self._origin = FrameProxy.create_proxy(value)
@property
def current_origin(self):
return self._current_origin
@current_origin.setter
def current_origin(self, value):
self._current_origin = FrameProxy.create_proxy(value)
def get_urdf_element(self):
attributes = {"name": self.name, "type": self.SUPPORTED_TYPES[self.type]}
attributes.update(self.attr)
elements = [
self.parent,
self.child,
self.axis,
self.calibration,
self.dynamics,
self.limit,
self.safety_controller,
self.mimic,
self.origin,
]
return URDFElement("joint", attributes, elements)
@property
def data(self):
return {
"name": self.name,
"type": self.SUPPORTED_TYPES[self.type],
"parent": self.parent.data,
"child": self.child.data,
"origin": self.origin.data if self.origin else None,
"axis": self.axis.data if self.axis else None,
"calibration": self.calibration.data if self.calibration else None,
"dynamics": self.dynamics.data if self.dynamics else None,
"limit": self.limit.data if self.limit else None,
"safety_controller": self.safety_controller.data if self.safety_controller else None,
"mimic": self.mimic.data if self.mimic else None,
"attr": _attr_to_data(self.attr),
"position": self.position,
}
@data.setter
def data(self, data):
self.name = data["name"]
self.type = Joint.SUPPORTED_TYPES.index(data["type"])
self.parent = ParentLink.from_data(data["parent"])
self.child = ChildLink.from_data(data["child"])
self.origin = Frame.from_data(data["origin"]) if data["origin"] else None
self.axis = Axis.from_data(data["axis"]) if data["axis"] else None
self.calibration = Calibration.from_data(data["calibration"]) if data["calibration"] else None
self.dynamics = Dynamics.from_data(data["dynamics"]) if data["dynamics"] else None
self.limit = Limit.from_data(data["limit"]) if data["limit"] else None
self.safety_controller = (
SafetyController.from_data(data["safety_controller"]) if data["safety_controller"] else None
)
self.mimic = Mimic.from_data(data["mimic"]) if data["mimic"] else None
self.attr = _attr_from_data(data["attr"])
self.position = data["position"]
@classmethod
def from_data(cls, data):
joint = cls(
data["name"],
data["type"],
ParentLink.from_data(data["parent"]),
ChildLink.from_data(data["child"]),
)
joint.data = data
return joint
@property
def current_transformation(self):
"""Current transformation of the joint."""
return Transformation.from_frame(self.current_origin)
def transform(self, transformation):
"""Transform the joint in place.
Parameters
----------
transformation : :class:`~compas.geometry.Transformation`
The transformation used to transform the joint.
Returns
-------
None
"""
self.current_origin.transform(transformation)
self.current_axis.transform(transformation)
def _create(self, transformation):
"""Internal method to initialize the transformation tree.
Parameters
----------
transformation : :class:`~compas.geometry.Transformation`
The transformation used to transform the joint.
Returns
-------
None
"""
self.current_origin = self.origin.transformed(transformation)
self.current_axis.transform(self.current_transformation)
def calculate_revolute_transformation(self, position):
"""Returns a transformation of a revolute joint.
A revolute joint rotates about the axis and has a limited range
specified by the upper and lower limits.
Parameters
----------
position : float
Angle in radians.
Returns
-------
:class:`Rotation`
Transformation of type rotation for the revolute joint.
"""
if not self.limit:
raise ValueError("Revolute joints are required to define a limit")
position = max(min(position, self.limit.upper), self.limit.lower)
return self.calculate_continuous_transformation(position)
def calculate_continuous_transformation(self, position):
"""Returns a transformation of a continuous joint.
A continuous joint rotates about the axis and has no upper and lower
limits.
Parameters
----------
position : float
Angle in radians
Returns
-------
:class:`Rotation`
Transformation of type rotation for the continuous joint.
"""
return Rotation.from_axis_and_angle(self.current_axis.vector, position, self.current_origin.point)
def calculate_prismatic_transformation(self, position):
"""Returns a transformation of a prismatic joint.
A prismatic joint slides along the axis and has a limited range
specified by the upper and lower limits.
Parameters
----------
position : float
Translation movement in meters.
Returns
-------
:class:`Translation`
Transformation of type translation for the prismatic joint.
"""
if not self.limit:
raise ValueError("Prismatic joints are required to define a limit")
position = max(min(position, self.limit.upper), self.limit.lower)
return Translation.from_vector(self.current_axis.vector * position)
# does this ever happen?
def calculate_fixed_transformation(self, position):
"""Returns an identity transformation.
The fixed joint is not really a joint because it cannot move. All
degrees of freedom are locked.
Returns
-------
:class:`Translation`
Identity transformation.
"""
return Transformation()
def calculate_floating_transformation(self, position):
"""Returns a transformation of a floating joint.
A floating joint allows motion for all 6 degrees of freedom.
"""
raise NotImplementedError
def calculate_planar_transformation(self, position):
"""Returns a transformation of a planar joint.
A planar joint allows motion in a plane perpendicular to the axis.
"""
raise NotImplementedError
def calculate_transformation(self, position):
"""Returns the transformation of the joint.
This function calls different calculate_*_transformation depends on self.type
Parameters
----------
position : float
Position in radians or meters depending on the joint type.
"""
# Set the transformation function according to the type
if not hasattr(self, "_calculate_transformation"):
switcher = {
Joint.REVOLUTE: self.calculate_revolute_transformation,
Joint.CONTINUOUS: self.calculate_continuous_transformation,
Joint.PRISMATIC: self.calculate_prismatic_transformation,
Joint.FIXED: self.calculate_fixed_transformation,
Joint.FLOATING: self.calculate_floating_transformation,
Joint.PLANAR: self.calculate_planar_transformation,
}
self._calculate_transformation = switcher.get(self.type)
return self._calculate_transformation(position)
def is_configurable(self):
"""Returns ``True`` if the joint can be configured, otherwise ``False``."""
return self.type != Joint.FIXED and self.mimic is None
def is_scalable(self):
"""Returns ``True`` if the joint can be scaled, otherwise ``False``."""
return self.type in [Joint.PLANAR, Joint.PRISMATIC]
def scale(self, factor):
"""Scale the joint origin and limit (only if scalable) by a given factor.
Parameters
----------
factor : float
Scale factor.
Returns
-------
None
"""
self.current_origin.scale(factor)
if self.is_scalable():
self.limit.scale(factor)
URDFParser.install_parser(Joint, "robot/joint")
URDFParser.install_parser(ParentLink, "robot/joint/parent")
URDFParser.install_parser(ChildLink, "robot/joint/child")
URDFParser.install_parser(Calibration, "robot/joint/calibration")
URDFParser.install_parser(Dynamics, "robot/joint/dynamics")
URDFParser.install_parser(Limit, "robot/joint/limit")
URDFParser.install_parser(Axis, "robot/joint/axis")
URDFParser.install_parser(Mimic, "robot/joint/mimic")
URDFParser.install_parser(SafetyController, "robot/joint/safety_controller")
URDFParser.install_parser(Frame, "robot/joint/origin", proxy_type=FrameProxy)
| mit | eae16c4c9930d4890caf3b13f6e68a41 | 29.566372 | 119 | 0.596203 | 4.129462 | false | false | false | false |
compas-dev/compas | src/compas/geometry/primitives/circle.py | 1 | 6043 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import pi
from compas.geometry.primitives import Primitive
from compas.geometry.primitives import Plane
class Circle(Primitive):
"""A circle is defined by a plane and a radius.
Parameters
----------
plane : [point, vector] | :class:`~compas.geometry.Plane`
The plane of the circle.
radius : float
The radius of the circle.
Attributes
----------
plane : :class:`~compas.geometry.Plane`
The plane of the circle.
radius : float
The radius of the circle.
center : :class:`~compas.geometry.Point`
The center of the circle.
normal : :class:`~compas.geometry.Vector`, read-only
The normal of the circle.
diameter : float, read-only
The diameter of the circle.
area : float, read-only
The area of the circle.
circumference : float, read-only
The circumference of the circle.
Examples
--------
>>> from compas.geometry import Plane
>>> from compas.geometry import Circle
>>> plane = Plane([0, 0, 0], [0, 0, 1])
>>> circle = Circle(plane, 5)
"""
__slots__ = ["_plane", "_radius"]
def __init__(self, plane, radius, **kwargs):
super(Circle, self).__init__(**kwargs)
self._plane = None
self._radius = None
self.plane = plane
self.radius = radius
# ==========================================================================
# data
# ==========================================================================
@property
def DATASCHEMA(self):
""":class:`schema.Schema` : Schema of the data representation."""
import schema
return schema.Schema(
{
"plane": Plane.DATASCHEMA.fget(None),
"radius": schema.And(float, lambda x: x > 0),
}
)
@property
def JSONSCHEMANAME(self):
"""str : Name of the schema of the data representation in JSON format."""
return "circle"
@property
def data(self):
"""dict : The data dictionary that represents the circle."""
return {"plane": self.plane.data, "radius": self.radius}
@data.setter
def data(self, data):
self.plane = Plane.from_data(data["plane"])
self.radius = data["radius"]
@classmethod
def from_data(cls, data):
"""Construct a circle from its data representation.
Parameters
----------
data : dict
The data dictionary.
Returns
-------
:class:`~compas.geometry.Circle`
The constructed circle.
Examples
--------
>>> from compas.geometry import Circle
>>> data = {'plane': {'point': [0.0, 0.0, 0.0], 'normal': [0.0, 0.0, 1.0]}, 'radius': 5.}
>>> circle = Circle.from_data(data)
"""
return cls(Plane.from_data(data["plane"]), data["radius"])
# ==========================================================================
# properties
# ==========================================================================
@property
def plane(self):
return self._plane
@plane.setter
def plane(self, plane):
self._plane = Plane(*plane)
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, radius):
self._radius = float(radius)
@property
def normal(self):
return self.plane.normal
@property
def diameter(self):
return self.radius * 2
@property
def center(self):
return self.plane.point
@center.setter
def center(self, point):
self.plane.point = point
@property
def area(self):
return pi * (self.radius**2)
@property
def circumference(self):
return 2 * pi * self.radius
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return "Circle({0!r}, {1!r})".format(self.plane, self.radius)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.plane
elif key == 1:
return self.radius
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.plane = value
elif key == 1:
self.radius = value
else:
raise KeyError
def __iter__(self):
return iter([self.plane, self.radius])
def __eq__(self, other):
try:
other_plane = other[0]
other_radius = other[1]
except: # noqa: E722
return False
return self.plane == other_plane and self.radius == other_radius
# ==========================================================================
# constructors
# ==========================================================================
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Transform the circle.
Parameters
----------
T : :class:`~compas.geometry.Transformation` | list[list[float]]
The transformation.
Returns
-------
None
Examples
--------
>>> from compas.geometry import Frame
>>> from compas.geometry import Transformation
>>> from compas.geometry import Plane
>>> from compas.geometry import Circle
>>> circle = Circle(Plane.worldXY(), 5)
>>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = Transformation.from_frame(frame)
>>> circle.transform(T)
"""
self.plane.transform(T)
| mit | b3a9ce03454dc0b90259dad51b799de4 | 26.098655 | 97 | 0.473771 | 4.634202 | false | false | false | false |
compas-dev/compas | src/compas_rhino/conversions/_shapes.py | 1 | 4128 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import Plane
from compas.geometry import Circle
from compas.geometry import Box
from compas.geometry import Sphere
from compas.geometry import Cone
from compas.geometry import Cylinder
from Rhino.Geometry import Box as RhinoBox
from Rhino.Geometry import Sphere as RhinoSphere
from Rhino.Geometry import Cone as RhinoCone
from Rhino.Geometry import Cylinder as RhinoCylinder
from Rhino.Geometry import Interval
from ._primitives import plane_to_rhino
from ._primitives import circle_to_rhino
from ._primitives import frame_to_rhino
from ._primitives import point_to_rhino
from ._primitives import plane_to_compas_frame
from ._primitives import plane_to_compas
from ._primitives import point_to_compas
from ._primitives import vector_to_compas
def box_to_compas(box):
"""Convert a Rhino box to a COMPAS box.
Parameters
----------
box: :rhino:`Rhino.Geometry.Box`
Returns
-------
:class:`~compas.geometry.Box`
"""
xsize = box.X.Length
ysize = box.Y.Length
zsize = box.Z.Length
frame = plane_to_compas_frame(box.Plane)
frame.point += frame.xaxis * 0.5 * xsize
frame.point += frame.yaxis * 0.5 * ysize
frame.point += frame.zaxis * 0.5 * zsize
return Box(frame, xsize, ysize, zsize)
def box_to_rhino(box):
"""Convert a COMPAS box to a Rhino box.
Parameters
----------
box: :class:`~compas.geometry.Box`
Returns
-------
:rhino:`Rhino.Geometry.Box`
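Examples
--------
A sketch that only runs inside Rhino; the volume is preserved:
>>> from compas.geometry import Box, Frame
>>> rbox = box_to_rhino(Box(Frame.worldXY(), 1, 2, 3))
>>> rbox.Volume
6.0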
"""
# compas frame is center of box, intervals are in frame space
base_plane = box.frame.copy()
base_plane.point -= base_plane.xaxis * 0.5 * box.xsize
base_plane.point -= base_plane.yaxis * 0.5 * box.ysize
base_plane.point -= base_plane.zaxis * 0.5 * box.zsize
return RhinoBox(
frame_to_rhino(base_plane),
Interval(0, box.xsize),
Interval(0, box.ysize),
Interval(0, box.zsize),
)
def sphere_to_compas(sphere):
"""Convert a Rhino sphere to a COMPAS sphere.
Parameters
----------
sphere: :rhino:`Rhino.Geometry.Sphere`
Returns
-------
:class:`~compas.geometry.Sphere`
"""
return Sphere(point_to_compas(sphere.Center), sphere.Radius)
def sphere_to_rhino(sphere):
"""Convert a COMPAS sphere to a Rhino sphere.
Parameters
----------
sphere: :class:`~compas.geometry.Sphere`
Returns
-------
:rhino:`Rhino.Geometry.Sphere`
"""
return RhinoSphere(point_to_rhino(sphere.point), sphere.radius)
def cone_to_compas(cone):
"""Convert a Rhino cone to a COMPAS cone.
Parameters
----------
cone: :rhino:`Rhino.Geometry.Cone`
Returns
-------
:class:`~compas.geometry.Cone`
"""
plane = Plane(cone.BasePoint, vector_to_compas(cone.Plane.Normal).inverted())
return Cone(Circle(plane, cone.Radius), cone.Height)
def cone_to_rhino(cone):
"""Convert a COMPAS cone to a Rhino cone.
Parameters
----------
cone: :class:`~compas.geometry.Cone`
Returns
-------
:rhino:`Rhino.Geometry.Cone`
"""
return RhinoCone(plane_to_rhino(cone.circle.plane), cone.height, cone.circle.radius)
def cylinder_to_compas(cylinder):
"""Convert a Rhino cylinder to a COMPAS cylinder.
Parameters
----------
cylinder: :rhino:`Rhino.Geometry.Cylinder`
Returns
-------
:class:`~compas.geometry.Cylinder`
"""
plane = plane_to_compas(cylinder.BasePlane)
height = cylinder.TotalHeight
plane.point += plane.normal * (0.5 * height)
return Cylinder(Circle(plane, cylinder.Radius), height)
def cylinder_to_rhino(cylinder):
"""Convert a COMPAS cylinder to a Rhino cylinder.
Parameters
----------
cylinder: :class:`~compas.geometry.Cylinder`
Returns
-------
:rhino:`Rhino.Geometry.Cylinder`
"""
circle = cylinder.circle.copy()
height = cylinder.height
circle.plane.point += circle.plane.normal * (-0.5 * height)
return RhinoCylinder(circle_to_rhino(circle), cylinder.height)
| mit | e979bbf54cbd80567250cee47b0498b5 | 23.426036 | 88 | 0.654312 | 3.286624 | false | false | false | false |
compas-dev/compas | src/compas_rhino/conduits/lines.py | 1 | 3263 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from System.Drawing.Color import FromArgb
from Rhino.Geometry import Point3d
from compas.utilities import is_sequence_of_iterable
from compas.utilities import iterable_like
from .base import BaseConduit
class LinesConduit(BaseConduit):
"""A Rhino display conduit for lines.
Parameters
----------
lines : list[[point, point] | :class:`~compas.geometry.Line`]
A list of start-end point pairs that define the lines.
thickness : list[int], optional
The thickness of the individual lines.
Default is :attr:`LinesConduit.default_thickness` for all lines.
color : list[tuple[int, int, int]], optional
The colors of the faces.
Default is :attr:`LinesConduit.default_color` for all lines.
Attributes
----------
color : list[System.Drawing.Color]
A color per line.
thickness : list[float]
A thickness per line.
Class Attributes
----------------
default_thickness : float
The default thickness is ``1.0``.
default_color : System.Drawing.Color
        The default color is ``FromArgb(255, 255, 255)``.
Examples
--------
.. code-block:: python
from random import randint
points = [(1.0 * randint(0, 30), 1.0 * randint(0, 30), 0.0) for _ in range(100)]
lines = [(points[i], points[i + 1]) for i in range(99)]
conduit = LinesConduit(lines)
with conduit.enabled():
for i in range(100):
points = [(1.0 * randint(0, 30), 1.0 * randint(0, 30), 0.0) for _ in range(100)]
conduit.lines = [(points[i], points[i + 1]) for i in range(99)]
conduit.redraw(pause=0.1)
"""
default_thickness = 1.0
default_color = FromArgb(255, 255, 255)
def __init__(self, lines, thickness=None, color=None, **kwargs):
super(LinesConduit, self).__init__(**kwargs)
self._thickness = None
self._color = None
self.lines = lines or []
self.thickness = thickness
self.color = color
@property
def thickness(self):
return self._thickness
@thickness.setter
def thickness(self, thickness):
thickness = thickness or self.default_thickness
try:
len(thickness)
except TypeError:
thickness = [thickness]
thickness = iterable_like(self.lines, thickness, self.default_thickness)
self._thickness = list(thickness)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
color = color or self.default_color
if not is_sequence_of_iterable(color):
color = [color]
self._color = [FromArgb(*c) for c in iterable_like(self.lines, color, self.default_color)]
def DrawForeground(self, e):
"""Draw the lines.
Parameters
----------
e : Rhino.Display.DrawEventArgs
Returns
-------
None
"""
for (start, end), color, thickness in zip(self.lines, self.color, self.thickness):
e.Display.DrawLine(Point3d(*start), Point3d(*end), color, thickness)
| mit | 4b86c1f7bd0646c4935a4971a2729274 | 28.93578 | 98 | 0.599755 | 3.847877 | false | false | false | false |
compas-dev/compas | src/compas/datastructures/mesh/conway.py | 1 | 17108 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
"mesh_conway_dual",
"mesh_conway_join",
"mesh_conway_ambo",
"mesh_conway_kis",
"mesh_conway_needle",
"mesh_conway_zip",
"mesh_conway_truncate",
"mesh_conway_ortho",
"mesh_conway_expand",
"mesh_conway_gyro",
"mesh_conway_snub",
"mesh_conway_meta",
"mesh_conway_bevel",
]
def mesh_conway_dual(mesh):
"""Generates the dual mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The dual mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> dual = mesh_conway_dual(mesh)
>>> dual.number_of_vertices() == mesh.number_of_faces()
True
>>> dual.number_of_edges() == mesh.number_of_edges()
True
>>> dual.number_of_faces() == mesh.number_of_vertices()
True
"""
cls = type(mesh)
vertices = [mesh.face_centroid(fkey) for fkey in mesh.faces()]
old_faces_to_new_vertices = {fkey: i for i, fkey in enumerate(mesh.faces())}
faces = [
[old_faces_to_new_vertices[fkey] for fkey in reversed(mesh.vertex_faces(vkey, ordered=True))]
for vkey in mesh.vertices()
if not mesh.is_vertex_on_boundary(vkey) and len(mesh.vertex_neighbors(vkey)) != 0
]
return cls.from_vertices_and_faces(vertices, faces)
def mesh_conway_join(mesh):
"""Generates the join mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The join mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> join = mesh_conway_join(mesh)
>>> join.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces()
True
>>> join.number_of_edges() == 2 * mesh.number_of_edges()
True
>>> join.number_of_faces() == mesh.number_of_edges()
True
"""
cls = type(mesh)
vertices = [mesh.vertex_coordinates(vkey) for vkey in mesh.vertices()]
vertices += [mesh.face_centroid(fkey) for fkey in mesh.faces()]
v = mesh.number_of_vertices()
vkey_index = {vkey: i for i, vkey in enumerate(mesh.vertices())}
fkey_index = {fkey: i + v for i, fkey in enumerate(mesh.faces())}
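    # each interior edge (u, v) becomes a quad alternating between the edge
    # endpoints and the centroids of the two faces adjacent to the edge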
faces = [
[
vkey_index[u],
fkey_index[mesh.halfedge[v][u]],
vkey_index[v],
fkey_index[mesh.halfedge[u][v]],
]
for u, v in mesh.edges()
if not mesh.is_edge_on_boundary(u, v)
]
join_mesh = cls.from_vertices_and_faces(vertices, faces)
    # remove vertices that are no longer used by any face (e.g. along the boundary)
join_mesh.cull_vertices()
return join_mesh
def mesh_conway_ambo(mesh):
"""Generates the ambo mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The ambo mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> ambo = mesh_conway_ambo(mesh)
>>> ambo.number_of_vertices() == mesh.number_of_edges()
True
>>> ambo.number_of_edges() == 2 * mesh.number_of_edges()
True
>>> ambo.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces()
True
"""
return mesh_conway_dual(mesh_conway_join(mesh))
def mesh_conway_kis(mesh):
"""Generates the kis mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The kis mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> kis = mesh_conway_kis(mesh)
>>> kis.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces()
True
>>> kis.number_of_edges() == 3 * mesh.number_of_edges()
True
>>> kis.number_of_faces() == 2 * mesh.number_of_edges()
True
"""
cls = type(mesh)
vertices = [mesh.vertex_coordinates(vkey) for vkey in mesh.vertices()]
vertices += [mesh.face_centroid(fkey) for fkey in mesh.faces()]
v = mesh.number_of_vertices()
vkey_index = {vkey: i for i, vkey in enumerate(mesh.vertices())}
fkey_index = {fkey: i + v for i, fkey in enumerate(mesh.faces())}
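    # each halfedge (u, v) of a face becomes a triangle connecting the two
    # edge vertices to the centroid of the face the halfedge belongs to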
faces = [
[vkey_index[u], vkey_index[v], fkey_index[mesh.halfedge[u][v]]]
for fkey in mesh.faces()
for u, v in mesh.face_halfedges(fkey)
]
return cls.from_vertices_and_faces(vertices, faces)
def mesh_conway_needle(mesh):
"""Generates the needle mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The needle mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> needle = mesh_conway_needle(mesh)
>>> needle.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces()
True
>>> needle.number_of_edges() == 3 * mesh.number_of_edges()
True
>>> needle.number_of_faces() == 2 * mesh.number_of_edges()
True
"""
return mesh_conway_kis(mesh_conway_dual(mesh))
def mesh_conway_zip(mesh):
"""Generates the zip mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The zip mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> zipp = mesh_conway_zip(mesh)
>>> zipp.number_of_vertices() == 2 * mesh.number_of_edges()
True
>>> zipp.number_of_edges() == 3 * mesh.number_of_edges()
True
>>> zipp.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces()
True
"""
return mesh_conway_dual(mesh_conway_kis(mesh))
def mesh_conway_truncate(mesh):
"""Generates the truncate mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The truncate mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> trun = mesh_conway_truncate(mesh)
>>> trun.number_of_vertices() == 2 * mesh.number_of_edges()
True
>>> trun.number_of_edges() == 3 * mesh.number_of_edges()
True
>>> trun.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces()
True
"""
    # equivalent to mesh_conway_dual(mesh_conway_needle(mesh))
return mesh_conway_dual(mesh_conway_kis(mesh_conway_dual(mesh)))
def mesh_conway_ortho(mesh):
"""Generates the ortho mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The ortho mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> orth = mesh_conway_ortho(mesh)
>>> orth.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces() + mesh.number_of_edges()
True
>>> orth.number_of_edges() == 4 * mesh.number_of_edges()
True
>>> orth.number_of_faces() == 2 * mesh.number_of_edges()
True
"""
return mesh_conway_join(mesh_conway_join(mesh))
def mesh_conway_expand(mesh):
"""Generates the expand mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The expand mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> expa = mesh_conway_expand(mesh)
>>> expa.number_of_vertices() == 2 * mesh.number_of_edges()
True
>>> expa.number_of_edges() == 4 * mesh.number_of_edges()
True
>>> expa.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces() + mesh.number_of_edges()
True
"""
return mesh_conway_ambo(mesh_conway_ambo(mesh))
def mesh_conway_gyro(mesh):
"""Generates the gyro mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The gyro mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> gyro = mesh_conway_gyro(mesh)
>>> gyro.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces() + 2 * mesh.number_of_edges()
True
>>> gyro.number_of_edges() == 5 * mesh.number_of_edges()
True
>>> gyro.number_of_faces() == 2 * mesh.number_of_edges()
True
"""
cls = type(mesh)
vertices = [mesh.vertex_coordinates(vkey) for vkey in mesh.vertices()]
vertices += [mesh.face_centroid(fkey) for fkey in mesh.faces()]
vertices += [mesh.edge_point(u, v, t=0.33) for u in mesh.vertices() for v in mesh.halfedge[u]]
V = mesh.number_of_vertices()
F = mesh.number_of_faces()
vkey_index = {vkey: i for i, vkey in enumerate(mesh.vertices())}
fkey_index = {fkey: i + V for i, fkey in enumerate(mesh.faces())}
ekey_index = {
halfedge: i + V + F for i, halfedge in enumerate([(u, v) for u in mesh.vertices() for v in mesh.halfedge[u]])
}
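    # each halfedge (u, v) of a face becomes a pentagon: the two edge points
    # on the shared edge, the head vertex v, the edge point toward the next
    # face vertex, and the centroid of the face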
faces = []
for fkey in mesh.faces():
for u, v in mesh.face_halfedges(fkey):
faces.append(
[
ekey_index[u, v],
ekey_index[v, u],
vkey_index[v],
ekey_index[v, mesh.face_vertex_descendant(fkey, v)],
fkey_index[mesh.halfedge[u][v]],
]
)
return cls.from_vertices_and_faces(vertices, faces)
def mesh_conway_snub(mesh):
"""Generates the snub mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
        The snub mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> snub = mesh_conway_snub(mesh)
>>> snub.number_of_vertices() == 2 * mesh.number_of_edges()
True
>>> snub.number_of_edges() == 5 * mesh.number_of_edges()
True
>>> snub.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces() + 2 * mesh.number_of_edges()
True
"""
return mesh_conway_dual(mesh_conway_gyro(mesh_conway_dual(mesh)))
def mesh_conway_meta(mesh):
"""Generates the meta mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The meta mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> meta = mesh_conway_meta(mesh)
>>> meta.number_of_vertices() == mesh.number_of_vertices() + mesh.number_of_faces() + mesh.number_of_edges()
True
>>> meta.number_of_edges() == 6 * mesh.number_of_edges()
True
>>> meta.number_of_faces() == 4 * mesh.number_of_edges()
True
"""
return mesh_conway_kis(mesh_conway_join(mesh))
def mesh_conway_bevel(mesh):
"""Generates the bevel mesh from a seed mesh.
Parameters
----------
mesh : :class:`~compas.datastructures.Mesh`
A seed mesh
Returns
-------
:class:`~compas.datastructures.Mesh`
The bevel mesh.
References
----------
Based on [1]_ and [2]_.
.. [1] Wikipedia. *Conway polyhedron notation*.
Available at: https://en.wikipedia.org/wiki/Conway_polyhedron_notation.
.. [2] Hart, George. *Conway Notation for Polyhedron*.
Available at: http://www.georgehart.com/virtual-polyhedra/conway_notation.html.
Examples
--------
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_polyhedron(6)
>>> bevl = mesh_conway_bevel(mesh)
>>> bevl.number_of_vertices() == 4 * mesh.number_of_edges()
True
>>> bevl.number_of_edges() == 6 * mesh.number_of_edges()
True
>>> bevl.number_of_faces() == mesh.number_of_vertices() + mesh.number_of_faces() + mesh.number_of_edges()
True
"""
return mesh_conway_truncate(mesh_conway_ambo(mesh))
| mit | f585af4c2291ee2c68f244855017514b | 28.701389 | 117 | 0.597264 | 3.363082 | false | false | false | false |
widdowquinn/pyani | tests/test_subcmd_01_download.py | 1 | 4394 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2016-2019
# (c) The University of Strathclude 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute of Pharmaceutical and Biomedical Sciences
# The University of Strathclyde
# 161 Cathedral Street
# Glasgow
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# (c) The University of Strathclude 2019-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Test download subcommand for pyani.
The test suite is intended to be run from the repository root using:
pytest -v
Each command CMD available at the command line as pyani <CMD> is
tested in its own class as a subclass of unittest.TestCase, where
setUp() defines input/output files, a null logger (which is also
picked up by pytest), and a dictionary of command lines, keyed
by test name, with values representing command-line options.
For each test, command line options are defined in a Namespace and
passed as the sole argument to the appropriate subcommand.
As the download operations are slow, and subject to network issues,
especially with CI, we mock the download operations.
"""
import logging
from argparse import Namespace
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import pytest
from pyani import download
from pyani.scripts import subcommands
from tools import modify_namespace
@pytest.fixture
def api_keypath():
"""NCBI API key path."""
return Path("~/.ncbi/api_key")
@pytest.fixture
def base_download_namespace(api_keypath, path_fixtures_base, tmp_path):
"""Base namespace for the pyani download subcommand."""
return Namespace(
outdir=tmp_path / "C_blochmannia",
taxon="203804",
email="my.email@my.domain",
retries=20,
batchsize=10000,
timeout=10,
classfname="classes.txt",
labelfname="labels.txt",
kraken=False,
force=True,
noclobber=False,
dryrun=False,
disable_tqdm=True,
api_keypath=api_keypath,
)
@pytest.fixture
def dryrun_namespace(base_download_namespace):
"""Namespace for pyani download dry run."""
return modify_namespace(base_download_namespace, dryrun=True)
@pytest.fixture
def kraken_namespace(base_download_namespace, tmp_path):
"""Namespace for downloading C. blochmannia with Kraken labels."""
return modify_namespace(
base_download_namespace, kraken=True, outdir=tmp_path / "kraken"
)
# Create object for accessing unittest assertions
assertions = TestCase("__init__")
def test_create_hash():
"""Test that the expected exception is raised if the file doesn't exist."""
test_file = "/this/is/not/a/file"
with assertions.assertRaises(download.PyaniIndexException):
download.create_hash(test_file)
def test_download_dry_run(dryrun_namespace):
"""Dry run of C. blochmannia download."""
subcommands.subcmd_download(dryrun_namespace)
def test_download_c_blochmannia(base_download_namespace):
"""Test C. blochmannia download."""
subcommands.subcmd_download(base_download_namespace)
def test_download_kraken(kraken_namespace):
"""C. blochmannia download in Kraken format."""
subcommands.subcmd_download(kraken_namespace)
| mit | 8f21adff0debbee2b4db9bac249166a8 | 30.84058 | 79 | 0.738052 | 3.73322 | false | true | false | false |
zzzeek/alembic | alembic/ddl/mysql.py | 2 | 16420 | from __future__ import annotations
import re
from typing import Any
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import schema
from sqlalchemy import types as sqltypes
from sqlalchemy.ext.compiler import compiles
from .base import alter_table
from .base import AlterColumn
from .base import ColumnDefault
from .base import ColumnName
from .base import ColumnNullable
from .base import ColumnType
from .base import format_column_name
from .base import format_server_default
from .impl import DefaultImpl
from .. import util
from ..autogenerate import compare
from ..util import sqla_compat
from ..util.sqla_compat import _is_mariadb
from ..util.sqla_compat import _is_type_bound
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler
from sqlalchemy.sql.ddl import DropConstraint
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.type_api import TypeEngine
from .base import _ServerDefault
class MySQLImpl(DefaultImpl):
__dialect__ = "mysql"
transactional_ddl = False
type_synonyms = DefaultImpl.type_synonyms + (
{"BOOL", "TINYINT"},
{"JSON", "LONGTEXT"},
)
type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"]
def alter_column( # type:ignore[override]
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Union["_ServerDefault", "Literal[False]"] = False,
name: Optional[str] = None,
type_: Optional["TypeEngine"] = None,
schema: Optional[str] = None,
existing_type: Optional["TypeEngine"] = None,
existing_server_default: Optional["_ServerDefault"] = None,
existing_nullable: Optional[bool] = None,
autoincrement: Optional[bool] = None,
existing_autoincrement: Optional[bool] = None,
comment: Optional[Union[str, "Literal[False]"]] = False,
existing_comment: Optional[str] = None,
**kw: Any,
) -> None:
if sqla_compat._server_default_is_identity(
server_default, existing_server_default
) or sqla_compat._server_default_is_computed(
server_default, existing_server_default
):
# modifying computed or identity columns is not supported
# the default will raise
super(MySQLImpl, self).alter_column(
table_name,
column_name,
nullable=nullable,
type_=type_,
schema=schema,
existing_type=existing_type,
existing_nullable=existing_nullable,
server_default=server_default,
existing_server_default=existing_server_default,
**kw,
)
if name is not None or self._is_mysql_allowed_functional_default(
type_ if type_ is not None else existing_type, server_default
):
self._exec(
MySQLChangeColumn(
table_name,
column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=nullable
if nullable is not None
else existing_nullable
if existing_nullable is not None
else True,
type_=type_ if type_ is not None else existing_type,
default=server_default
if server_default is not False
else existing_server_default,
autoincrement=autoincrement
if autoincrement is not None
else existing_autoincrement,
comment=comment
if comment is not False
else existing_comment,
)
)
elif (
nullable is not None
or type_ is not None
or autoincrement is not None
or comment is not False
):
self._exec(
MySQLModifyColumn(
table_name,
column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=nullable
if nullable is not None
else existing_nullable
if existing_nullable is not None
else True,
type_=type_ if type_ is not None else existing_type,
default=server_default
if server_default is not False
else existing_server_default,
autoincrement=autoincrement
if autoincrement is not None
else existing_autoincrement,
comment=comment
if comment is not False
else existing_comment,
)
)
elif server_default is not False:
self._exec(
MySQLAlterDefault(
table_name, column_name, server_default, schema=schema
)
)
def drop_constraint(
self,
const: "Constraint",
) -> None:
if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
return
super(MySQLImpl, self).drop_constraint(const)
def _is_mysql_allowed_functional_default(
self,
type_: Optional["TypeEngine"],
server_default: Union["_ServerDefault", "Literal[False]"],
) -> bool:
return (
type_ is not None
and type_._type_affinity # type:ignore[attr-defined]
is sqltypes.DateTime
and server_default is not None
)
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
# partially a workaround for SQLAlchemy issue #3023; if the
# column were created without "NOT NULL", MySQL may have added
# an implicit default of '0' which we need to skip
        # TODO: this is not really covered anymore?
if (
metadata_column.type._type_affinity is sqltypes.Integer
and inspector_column.primary_key
and not inspector_column.autoincrement
and not rendered_metadata_default
and rendered_inspector_default == "'0'"
):
return False
elif inspector_column.type._type_affinity is sqltypes.Integer:
rendered_inspector_default = (
re.sub(r"^'|'$", "", rendered_inspector_default)
if rendered_inspector_default is not None
else None
)
return rendered_inspector_default != rendered_metadata_default
elif rendered_inspector_default and rendered_metadata_default:
# adjust for "function()" vs. "FUNCTION" as can occur particularly
# for the CURRENT_TIMESTAMP function on newer MariaDB versions
# SQLAlchemy MySQL dialect bundles ON UPDATE into the server
# default; adjust for this possibly being present.
onupdate_ins = re.match(
r"(.*) (on update.*?)(?:\(\))?$",
rendered_inspector_default.lower(),
)
onupdate_met = re.match(
r"(.*) (on update.*?)(?:\(\))?$",
rendered_metadata_default.lower(),
)
if onupdate_ins:
if not onupdate_met:
return True
elif onupdate_ins.group(2) != onupdate_met.group(2):
return True
rendered_inspector_default = onupdate_ins.group(1)
rendered_metadata_default = onupdate_met.group(1)
return re.sub(
r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower()
) != re.sub(
r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower()
)
else:
return rendered_inspector_default != rendered_metadata_default
def correct_for_autogen_constraints(
self,
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
):
# TODO: if SQLA 1.0, make use of "duplicates_index"
# metadata
removed = set()
for idx in list(conn_indexes):
if idx.unique:
continue
# MySQL puts implicit indexes on FK columns, even if
# composite and even if MyISAM, so can't check this too easily.
# the name of the index may be the column name or it may
# be the name of the FK constraint.
for col in idx.columns:
if idx.name == col.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
for fk in col.foreign_keys:
if fk.name == idx.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
if idx.name in removed:
break
# then remove indexes from the "metadata_indexes"
# that we've removed from reflected, otherwise they come out
# as adds (see #202)
for idx in list(metadata_indexes):
if idx.name in removed:
metadata_indexes.remove(idx)
def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
conn_fk_by_sig = dict(
(compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks
)
metadata_fk_by_sig = dict(
(compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks
)
for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
mdfk = metadata_fk_by_sig[sig]
cnfk = conn_fk_by_sig[sig]
# MySQL considers RESTRICT to be the default and doesn't
# report on it. if the model has explicit RESTRICT and
# the conn FK has None, set it to RESTRICT
if (
mdfk.ondelete is not None
and mdfk.ondelete.lower() == "restrict"
and cnfk.ondelete is None
):
cnfk.ondelete = "RESTRICT"
if (
mdfk.onupdate is not None
and mdfk.onupdate.lower() == "restrict"
and cnfk.onupdate is None
):
cnfk.onupdate = "RESTRICT"
class MariaDBImpl(MySQLImpl):
__dialect__ = "mariadb"
class MySQLAlterDefault(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
default: "_ServerDefault",
schema: Optional[str] = None,
) -> None:
super(AlterColumn, self).__init__(name, schema=schema)
self.column_name = column_name
self.default = default
class MySQLChangeColumn(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
schema: Optional[str] = None,
newname: Optional[str] = None,
type_: Optional["TypeEngine"] = None,
nullable: Optional[bool] = None,
default: Optional[Union["_ServerDefault", "Literal[False]"]] = False,
autoincrement: Optional[bool] = None,
comment: Optional[Union[str, "Literal[False]"]] = False,
) -> None:
super(AlterColumn, self).__init__(name, schema=schema)
self.column_name = column_name
self.nullable = nullable
self.newname = newname
self.default = default
self.autoincrement = autoincrement
self.comment = comment
if type_ is None:
raise util.CommandError(
"All MySQL CHANGE/MODIFY COLUMN operations "
"require the existing type."
)
self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
pass
@compiles(ColumnNullable, "mysql", "mariadb")
@compiles(ColumnName, "mysql", "mariadb")
@compiles(ColumnDefault, "mysql", "mariadb")
@compiles(ColumnType, "mysql", "mariadb")
def _mysql_doesnt_support_individual(element, compiler, **kw):
raise NotImplementedError(
"Individual alter column constructs not supported by MySQL"
)
@compiles(MySQLAlterDefault, "mysql", "mariadb")
def _mysql_alter_default(
element: "MySQLAlterDefault", compiler: "MySQLDDLCompiler", **kw
) -> str:
return "%s ALTER COLUMN %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
"SET DEFAULT %s" % format_server_default(compiler, element.default)
if element.default is not None
else "DROP DEFAULT",
)
@compiles(MySQLModifyColumn, "mysql", "mariadb")
def _mysql_modify_column(
element: "MySQLModifyColumn", compiler: "MySQLDDLCompiler", **kw
) -> str:
return "%s MODIFY %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement,
comment=element.comment,
),
)
@compiles(MySQLChangeColumn, "mysql", "mariadb")
def _mysql_change_column(
element: "MySQLChangeColumn", compiler: "MySQLDDLCompiler", **kw
) -> str:
return "%s CHANGE %s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement,
comment=element.comment,
),
)
def _mysql_colspec(
compiler: "MySQLDDLCompiler",
nullable: Optional[bool],
server_default: Optional[Union["_ServerDefault", "Literal[False]"]],
type_: "TypeEngine",
autoincrement: Optional[bool],
comment: Optional[Union[str, "Literal[False]"]],
) -> str:
spec = "%s %s" % (
compiler.dialect.type_compiler.process(type_),
"NULL" if nullable else "NOT NULL",
)
if autoincrement:
spec += " AUTO_INCREMENT"
if server_default is not False and server_default is not None:
spec += " DEFAULT %s" % format_server_default(compiler, server_default)
if comment:
spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
return spec
@compiles(schema.DropConstraint, "mysql", "mariadb")
def _mysql_drop_constraint(
element: "DropConstraint", compiler: "MySQLDDLCompiler", **kw
) -> str:
"""Redefine SQLAlchemy's drop constraint to
raise errors for invalid constraint type."""
constraint = element.element
if isinstance(
constraint,
(
schema.ForeignKeyConstraint,
schema.PrimaryKeyConstraint,
schema.UniqueConstraint,
),
):
assert not kw
return compiler.visit_drop_constraint(element)
elif isinstance(constraint, schema.CheckConstraint):
# note that SQLAlchemy as of 1.2 does not yet support
# DROP CONSTRAINT for MySQL/MariaDB, so we implement fully
# here.
if _is_mariadb(compiler.dialect):
return "ALTER TABLE %s DROP CONSTRAINT %s" % (
compiler.preparer.format_table(constraint.table),
compiler.preparer.format_constraint(constraint),
)
else:
return "ALTER TABLE %s DROP CHECK %s" % (
compiler.preparer.format_table(constraint.table),
compiler.preparer.format_constraint(constraint),
)
else:
raise NotImplementedError(
"No generic 'DROP CONSTRAINT' in MySQL - "
"please specify constraint type"
)
| mit | 7bb6fde74dd1629691d5ab8d4183c304 | 34.464363 | 79 | 0.570463 | 4.458322 | false | false | false | false |
cfelton/rhea | rhea/models/usbext/fx2/usbp_host.py | 2 | 3073 | #
# Copyright (c) 2011-2013 Christopher L. Felton
#
from __future__ import print_function, absolute_import
import myhdl
from myhdl import delay, always, instances
from .fx2_model import Fx2Model
class UsbpHost(Fx2Model):
"""
"""
def __init__(self):
Fx2Model.__init__(self, FifoSize=512, Config=0)
def WriteAddress(self, addr, data):
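        # Wire format, inferred from the assignments below: bytes 0-1 are a
        # magic header (0xDE 0xCA), byte 2 the command, bytes 3-4 the 16-bit
        # register address, byte 5 the payload length, bytes 6-7 a trailer
        # (0xFB 0xAD), and byte 8 the data byte to write.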
wbuf = [0xDE, 0xCA, 0x01, 0x00, 0x00, 0x01, 0xFB, 0xAD, 0x00]
rbuf = [0 for ii in range(9)]
wbuf[3] = (addr >> 8) & 0xFF
wbuf[4] = addr & 0xFF
wbuf[5] = 1
wbuf[8] = data
self.Write(wbuf, self.EP2)
while not self.IsEmpty(self.EP2):
yield delay(2*self.IFCLK_TICK)
while not self.IsData(self.EP6, 9):
yield delay(2*self.IFCLK_TICK)
for i in range(9):
rbuf[i] = self.Read(self.EP6)
        # The last byte is the previous value of the register; it will not match.
for i in range(8):
if wbuf[i] != rbuf[i]:
print("wbuf ", wbuf)
print('rbuf ', rbuf)
assert wbuf[i] == rbuf[i], "Write Address Failed wbuf[%d](%02x) != rbuf[%d](%02x)" % (i, wbuf[i], i, rbuf[i])
def ReadAddress(self, addr, data, w=1):
plen = 8+w
wbuf = [0]*plen
rbuf = [0]*plen
wbuf[0] = 0xDE
wbuf[1] = 0xCA
wbuf[2] = 0x02
wbuf[3] = (addr >> 8) & 0xFF
wbuf[4] = addr & 0xFF
wbuf[5] = w
wbuf[6] = 0xFB
wbuf[7] = 0xAD
self.Write(wbuf, self.EP2)
while not self.IsEmpty(self.EP2):
yield delay(2*self.IFCLK_TICK)
while not self.IsData(self.EP6, plen):
yield delay(2*self.IFCLK_TICK)
for i in range(plen):
rbuf[i] = self.Read(self.EP6)
for i in range(8):
if wbuf[i] != rbuf[i]:
print('[%d] wbuf %s' % (i, wbuf))
print('[%d] rbuf %s' % (i, rbuf))
raise AssertionError("Read Address Failed wbuf[%d](%02x) != rbuf[%d](%02x)" % (i, wbuf[i], i, rbuf[i]))
if w == 1:
data[0] = rbuf[8]
elif w == 2:
data[0] = (rbuf[8] << 8) | rbuf[9]
elif w == 3:
data[0] = (rbuf[8] << 16) | (rbuf[9] << 8) | rbuf[10]
        elif w == 4:
            # assemble the four data bytes, most significant byte first
            data[0] = (rbuf[8] << 24) | (rbuf[9] << 16) | (rbuf[10] << 8) | rbuf[11]
print(rbuf[8], rbuf[9], rbuf[10], rbuf[11])
# ---------------------------------------------------------------------------
def HostReadRequest(self, DataInRdy48, BufferSize=4096):
""" Emulated Host request
In an actual system the host will request N bytes from the USB system.
        This is a separate generator that will do a ReadBlock; if a block is
        available it will copy it to the HostQueue.
        For simulation only.
"""
@always(DataInRdy48.posedge)
def tm_host_request():
pass
        return myhdl.instances()
| mit | f9f9cd57db5aee1adcf1193671e54e3a | 29.73 | 121 | 0.485194 | 3.060757 | false | false | false | false |
cfelton/rhea | rhea/cores/video/lcd/lt24lcd_init_sequence.py | 2 | 3672 |
"""
The following describes the display initialization sequence. This
init sequence was adapted from the display init sequence provided
in the terasic example ILI9341.c
Refer to the ILI9341 datasheet for more information.
Init sequence overview
----------------------
0x11: (section 8.2.12) turn off sleep mode, the datasheet indicates
a delay of 5 msecs before next command and 120ms before this
command can be sent if in sleep mode (0x10).
0xCF: (8.4.2) Power control B, DC enable
0xED: (8.4.6) Power on sequence control
0xE8: (8.4.3) Driver timing control A
0xCB: (8.4.1) Power control A
0xF7: (8.4.8) Pump ratio control
0xEA: (8.4.5) Driver timing control B
0xC0
The display sequence will be converted into a ROM, the sequence
ROM is organized as:
    00: data length (length of the data bytes to send)
01: pause, delay in ms after the command
02: command to send
03: command first data byte
...
    0cn: command's last data byte
...
0n: last command
"""
# init sequence from terasic uP code
# @todo: only a couple pauses, remove them from the sequence
# save space in the ROM
seq = []
seq += [dict(cmd=0x00,
data=[], pause=120)]
seq += [dict(cmd=0x11,
data=[], pause=5)]
seq += [dict(cmd=0xCF,
data=[0x00, 0x81, 0xC0], pause=0)]
seq += [dict(cmd=0xED,
data=[0x64, 0x03, 0x12, 0x81], pause=0)]
seq += [dict(cmd=0xE8,
data=[0x85, 0x00, 0x78], pause=0)]
seq += [dict(cmd=0xCB,
data=[0x39, 0x2C, 0x00, 0x34, 0x02], pause=0)]
seq += [dict(cmd=0xF7,
data=[0x20], pause=0)]
seq += [dict(cmd=0xEA,
data=[0x00, 0x00], pause=0)]
seq += [dict(cmd=0xB1,
data=[0x00, 0x1B], pause=0)]
seq += [dict(cmd=0xB6,
data=[0x0A, 0xA2], pause=0)]
seq += [dict(cmd=0xC0, # power control
data=[0x05], pause=0)]
seq += [dict(cmd=0xC1, # power control
data=[0x11], pause=0)]
seq += [dict(cmd=0xC5, # VCM control
data=[0x45, 0x45], pause=0)]
seq += [dict(cmd=0xC7, # VCM control 2
data=[0xA2], pause=0)]
seq += [dict(cmd=0x36, # memory access controll
data=[0x08], pause=0)]
seq += [dict(cmd=0xF2, # 3gamma functin diable
data=[0x00], pause=0)]
seq += [dict(cmd=0x26, # gamma set
data=[0x01, 0x30], pause=0)]
seq += [dict(cmd=0xE0, # set gamma table
data=[0x0F, 0x26, 0x24, 0x0B, 0x0E, 0x08,
0x4B, 0xA8, 0x3B, 0x0A, 0x14, 0x06,
0x10, 0x09, 0x00], pause=0)]
seq += [dict(cmd=0xE1, # set gamma table
data=[0x00, 0x1C, 0x20, 0x04, 0x10, 0x08,
0x34, 0x47, 0x44, 0x05, 0x0B, 0x09,
0x2F, 0x36, 0x0F], pause=0)]
seq += [dict(cmd=0x2A, data=[0x00, 0x00, 0x00, 0xEF], pause=0)]
seq += [dict(cmd=0x2B, data=[0x00, 0x00, 0x01, 0x3F], pause=0)]
seq += [dict(cmd=0x3A, data=[0x55], pause=30)]
seq += [dict(cmd=0xF6, data=[0x01, 0x30, 0x00], pause=0)]
seq += [dict(cmd=0x29, data=[], pause=30)] # display on
seq += [dict(cmd=0x2C, data=[], pause=30)]
init_sequence = seq
def build_init_rom(init_sequence):
mem, maxpause = [], 0
for info in init_sequence:
assert isinstance(info, dict)
cmd_entry = [len(info['data'])+3] + [info['pause']] + \
[info['cmd']] + info['data']
print("{cmd:02X} {pause} {data} {bb}".format(
bb=list(map(hex, cmd_entry)), **info))
maxpause = max(maxpause, info['pause'])
mem = mem + cmd_entry
rom = tuple(mem)
return rom, len(rom), maxpause
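# A minimal usage sketch (illustrative only):
#     rom, rom_size, maxpause = build_init_rom(init_sequence)
#     assert rom[0] == 3 and rom[2] == 0x00  # first entry: bare 0x00 command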
| mit | 53931a381e693ffdf91c7023bc61d0b4 | 31.495575 | 70 | 0.569172 | 2.767144 | false | false | false | false |
cfelton/rhea | rhea/cores/sdram/mem_test.py | 2 | 2400 |
from __future__ import absolute_import
import myhdl
from myhdl import Signal, enum, always_seq
from ..misc import random_generator
@myhdl.block
def mem_test(glbl, memmap, progress, error, done,
             start_address=0x00000000, end_address=0xFFFFFFFF):
    """
    This module performs a memory test over the memory-map bus: it writes a
    pseudorandom pattern across the address range, then reads the range back
    and compares against a regenerated copy of the same pattern.
    """
if end_address > memmap.addr.max:
end_address = memmap.addr.max
States = enum('init', 'write', 'write_ack', 'compare_read',
'compare', 'end')
state = Signal(States.init)
clock, reset = glbl.clock, glbl.reset
rglbl, randgen = random_generator.portmap.values()
randgen.data = Signal(memmap.wdata.val)
rglbl.clock, rglbl.reset = clock, reset
rand_inst = random_generator(glbl, randgen)
@always_seq(clock.posedge, reset=reset)
def beh():
# defaults
randgen.load.next = False
randgen.enable.next = False
if state == States.init:
randgen.load.next = True
randgen.enable.next = True
error.next = False
progress.next = 0
            memmap.addr.next = start_address
            state.next = States.write
elif state == States.write:
progress.next = 1
memmap.write.next = True
memmap.wdata.next = randgen.data
            state.next = States.write_ack
randgen.enable.next = True
elif state == States.write_ack:
memmap.write.next = False
if memmap.addr == end_address-1:
randgen.load.next = True
state.next = States.compare_read
memmap.addr.next = start_address
else:
memmap.addr.next = memmap.addr + 1
state.next = States.write
elif state == States.compare_read:
progress.next = 2
            memmap.read.next = True
            state.next = States.compare
elif state == States.compare:
memmap.read.next = False
randgen.enable.next = True
if memmap.rdata != randgen.data:
error.next = True
if memmap.addr == end_address-1:
state.next = States.end
else:
memmap.addr.next = memmap.addr + 1
                state.next = States.compare_read
elif state == States.end:
pass
else:
assert False, "Invalid state %s" % (state,)
return myhdl.instances()
| mit | ef1c961283e19a128aed6d940948fb5a | 28.62963 | 63 | 0.559583 | 3.947368 | false | false | false | false |
cfelton/rhea | rhea/cores/misc/uix/btn_mm_ctl.py | 2 | 1688 |
import myhdl
from myhdl import Signal, intbv, always_seq
from . import button_debounce
from rhea.cores.memmap import controller_basic
@myhdl.block
def button_controller(glbl, regbus, btns, led_addr=0x240):
""" Generate bus cycles from a button input
This is a nonsensical module that creates memory-mapped
bus cycles from a button press. It is used in simple
examples and demonstrations.
"""
clock, reset = glbl.clock, glbl.reset
dbtns = Signal(intbv(0)[len(btns):])
led_addr = intbv(led_addr)[16:]
# simple interface to control (invoke) the controller
ctl = regbus.get_generic()
# debounce the buttons
btn_inst = button_debounce(glbl, btns, dbtns)
# use the basic controller defined in the memmap modules
# this basic controller is very simple, a write strobe
# will start a write cycle and a read strobe a read cycle.
ctl_inst = controller_basic(ctl, regbus)
# @todo: finish, can't use the write's like they are
# but I need a bus agnostic method to read/write
# different buses.
@always_seq(clock.posedge, reset=reset)
def beh():
# default values
ctl.write.next = False
ctl.read.next = False
ctl.per_addr.next = led_addr[16:8]
ctl.mem_addr.next = led_addr[8:0]
if ctl.done:
            if dbtns != 0:
ctl.write.next = True
if dbtns[0]:
ctl.write_data.next = 1
elif dbtns[1]:
ctl.write_data.next = 2
elif dbtns[2]:
ctl.write_data.next = 3
elif dbtns[3]:
ctl.write_data.next = 4
return myhdl.instances()
| mit | 5a746199275fd4901c713df340837758 | 29.142857 | 62 | 0.614929 | 3.437882 | false | false | false | false |
reviewboard/rbtools | rbtools/commands/login.py | 1 | 1738 | from __future__ import unicode_literals
import logging
from rbtools.api.errors import AuthorizationError
from rbtools.commands import Command, CommandError
from rbtools.utils.users import get_authenticated_session
class Login(Command):
"""Logs into a Review Board server.
The user will be prompted for a username and password, unless otherwise
passed on the command line, allowing the user to log in and save a
session cookie without needing to be in a repository or posting to
the server.
If the user is already logged in, this won't do anything.
"""
name = 'login'
author = 'The Review Board Project'
needs_api = True
option_list = [
Command.server_options,
]
def main(self):
"""Run the command."""
session = self.api_root.get_session(expand='user')
was_authenticated = session.authenticated
if not was_authenticated:
try:
session = get_authenticated_session(api_client=self.api_client,
api_root=self.api_root,
auth_required=True,
session=session)
except AuthorizationError:
raise CommandError('Unable to log in to Review Board.')
if session.authenticated:
if not was_authenticated or (self.options.username and
self.options.password):
logging.info('Successfully logged in to Review Board.')
else:
logging.info('You are already logged in to Review Board at %s',
self.api_client.domain)
| mit | 04b23147d9db9d1d0482cfe5249a778d | 33.76 | 79 | 0.582278 | 5.023121 | false | false | false | false |
reviewboard/rbtools | rbtools/clients/tests/test_scmclient_registry.py | 1 | 8252 | """Unit tests for rbtools.clients.base.registry.
Version Added:
4.0
"""
import re
import sys
if sys.version_info[:2] >= (3, 10):
# Python >= 3.10
from importlib.metadata import EntryPoint, entry_points
else:
# Python <= 3.9
from importlib_metadata import EntryPoint, entry_points
import kgb
from rbtools.clients.base.scmclient import BaseSCMClient
from rbtools.clients.base.registry import SCMClientRegistry
from rbtools.clients.bazaar import BazaarClient
from rbtools.clients.clearcase import ClearCaseClient
from rbtools.clients.cvs import CVSClient
from rbtools.clients.errors import SCMClientNotFoundError
from rbtools.clients.git import GitClient
from rbtools.clients.mercurial import MercurialClient
from rbtools.clients.perforce import PerforceClient
from rbtools.clients.plastic import PlasticClient
from rbtools.clients.sos import SOSClient
from rbtools.clients.svn import SVNClient
from rbtools.clients.tfs import TFSClient
from rbtools.deprecation import RemovedInRBTools50Warning
from rbtools.testing import TestCase
class MySCMClient1(BaseSCMClient):
scmclient_id = 'my_client1'
class MySCMClient2(BaseSCMClient):
pass
class SCMClientRegistryTests(kgb.SpyAgency, TestCase):
"""Unit tests for SCMClientRegistry."""
def tearDown(self):
super().tearDown()
        # Tests will end up patching MySCMClient2.scmclient_id. Unset this.
MySCMClient2.scmclient_id = None
def test_init(self):
"""Testing SCMClientRegistry.__init__"""
registry = SCMClientRegistry()
self.assertEqual(registry._scmclient_classes, {})
self.assertFalse(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
def test_iter(self):
"""Testing SCMClientRegistry.__iter__"""
registry = SCMClientRegistry()
self._add_fake_entrypoints([
EntryPoint(name='my_client1',
value='%s:MySCMClient1' % __name__,
group='rbtools_scm_clients'),
EntryPoint(name='my_client2',
value='%s:MySCMClient2' % __name__,
group='rbtools_scm_clients'),
])
message = re.escape(
'MySCMClient2.scmclient_id must be set, and must be a unique '
'value. You probably want to set it to "my_client2".'
)
with self.assertWarnsRegex(RemovedInRBTools50Warning, message):
self.assertEqual(
list(registry),
[
BazaarClient,
ClearCaseClient,
CVSClient,
GitClient,
MercurialClient,
PerforceClient,
PlasticClient,
SOSClient,
SVNClient,
TFSClient,
MySCMClient1,
MySCMClient2,
])
self.assertTrue(registry._builtin_loaded)
self.assertTrue(registry._entrypoints_loaded)
def test_get_with_builtin(self):
"""Testing SCMClientRegistry.get with built-in SCMClient"""
registry = SCMClientRegistry()
self.assertIs(registry.get('git'), GitClient)
self.assertTrue(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
def test_get_with_entrypoint(self):
"""Testing SCMClientRegistry.get with entry point SCMClient"""
registry = SCMClientRegistry()
self._add_fake_entrypoints([
EntryPoint(name='my_client1',
value='%s:MySCMClient1' % __name__,
group='rbtools_scm_clients'),
])
self.assertIs(registry.get('my_client1'), MySCMClient1)
self.assertTrue(registry._builtin_loaded)
self.assertTrue(registry._entrypoints_loaded)
def test_get_with_entrypoint_no_scmclient_id(self):
"""Testing SCMClientRegistry.get with entry point SCMClient with no
scmclient_id set
"""
registry = SCMClientRegistry()
self._add_fake_entrypoints([
EntryPoint(name='my_client2',
value='%s:MySCMClient2' % __name__,
group='rbtools_scm_clients'),
])
message = re.escape(
'MySCMClient2.scmclient_id must be set, and must be a unique '
'value. You probably want to set it to "my_client2".'
)
with self.assertWarnsRegex(RemovedInRBTools50Warning, message):
scmclient_cls = registry.get('my_client2')
self.assertIs(scmclient_cls, MySCMClient2)
self.assertTrue(registry._builtin_loaded)
self.assertTrue(registry._entrypoints_loaded)
def test_get_with_entrypoint_and_missing(self):
"""Testing SCMClientRegistry.get with entry point SCMClient missing"""
registry = SCMClientRegistry()
self._add_fake_entrypoints([
EntryPoint(name='xxx',
value='%s:XXX' % __name__,
group='rbtools_scm_clients'),
])
message = re.escape(
'No client support was found for "xxx".'
)
with self.assertRaisesRegex(SCMClientNotFoundError, message) as ctx:
registry.get('xxx')
self.assertEqual(ctx.exception.scmclient_id, 'xxx')
self.assertTrue(registry._builtin_loaded)
self.assertTrue(registry._entrypoints_loaded)
def test_register(self):
"""Testing SCMClientRegistry.register"""
registry = SCMClientRegistry()
registry.register(MySCMClient1)
self.assertTrue(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
# This will have triggered a load of defaults, but not entry points.
self.assertEqual(
list(registry),
[
BazaarClient,
ClearCaseClient,
CVSClient,
GitClient,
MercurialClient,
PerforceClient,
PlasticClient,
SOSClient,
SVNClient,
TFSClient,
MySCMClient1,
])
def test_register_with_no_scmclient_id(self):
"""Testing SCMClientRegistry.register with no scmclient_id"""
registry = SCMClientRegistry()
message = re.compile(
'MySCMClient2.scmclient_id must be set, and must be a unique '
'value.'
)
with self.assertRaisesRegex(ValueError, message):
registry.register(MySCMClient2)
self.assertTrue(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
self.assertNotIn(MySCMClient1, registry)
def test_register_with_already_registered(self):
"""Testing SCMClientRegistry.register with class already registered"""
registry = SCMClientRegistry()
message = re.compile('GitClient is already registered.')
with self.assertRaisesRegex(ValueError, message):
registry.register(GitClient)
self.assertTrue(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
self.assertNotIn(MySCMClient1, registry)
def test_register_with_id_already_used(self):
"""Testing SCMClientRegistry.register with ID already used"""
class MyGitClient(BaseSCMClient):
scmclient_id = 'git'
registry = SCMClientRegistry()
message = re.compile(
'A SCMClient with an ID of "git" is already registered: '
'rbtools.clients.git.GitClient'
)
with self.assertRaisesRegex(ValueError, message):
registry.register(MyGitClient)
self.assertTrue(registry._builtin_loaded)
self.assertFalse(registry._entrypoints_loaded)
self.assertNotIn(MySCMClient1, registry)
def _add_fake_entrypoints(self, entrypoints):
self.spy_on(entry_points, op=kgb.SpyOpMatchAny([
{
'args': (),
'kwargs': {
'group': 'rbtools_scm_clients',
},
'op': kgb.SpyOpReturn(entrypoints),
},
]))
| mit | 81098ae8465952474a7eb88808a24c17 | 32.140562 | 78 | 0.610397 | 4.233966 | false | true | false | false |
reviewboard/rbtools | rbtools/api/utils.py | 1 | 2490 | """Utilities used by the API interfaces."""
from typing_extensions import TypedDict
class ParsedMIMEType(TypedDict):
"""A MIME type, parsed into its component parts.
Version Added:
4.0
"""
#: The full MIME type.
#:
#: Type:
#: str
type: str
#: Main type (For example, "application" for "application/octet-stream")
#:
#: Type:
#: str
main_type: str
#: Sub-type (for example, "plain" for "text/plain").
#:
#: Type:
#: str
sub_type: str
#: The vendor tag, if available.
#:
#: For example, "vnd.reviewboard.org.test" in
#: "application/vnd.reviewboard.org.test+json".
#:
#: Type:
#: str
vendor: str
#: The sub-type format, if available.
#:
#: For example, "json" in "application/vnd.reviewboard.org.test+json".
#:
#: Type:
#: str
format: str
#: The particular API resource name, if available.
#:
#: For example, "test" in "application/vnd.reviewboard.org.test+json".
#:
#: Type:
#: str
resource: str
def parse_mimetype(
mime_type: str,
) -> ParsedMIMEType:
"""Parse a mime type into its component parts.
Args:
mime_type (str):
The MIME type to parse.
Returns:
ParsedMIMEType:
The type, parsed into its component parts.
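    Example:
        An illustrative parse::
            >>> parsed = parse_mimetype('application/vnd.reviewboard.org.test+json')
            >>> parsed['vendor'], parsed['format'], parsed['resource']
            ('vnd.reviewboard.org.test', 'json', 'test')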
"""
types = mime_type.split(';')[0].split('/')
sub_type = types[1].split('+')
if len(sub_type) == 1:
vendor = ''
format = sub_type[0]
else:
vendor = sub_type[0]
format = sub_type[1]
vendor_parts = vendor.split('.')
if len(vendor_parts) > 1:
resource = vendor_parts[-1].replace('-', '_')
else:
resource = ''
return ParsedMIMEType(
type=mime_type,
main_type=types[0],
        sub_type=types[1],
vendor=vendor,
format=format,
resource=resource)
def rem_mime_format(
mime_type: str,
) -> str:
"""Strip the subtype from a mimetype, leaving vendor specific information.
Removes the portion of the subtype after a +, or the entire
subtype if no vendor specific type information is present.
Args:
mime_type (str):
The MIME type string to modify.
Returns:
str:
The MIME type less any subtypes.
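    Example:
        Illustrative behavior::
            >>> rem_mime_format('application/vnd.reviewboard.org.test+json')
            'application/vnd.reviewboard.org.test'
            >>> rem_mime_format('text/plain')
            'text'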
"""
    if mime_type.rfind('+') != -1:
return mime_type.rsplit('+', 1)[0]
else:
return mime_type.rsplit('/', 1)[0]
| mit | 83a55dabd4ec0bd296b87a9f9f1416a6 | 20.465517 | 78 | 0.554618 | 3.75 | false | false | false | false |
reviewboard/rbtools | rbtools/utils/graphs.py | 5 | 1847 | from __future__ import unicode_literals
from collections import defaultdict, deque
import six
def visit_depth_first(graph, start):
"""Yield vertices in the graph starting at the start vertex.
The vertices are yielded in a depth first order and only those vertices
that can be reached from the start vertex will be yielded.
"""
unvisited = deque()
visited = set()
unvisited.append(start)
while unvisited:
        # pop from the right (LIFO) so the traversal is depth first
        vertex = unvisited.pop()
if vertex in visited:
continue
visited.add(vertex)
yield vertex
if vertex in graph:
for adjacent in graph[vertex]:
unvisited.append(adjacent)
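# A quick illustrative call: for graph {'a': ['b'], 'b': ['c']},
# list(visit_depth_first(graph, 'a')) yields ['a', 'b', 'c'].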
def path_exists(graph, start, end):
"""Determine if a directed path exists between start and end in graph."""
for vertex in visit_depth_first(graph, start):
if vertex == end:
return True
return False
def toposort(graph):
"""Return a topological sorting of the vertices in the directed graph.
If the graph contains cycles, ValueError is raised.
"""
result = []
indegrees = defaultdict(int) # The in-degree of each vertex in the graph.
for head in six.iterkeys(graph):
indegrees[head] = 0
for tails in six.itervalues(graph):
for tail in tails:
indegrees[tail] += 1
heads = set(
vertex
for vertex, indegree in six.iteritems(indegrees)
if indegree == 0
)
while len(heads):
head = heads.pop()
result.append(head)
if head in graph:
for tail in graph[head]:
indegrees[tail] -= 1
if indegrees[tail] == 0:
heads.add(tail)
if any(six.itervalues(indegrees)):
raise ValueError('Graph contains cycles.')
return result
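# --- Illustrative usage (editor's addition, not part of the original module) ---
# A small, runnable sketch on a made-up dependency graph; the graph and its
# vertex names are assumptions for demonstration only.
if __name__ == '__main__':
    demo_graph = {
        'a': ['b', 'c'],
        'b': ['d'],
        'c': ['d'],
    }
    print(path_exists(demo_graph, 'a', 'd'))  # True
    print(path_exists(demo_graph, 'd', 'a'))  # False
    print(toposort(demo_graph))  # One valid order, e.g. ['a', 'b', 'c', 'd']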
| mit | dc51116f3ba21a4003576e522f23239d | 22.379747 | 78 | 0.608554 | 4.408115 | false | false | false | false |
reviewboard/rbtools | rbtools/diffs/tools/backends/apple.py | 1 | 8434 | """A diff tool interfacing with Apple Diff.
Version Added:
4.0
"""
import io
import re
from datetime import datetime
from typing import List
from rbtools.diffs.tools.base import BaseDiffTool, DiffFileResult
from rbtools.utils.filesystem import iter_exes_in_path
from rbtools.utils.process import RunProcessError, run_process
class AppleDiffTool(BaseDiffTool):
"""A diff tool interfacing with Apple Diff.
    Apple Diff was introduced with macOS Ventura, replacing GNU Diff.
Version Added:
4.0
"""
diff_tool_id = 'apple'
name = 'Apple Diff'
_BINARY_FILES_DIFFER_RE = re.compile(
br'^Binary files .*? and .*? differ$')
_DIFF_HEADER_RE = re.compile(
br'^(?P<line>(?:---|\+\+\+) .*?\t\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})'
br'(?P<newline>[\r\n]+)$')
def check_available(self) -> bool:
"""Check whether Apple Diff is available for use.
This will check if Apple Diff is present in the system path.
If available, this will set :py:attr:`exe_path` and
:py:attr:`version_info`.
Returns:
bool:
``True`` if Apple Diff is available. ``False`` if it's not.
"""
for diff_path in iter_exes_in_path('diff'):
try:
result = (
run_process([diff_path, '--version'],
ignore_errors=True)
.stdout
.readlines()
)
except Exception:
# Skip this and try the next one.
continue
if result and result[0].startswith('Apple diff'):
self.exe_path = diff_path
self.version_info = result[0].strip()
return True
return False
def make_run_diff_file_cmdline(
self,
*,
orig_path: str,
modified_path: str,
show_hunk_context: bool = False,
treat_missing_as_empty: bool = True,
) -> List[str]:
"""Return the command line for running the diff tool.
This should generally be used by :py:meth:`run_diff_file`, and
can be useful to unit tests that need to check for the process being
run.
Args:
orig_path (str):
The path to the original file.
modified_path (str):
The path to the modified file.
show_hunk_context (bool, optional):
Whether to show context on hunk lines, if supported by the
diff tool.
treat_missing_as_empty (bool, optional):
Whether to treat a missing ``orig_path`` or ``modified_path``
as an empty file, instead of failing to diff.
This must be supported by subclasses.
Returns:
list of str:
The command line to run for the given flags.
"""
assert self.exe_path
flags: List[str] = ['u']
if treat_missing_as_empty:
flags.append('N')
if show_hunk_context:
flags.append('p')
return [
self.exe_path,
'-%s' % ''.join(flags),
orig_path,
modified_path,
]
def run_diff_file(
self,
*,
orig_path: str,
modified_path: str,
show_hunk_context: bool = False,
treat_missing_as_empty: bool = True,
) -> DiffFileResult:
"""Return the result of a diff between two files.
This will call Apple Diff with the appropriate parameters, returning
a Unified Diff of the results.
Args:
orig_path (str):
The path to the original file.
modified_path (str):
The path to the modified file.
show_hunk_context (bool, optional):
Whether to show context on hunk lines, if supported by the
diff tool.
treat_missing_as_empty (bool, optional):
Whether to treat a missing ``orig_path`` or ``modified_path``
as an empty file, instead of failing to diff.
Returns:
rbtools.diffs.tools.base.diff_file_result.DiffFileResult:
The result of the diff operation.
Raises:
rbtools.utils.process.RunProcessError:
There was an error invoking the diff tool. Details are in the
exception.
"""
assert self.available
cmdline = self.make_run_diff_file_cmdline(
orig_path=orig_path,
modified_path=modified_path,
show_hunk_context=show_hunk_context,
treat_missing_as_empty=treat_missing_as_empty)
process_result = run_process(cmdline,
ignore_errors=(1, 2),
log_debug_output_on_error=False)
if process_result.exit_code == 0:
# There were no differences.
return DiffFileResult(orig_path=orig_path,
modified_path=modified_path,
diff=io.BytesIO(),
has_text_differences=False)
else:
# Differences were found, or trouble occurred.
#
# We may get either value from Apple Diff for binary files,
# despite documentation claiming we'd receive an exit code of 1.
lines = process_result.stdout_bytes.readlines()
if (len(lines) == 1 and
self._BINARY_FILES_DIFFER_RE.match(lines[0])):
# This appears to be a binary file. Return a normalized
# version of this.
return DiffFileResult(
orig_path=orig_path,
modified_path=modified_path,
diff=io.BytesIO(b'Binary files %s and %s differ\n'
% (orig_path.encode('utf-8'),
modified_path.encode('utf-8'))),
is_binary=True,
has_text_differences=False)
elif process_result.exit_code == 1:
process_result.stdout_bytes.seek(0)
return DiffFileResult(
orig_path=orig_path,
modified_path=modified_path,
diff=self._normalize_diff(process_result.stdout_bytes))
# Something else went wrong. Raise this.
raise RunProcessError(process_result)
def _normalize_diff(
self,
stream: io.BytesIO,
) -> io.BytesIO:
"""Normalize an Apple Diff result.
Apple Diff and GNU Diff mostly have the same Unified Diff output,
but they do differ when it comes to timestamps.
GNU Diff timestamps include millisecond precision and a timezone
offset. For some reason, Apple Diff only does this if running in
"legacy" mode" (running with a ``COMMAND_MODE=legacy`` environment),
with a documentation warning that these may not be patchable.
We still need consistency, though, and GNU Diff's output is the
target. Rather than rely on that environment, which may conceivably
change in the future, this method processes the diff and adds the
millisecond precision (of 0) and timezone offset to the timestamp.
If Apple Diff ever changes, this function will effectively be a no-op.
Args:
stream (io.BytesIO):
The stream from running Apple Diff.
Returns:
io.BytesIO:
The resulting stream with a processed diff.
"""
DIFF_HEADER_RE = self._DIFF_HEADER_RE
diff = io.BytesIO()
timezone = datetime.now().astimezone().strftime('%z').encode('utf-8')
normalized = False
for i in range(2):
line = stream.readline()
m = DIFF_HEADER_RE.match(line)
if m:
diff.write(b'%s.000000000 %s%s'
% (m.group('line'),
timezone,
m.group('newline')))
normalized = True
else:
diff.write(line)
if normalized:
diff.write(stream.read())
else:
diff = stream
diff.seek(0)
return diff
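# --- Illustrative usage (editor's addition, not part of the original module) ---
# A hedged sketch of the command line produced above; the executable path and
# file names are hypothetical, and this assumes Apple Diff is found in PATH:
#
#   tool = AppleDiffTool()
#   if tool.check_available():
#       tool.make_run_diff_file_cmdline(orig_path='old.txt',
#                                       modified_path='new.txt',
#                                       show_hunk_context=True)
#       # -> ['/usr/bin/diff', '-uNp', 'old.txt', 'new.txt']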
| mit | b1ce39f96c3da23885e2903b39c17d8b | 31.438462 | 78 | 0.534977 | 4.536848 | false | false | false | false |
reviewboard/rbtools | rbtools/testing/transport.py | 1 | 4262 | """Transport subclass used for unit testing.
Deprecated::
3.1:
Replaced with
    :py:class:`rbtools.testing.api.transport.URLMapTransport`.
"""
from __future__ import unicode_literals
from rbtools.api.factory import create_resource
from rbtools.api.request import HttpRequest
from rbtools.api.tests.base import TestWithPayloads
from rbtools.api.transport import Transport
class TestTransport(Transport):
"""Mocked subclass of Transport used for unit tests.
This is mainly used to test functionality that requires interacting with
and reading data from a Review Board server. Unlike the original
implementation of its parent class, custom payloads can be passed in to
force return a specific subclass of
:py:class:`rbtools.api.resource.Resource`.
Deprecated::
3.1:
Replaced with
        :py:class:`rbtools.testing.api.transport.URLMapTransport`.
"""
def __init__(self, url, list_payload=TestWithPayloads.list_payload,
root_payload=TestWithPayloads.root_payload):
"""Initialize an instance of TestTransport.
Args:
url (list of unicode):
URL representing the Transport URL.
list_payload (dict, optional):
Dictionary of key-pair values representing the payload for a
:py:class:`rbtools.api.resource.ItemResource` instance.
Default value is a payload defined in
rbtools.api.tests.base.TestWithPayloads.
root_payload (dict, optional):
Dictionary of key-pair values representing the payload for a
:py:class:`rbtools.api.resource.RootResource`. Default
value is a payload defined in
rbtools.api.tests.base.TestWithPayloads.
"""
self.url = url
self.list_payload = list_payload
self.root_payload = root_payload
def execute_request_method(self, method, *args, **kwargs):
"""Return an instance of ItemResource.
        Instead of carrying out the :py:class:`rbtools.api.request.HttpRequest`
        instance returned by the method, this returns
        an instance of :py:class:`rbtools.api.resource.ItemResource`. The type
of metadata this instance contains depends on the type of
:py:attr:`list_payload` passed in.
Args:
method (callable):
A function that acts as a method to be executed and returns a
:py:class:`rbtools.api.request.HttpRequest` instance.
*args:
Variable arguments used for running the passed in method.
**kwargs:
Keyword arguments used for running the passed in method.
Returns:
rbtools.api.resource.ItemResource:
            An instance of :py:class:`rbtools.api.resource.ItemResource` if the
            executed method returns an instance of
            :py:class:`rbtools.api.request.HttpRequest`.
"""
request = method(*args, **kwargs)
if isinstance(request, HttpRequest):
return create_resource(
transport=self,
payload=self.list_payload,
url='http://localhost:8080/api/repositories/',
mime_type='application/vnd.reviewboard.org.list+json',
item_mime_type='application/vnd.reviewboard.org.repository'
'+json')
return request
def get_root(self):
"""Return an instance of RootResource
Instead of calling :py:meth:`get_root` and returning an
instance of :py:class:`rbtools.api.request.HttpRequest`, an instance of
:py:class:`rbtools.api.resource.RootResource` is simply returned.
The type of metadata this instance contains depends on the
type of :py:attr:`root_payload` passed in.
Returns:
rbtools.api.resource.RootResource:
An instance of :py:class:`rbtools.api.request.RootResource`.
"""
return create_resource(
transport=self,
payload=self.root_payload,
url='http://localhost:8080/api/',
mime_type='application/vnd.reviewboard.org.root+json')
| mit | 36661df704691ad37179171b498e6a93 | 37.053571 | 79 | 0.633271 | 4.500528 | false | true | false | false |
reviewboard/rbtools | rbtools/utils/tests/test_buffered_iterator.py | 1 | 4140 | """Unit tests for rbtools.utils.streams.BufferedIterator.
Version Added:
4.0
"""
from rbtools.testing import TestCase
from rbtools.utils.streams import BufferedIterator
class BufferedIteratorTests(TestCase):
"""Unit tests for rbtools.utils.streams.BufferedIterator."""
def test_is_empty_with_iter_empty(self):
"""Testing BufferedIterator.is_empty with empty iterator"""
iterator = BufferedIterator([])
self.assertTrue(iterator.is_empty)
def test_is_empty_with_iter_populated(self):
"""Testing BufferedIterator.is_empty with iterator populated"""
iterator = BufferedIterator([1])
self.assertFalse(iterator.is_empty)
def test_is_empty_with_iter_empty_buffer_populated(self):
"""Testing BufferedIterator.is_empty with iterator empty and buffer
populated
"""
iterator = BufferedIterator([1])
iterator.peek(1)
self.assertFalse(iterator.is_empty)
def test_iter(self):
"""Testing BufferedIterator.__iter__"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(list(iterator), [1, 2, 3, 4])
def test_iter_after_peek(self):
"""Testing BufferedIterator.__iter__ after peek"""
iterator = BufferedIterator([1, 2, 3, 4])
iterator.peek(2)
self.assertEqual(list(iterator), [1, 2, 3, 4])
def test_next(self):
"""Testing BufferedIterator.__next__"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(next(iterator), 1)
self.assertEqual(next(iterator), 2)
self.assertEqual(next(iterator), 3)
self.assertEqual(next(iterator), 4)
with self.assertRaises(StopIteration):
next(iterator)
def test_next_after_peek(self):
"""Testing BufferedIterator.__next__"""
iterator = BufferedIterator([1, 2, 3, 4])
iterator.peek(2)
self.assertEqual(next(iterator), 1)
self.assertEqual(next(iterator), 2)
self.assertEqual(next(iterator), 3)
self.assertEqual(next(iterator), 4)
with self.assertRaises(StopIteration):
next(iterator)
def test_peek(self):
"""Testing BufferedIterator.peek"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(iterator.peek(2), [1, 2])
# Doing this again should return the same buffer contents.
self.assertEqual(iterator.peek(2), [1, 2])
def test_peek_overflow(self):
"""Testing BufferedIterator.peek with count > iterator count"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(iterator.peek(6), [1, 2, 3, 4])
# Doing this again should return the same buffer contents.
self.assertEqual(iterator.peek(6), [1, 2, 3, 4])
def test_peek_empty(self):
"""Testing BufferedIterator.peek with empty iterator"""
iterator = BufferedIterator([])
self.assertEqual(iterator.peek(2), [])
def test_consume(self):
"""Testing BufferedIterator.consume"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(iterator.consume(2), [1, 2])
# Doing this again should return the next contents.
self.assertEqual(iterator.consume(2), [3, 4])
def test_consume_overflow(self):
"""Testing BufferedIterator.consume with count > iterator count"""
iterator = BufferedIterator([1, 2, 3, 4])
self.assertEqual(iterator.consume(6), [1, 2, 3, 4])
# Doing this again should return an empty list.
self.assertEqual(iterator.consume(6), [])
def test_consume_empty(self):
"""Testing BufferedIterator.consume with empty iterator"""
iterator = BufferedIterator([])
self.assertEqual(iterator.consume(2), [])
def test_consume_after_peek(self):
"""Testing BufferedIterator.consume after peek"""
iterator = BufferedIterator([1, 2, 3, 4])
iterator.peek(2)
self.assertEqual(iterator.consume(2), [1, 2])
# One more time to be sure.
iterator.peek(2)
self.assertEqual(iterator.consume(2), [3, 4])
| mit | 5952024e1aee175674b5daf40c51f7e4 | 31.093023 | 75 | 0.629227 | 4.078818 | false | true | false | false |
reviewboard/rbtools | rbtools/api/tests/test_errors.py | 1 | 6553 | """Unit tests for rbtools.api.errors."""
from __future__ import unicode_literals
import six
from rbtools.api.errors import APIError, AuthorizationError, BadRequestError
from rbtools.testing import TestCase
class APIErrorTests(TestCase):
"""Unit tests for rbtools.api.errors.APIError."""
def test_str_with_http_status(self):
"""Testing APIError.__str__ with http_status"""
self.assertEqual(
six.text_type(APIError(http_status=500)),
'An error occurred when communicating with Review Board. '
'(HTTP 500: Internal Server Error)')
def test_str_with_http_status_unknown(self):
"""Testing APIError.__str__ with unknown http_status"""
self.assertEqual(
six.text_type(APIError(http_status=900)),
'An error occurred when communicating with Review Board. '
'(HTTP 900)')
def test_str_with_error_code(self):
"""Testing APIError.__str__ with error_code"""
self.assertEqual(
six.text_type(APIError(error_code=105)),
'An error occurred when communicating with Review Board. '
'(API Error 105: Invalid Form Data)')
def test_str_with_error_code_unknown(self):
"""Testing APIError.__str__ with unknown error_code"""
self.assertEqual(
six.text_type(APIError(error_code=12345)),
'An error occurred when communicating with Review Board. '
'(API Error 12345)')
def test_str_with_http_status_and_error_code(self):
"""Testing APIError.__str__ with http_status and error_code"""
self.assertEqual(
six.text_type(APIError(http_status=400,
error_code=106)),
'An error occurred when communicating with Review Board. '
'(API Error 106: Missing Attribute)')
def test_str_with_rsp(self):
"""Testing APIError.__str__ with rsp error message"""
self.assertEqual(
six.text_type(APIError(rsp={
'err': {
'msg': 'Bad things happened.',
},
})),
'Bad things happened.')
def test_str_with_rsp_and_error_code(self):
"""Testing APIError.__str__ with rsp error message and error_code"""
self.assertEqual(
six.text_type(APIError(
http_status=400,
error_code=106,
rsp={
'err': {
'msg': 'Bad things happened.',
},
})),
'Bad things happened. (API Error 106: Missing Attribute)')
def test_str_with_rsp_and_http_status(self):
"""Testing APIError.__str__ with rsp error message and http_status"""
self.assertEqual(
six.text_type(APIError(
http_status=400,
rsp={
'err': {
'msg': 'Bad things happened.',
},
})),
'Bad things happened. (HTTP 400: Bad Request)')
def test_str_with_no_details(self):
"""Testing APIError.__str__ without any details"""
self.assertEqual(
six.text_type(APIError()),
'An error occurred when communicating with Review Board.')
class AuthorizationErrorTests(TestCase):
"""Unit tests for rbtools.api.errors.AuthorizationError."""
def test_str_with_message(self):
"""Testing AuthorizationError.__str__ with explicit error message"""
self.assertEqual(
six.text_type(AuthorizationError(message='Oh no.')),
'Oh no.')
def test_str_with_details(self):
"""Testing AuthorizationError.__str__ without explicit error message,
with HTTP details
"""
self.assertEqual(
six.text_type(AuthorizationError(http_status=401,
error_code=104)),
'Error authenticating to Review Board. (API Error 104: '
'Login Failed)')
def test_str_without_message_or_details(self):
"""Testing AuthorizationError.__str__ without explicit error message
or HTTP details
"""
self.assertEqual(
six.text_type(AuthorizationError()),
'Error authenticating to Review Board.')
class BadRequestErrorTests(TestCase):
"""Unit tests for rbtools.api.errors.BadRequestError."""
def test_str(self):
"""Testing BadRequestError.__str__"""
self.assertEqual(
six.text_type(BadRequestError()),
'Missing or invalid data was sent to Review Board.')
def test_str_with_error_code(self):
"""Testing BadRequestError.__str__"""
self.assertEqual(
six.text_type(BadRequestError(error_code=200)),
'Missing or invalid data was sent to Review Board. '
'(API Error 200: Unspecified Diff Revision)')
def test_str_with_rsp_error_message(self):
"""Testing BadRequestError.__str__ with rsp error message"""
self.assertEqual(
six.text_type(BadRequestError(
error_code=200,
rsp={
'err': {
'msg': 'Diff revision not specified.',
},
})),
'Diff revision not specified. (API Error 200: Unspecified Diff '
'Revision)')
def test_str_with_message(self):
"""Testing BadRequestError.__str__ with message"""
self.assertEqual(
six.text_type(BadRequestError(
error_code=200,
message='Diff revision not specified.')),
'Diff revision not specified. (API Error 200: Unspecified Diff '
'Revision)')
def test_str_with_message_with_fields(self):
"""Testing BadRequestError.__str__ with fields"""
self.assertEqual(
six.text_type(BadRequestError(
error_code=105,
rsp={
'err': {
'msg': 'One or more fields had errors',
},
'fields': {
'field1': ['This field was invalid'],
'field2': ['This one, too', 'So invalid'],
},
})),
'One or more fields had errors (API Error 105: Invalid Form '
'Data)\n'
'\n'
' field1: This field was invalid\n'
' field2: This one, too; So invalid')
| mit | 0da0690485784f6eca7c59b1fc188237 | 36.232955 | 77 | 0.544636 | 4.541234 | false | true | false | false |
reviewboard/rbtools | rbtools/utils/tests/test_repository.py | 1 | 6116 | """Unit tests for rbtools.utils.repository."""
from __future__ import unicode_literals
import json
import kgb
from six.moves.urllib.request import urlopen
from rbtools.api.client import RBClient
from rbtools.api.tests.base import MockResponse
from rbtools.testing import TestCase
from rbtools.utils.repository import get_repository_resource
_REPO1 = {
'id': 1,
'name': 'Git Repo 1',
'path': 'git@example.com:test.git',
'mirror_path': 'https://example.com/test3.git',
'links': {
'info': {
'href': 'http://localhost:8080/api/repositories/1/info/',
'method': 'GET',
},
},
}
_REPO2 = {
'id': 2,
'name': 'Git Repo 2',
'path': 'https://git@example.com/test2.git',
'mirror_path': 'git@example.com:test2.git',
'links': {
'info': {
'href': 'http://localhost:8080/api/repositories/2/info/',
'method': 'GET',
},
},
}
_REPO3 = {
'id': 3,
'name': 'Git Repo 3',
'path': 'https://git@example.com/test3.git',
'mirror_path': '',
'links': {
'info': {
'href': 'http://localhost:8080/api/repositories/3/info/',
'method': 'GET',
},
},
}
_MATCH_URL_BASE = (
'http://localhost:8080/api/repositories/?'
'only-fields=id%2Cname%2Cmirror_path%2Cpath&only-links=info'
)
class RepositoryMatchTests(kgb.SpyAgency, TestCase):
"""Unit tests for remote repository matching."""
payloads = {
'http://localhost:8080/api/': {
'mimetype': 'application/vnd.reviewboard.org.root+json',
'rsp': {
'uri_templates': {},
'links': {
'self': {
'href': 'http://localhost:8080/api/',
'method': 'GET',
},
'repositories': {
'href': 'http://localhost:8080/api/repositories/',
'method': 'GET',
},
},
'stat': 'ok',
},
},
(_MATCH_URL_BASE +
'&path=git%40example.com%3Atest.git'): {
'mimetype': 'application/vnd.reviewboard.org.repositories+json',
'rsp': {
'repositories': [_REPO1],
'links': {},
'total_results': 1,
'stat': 'ok',
},
},
(_MATCH_URL_BASE +
'&path=git%40example.com%3Atest2.git'): {
'mimetype': 'application/vnd.reviewboard.org.repositories+json',
'rsp': {
'repositories': [_REPO2],
'links': {},
'total_results': 1,
'stat': 'ok',
},
},
(_MATCH_URL_BASE +
'&path=http%3A%2F%2Fexample.com%2Ftest3.git'): {
'mimetype': 'application/vnd.reviewboard.org.repositories+json',
'rsp': {
'repositories': [_REPO1, _REPO3],
'links': {},
'total_results': 2,
'stat': 'ok',
},
},
(_MATCH_URL_BASE +
'&path=git%40example.com%3Atest4.git'): {
'mimetype': 'application/vnd.reviewboard.org.repositories+json',
'rsp': {
'repositories': [],
'links': {},
'total_results': 0,
'stat': 'ok',
},
},
(_MATCH_URL_BASE): {
'mimetype': 'application/vnd.reviewboard.org.repositories+json',
'rsp': {
'repositories': [
_REPO1,
_REPO2,
],
'links': {},
'total_results': 2,
'stat': 'ok',
},
},
}
def setUp(self):
super(RepositoryMatchTests, self).setUp()
@self.spy_for(urlopen)
def _urlopen(url, **kwargs):
url = url.get_full_url()
try:
payload = self.payloads[url]
except KeyError:
print('Test requested unexpected URL "%s"' % url)
return MockResponse(404, {}, json.dumps({
'rsp': {
'stat': 'fail',
'err': {
'code': 100,
'msg': 'Object does not exist',
},
},
}))
return MockResponse(
200,
{
'Content-Type': payload['mimetype'],
},
json.dumps(payload['rsp']))
self.api_client = RBClient('http://localhost:8080/')
self.root_resource = self.api_client.get_root()
def test_find_matching_server_repository_with_path_match(self):
"""Testing get_repository_resource with path match"""
repository, info = get_repository_resource(
self.root_resource,
repository_paths='git@example.com:test.git')
self.assertEqual(repository.id, 1)
def test_find_matching_server_repository_with_mirror_path_match(self):
"""Testing get_repository_resource with mirror path match"""
repository, info = get_repository_resource(
self.root_resource,
repository_paths='git@example.com:test2.git')
self.assertEqual(repository.id, 2)
def test_find_matching_server_repository_with_multiple_matches(self):
"""Testing get_repository_resource with multiple matching paths"""
repository, info = get_repository_resource(
self.root_resource,
repository_paths='http://example.com/test3.git')
self.assertEqual(repository.id, 1)
def test_find_matching_server_repository_no_match(self):
"""Testing get_repository_resource with no match"""
repository, info = get_repository_resource(
self.root_resource,
repository_paths='git@example.com:test4.git')
self.assertIsNone(repository)
self.assertIsNone(info)
| mit | 83fd98075d58ceef8fea76f1c6296014 | 30.045685 | 76 | 0.48463 | 4.101945 | false | true | false | false |
pytest-dev/pytest-mock | scripts/gen-release-notes.py | 1 | 1138 | """
Generates the release notes for the latest release, in Markdown.
Convert CHANGELOG.rst to Markdown, and extracts just the latest release.
Writes to ``scripts/latest-release-notes.md``, which can be
used with https://github.com/softprops/action-gh-release.
"""
from pathlib import Path
import pypandoc
this_dir = Path(__file__).parent
rst_text = (this_dir.parent / "CHANGELOG.rst").read_text(encoding="UTF-8")
md_text = pypandoc.convert_text(
rst_text, "md", format="rst", extra_args=["--wrap=preserve"]
)
output_lines = []
first_heading_found = False
for line in md_text.splitlines():
if line.startswith("# "):
# Skip the first section (normally # Releases).
pass
elif line.startswith("## "):
# First second-level section, note it and skip the text,
# as we are only interested in the individual release items.
if first_heading_found:
break
first_heading_found = True
else:
output_lines.append(line)
output_fn = this_dir / "latest-release-notes.md"
output_fn.write_text("\n".join(output_lines), encoding="UTF-8")
print(output_fn, "generated.")
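# Illustrative note (editor's addition): given converted Markdown shaped like
#
#   # Releases
#   ## 3.10.0 (2023-01-01)
#   - newest item
#   ## 3.9.0 (2022-02-02)
#   - older item
#
# the loop above keeps only "- newest item": "# " headings are skipped, the
# first "## " heading flips first_heading_found, and the second one breaks.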
| mit | 81243a8cc28adf9b52ad88b3b17d1231 | 30.611111 | 74 | 0.676626 | 3.501538 | false | true | false | false |
ywangd/stash | bin/webviewer.py | 1 | 1337 | # coding: utf-8
"""Opens the given URL in the webbrowser or an App."""
import argparse
import webbrowser
import ui
from objc_util import on_main_thread
@on_main_thread
def open_webbrowser(url, modal=False):
"""opens the url in the webbrowser"""
webbrowser.open(url, modal)
def open_webview(url, modal=False):
"""opens the url in a view."""
v = ui.WebView()
v.present("fullscreen")
v.load_url(url)
if modal:
v.wait_modal()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("url", help="url to open", action="store")
parser.add_argument("-m", "--modal", help="wait until the user closed the webbrowser", action="store_true", dest="modal")
parser.add_argument(
"-n",
"--insecure",
help="prefix the url with http:// instead of https:// if no prefix is given",
action="store_const",
const="http://",
default="https://",
dest="prefix"
)
parser.add_argument("-f", "--foreground", help="Open the url in the foreground", action="store_true", dest="foreground")
ns = parser.parse_args()
url = ns.url
if "://" not in url:
url = ns.prefix + url
if not ns.foreground:
open_webbrowser(url, ns.modal)
else:
open_webview(url, ns.modal)
| mit | 7c7f60c315dae61c8c9e7a5185aabc91 | 28.711111 | 125 | 0.611818 | 3.565333 | false | false | false | false |
ywangd/stash | bin/head.py | 1 | 2669 | # -*- coding: utf-8 -*-
"""Print the first 10 lines of the given files.
"""
from __future__ import print_function
import argparse
import string
import sys
import fileinput
def filter_non_printable(s):
return ''.join([c if c.isalnum() or c.isspace() or c in string.punctuation else ' ' for c in s])
def head(f, nlines):
if nlines >= 0:
for i, line in enumerate(f):
if i >= nlines:
break
print(line, end='')
else:
buf = []
line = f.readline()
while line:
buf.append(line)
if len(buf) > -nlines:
del buf[0]
line = f.readline()
for line in buf:
print(line, end='')
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument(
"-n",
"--lines",
default=10,
type=int,
help="""print the first K lines instead of 10;
if negative, print the last -K lines"""
)
p.add_argument("-q", "--quiet", "--silent", action='store_true', help="never print headers for each file")
p.add_argument("-v", "--verbose", action='store_true', help="always print headers for each file")
p.add_argument("files", action="store", nargs="*", help="files to print")
ns = p.parse_args(args)
status = 0
header_fmt = '==> {} <==\n'
if len(ns.files) == 0:
ns.files = ['-']
try:
for fname in ns.files:
if ns.verbose or (len(ns.files) > 1 and not ns.quiet):
if fname == '-':
print(header_fmt.format('standard input'), end='')
else:
print(header_fmt.format(fname), end='')
fileinput.close()
inp = fileinput.input(fname, openhook=fileinput.hook_encoded("utf-8"))
if ns.lines >= 0:
buf = []
for i, line in enumerate(inp):
if i >= ns.lines:
break
buf.append(line)
for line in buf:
print(line, end='')
else:
buf = []
                for line in inp:
buf.append(line)
if len(buf) > -ns.lines:
del buf[0]
for line in buf:
print(line, end='')
except Exception as e:
print('head :%s' % str(e))
status = 1
finally:
fileinput.close()
sys.exit(status)
if __name__ == "__main__":
main(sys.argv[1:])
| mit | 9efe891f032a3572cf5a07784aeeacc9 | 26.515464 | 110 | 0.48033 | 3.959941 | false | false | false | false |
ywangd/stash | bin/unzip.py | 1 | 3494 | # -*- coding: utf-8 -*-
"""Extract a zip archive into a directory."""
from __future__ import print_function
import os
import sys
import zipfile
import argparse
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--exdir', nargs='?', help='extract files into exdir')
ap.add_argument('-v', '--verbose', action='store_true', help='be more chatty')
ap.add_argument('-t', '--list', action='store_true', help='list the contents of an archive')
ap.add_argument('zipfile', help='zip file to be extracted')
ns = ap.parse_args(args)
if not os.path.isfile(ns.zipfile):
print("%s: No such file" % ns.zipfile)
else:
# PK magic marker check
with open(ns.zipfile, "rb") as f:
try:
pk_check = f.read(2)
except:
pk_check = ''
if pk_check != b'PK':
print("%s: does not appear to be a zip file" % ns.zipfile)
sys.exit(1)
if ns.list:
location = ''
else:
if os.path.basename(ns.zipfile).lower().endswith('.zip'):
altpath = os.path.splitext(os.path.basename(ns.zipfile))[0]
else:
altpath = os.path.basename(ns.zipfile) + '_unzipped'
altpath = os.path.join(os.path.dirname(ns.zipfile), altpath)
location = ns.exdir or altpath
if (os.path.exists(location)) and not (os.path.isdir(location)):
print("%s: destination is not a directory" % location)
sys.exit(1)
elif not os.path.exists(location):
os.makedirs(location)
with open(ns.zipfile, 'rb') as zipfp:
try:
zipf = zipfile.ZipFile(zipfp)
# check for a leading directory common to all files and remove it
dirnames = [os.path.join(os.path.dirname(x), '') for x in zipf.namelist()]
common_dir = os.path.commonprefix(dirnames or ['/'])
# Check to make sure there aren't 2 or more sub directories with the same prefix
if not common_dir.endswith('/'):
common_dir = os.path.join(os.path.dirname(common_dir), '')
for name in zipf.namelist():
data = zipf.read(name)
fn = name
if common_dir:
if fn.startswith(common_dir):
fn = fn.split(common_dir, 1)[-1]
elif fn.startswith('/' + common_dir):
fn = fn.split('/' + common_dir, 1)[-1]
fn = fn.lstrip('/')
fn = os.path.join(location, fn)
dirf = os.path.dirname(fn)
if not os.path.exists(dirf) and not ns.list:
os.makedirs(dirf)
if fn.endswith('/'):
# A directory
if not os.path.exists(fn) and not ns.list:
os.makedirs(fn)
elif not ns.list:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
if ns.verbose or ns.list:
print(fn)
except:
print("%s: zip file is corrupt" % ns.zipfile)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | d2a44b345aa883132a34d022e5aef110 | 37.395604 | 96 | 0.479393 | 4.199519 | false | false | false | false |
ywangd/stash | lib/libcore.py | 1 | 2588 | # -*- coding: utf-8 -*-
import os
import fileinput
try:
unicode
except NameError:
unicode = str
def collapseuser(path):
"""Reverse of os.path.expanduser: return path relative to ~, if
such representation is meaningful. If path is not ~ or a
subdirectory, the absolute path will be returned.
"""
path = os.path.abspath(unicode(path))
home = os.path.expanduser("~")
if os.path.exists(os.path.expanduser("~/Pythonista.app")):
althome = os.path.dirname(os.path.realpath(os.path.expanduser("~/Pythonista.app")))
else:
althome = home
if path.startswith(home):
collapsed = os.path.relpath(path, home)
elif path.startswith(althome):
collapsed = os.path.relpath(path, althome)
else:
collapsed = path
return "~" if collapsed == "." else os.path.join("~", collapsed)
def get_lan_ip():
try:
from objc_util import ObjCClass
NSHost = ObjCClass('NSHost')
addresses = []
for address in NSHost.currentHost().addresses():
address = str(address)
if 48 <= ord(address[0]) <= 57 and address != '127.0.0.1':
addresses.append(address)
return ' '.join(addresses)
except ImportError:
return ''
def input_stream(files=()):
""" Handles input files similar to fileinput.
The advantage of this function is it recovers from errors if one
file is invalid and proceed with the next file
"""
fileinput.close()
try:
if not files:
for line in fileinput.input(files):
yield line, '', fileinput.filelineno()
else:
while files:
thefile = files.pop(0)
try:
for line in fileinput.input(thefile):
yield line, fileinput.filename(), fileinput.filelineno()
except IOError as e:
yield None, fileinput.filename(), e
finally:
fileinput.close()
def sizeof_fmt(num):
"""
Return a human readable string describing the size of something.
    :param num: the number in machine-readable form
    :type num: int
"""
for unit in ['B', 'KiB', 'MiB', 'GiB']:
if num < 1024:
return "%3.1f%s" % (num, unit)
num /= 1024.0
return "%3.1f%s" % (num, 'Ti')
| mit | 7339f0f380796576c681019b2ddd1420 | 29.447059 | 132 | 0.584621 | 3.88006 | false | false | false | false |
ywangd/stash | lib/stashutils/fsi/base.py | 1 | 5264 | # -*- coding: utf-8 -*-
"""helper functions and base classes."""
from stashutils.fsi.errors import OperationFailure
import random
import os
import time
import stat
import pwd
class BaseFSI(object):
"""
Baseclass for all FSIs.
Other FSIs should subclass this.
This class currently only serves as a documentation, but this may change.
"""
def __init__(self, logger=None):
"""
called on __init__().
"logger" should be a callable,
which will be called with log messages, or None.
"""
self.logger = logger
def connect(self, *args):
"""
Called to 'connect' to a filesystem.
'args' are the additional args passed by the user.
This should be no-op on if no connection nor setup is required.
This should return True on success, otherwise a string describing the error.
"""
return "Not Implemented"
def repr(self):
"""
this should return a string identifying the instance of this interface.
"""
return "Unknown Interface"
def listdir(self, path="."):
"""
called for listing a dir.
The FSI is responsible for keeping track of the cwd.
This should return a list of strings.
'..' doesnt need to be added.
"""
return []
def cd(self, name):
"""this should change the cwd to name."""
raise OperationFailure("NotImplemented")
def get_path(self):
"""this should return the current path as a string."""
return "/"
def remove(self, name):
"""this should remove name. name may refer either to a dir or a file."""
raise OperationFailure("NotImplemented")
def open(self, name, mode="r", buffering=0):
"""
this should return a file-like object opened in mode mode.
"""
raise OperationFailure("NotImplemented")
def mkdir(self, name):
"""this should create a dir."""
raise OperationFailure("NotImplemented")
def close(self):
"""this should close the interface.
There is a chance that this may not be called."""
pass
def isdir(self, name):
"""this should return True if name is an existing directory and
False if not."""
raise OperationFailure("NotImplemented")
def isfile(self, name):
"""this should return wether name is an existing file."""
# default: not isdir(). problem: no exist check
return not self.isdir(name)
def stat(self, name):
"""
this should stat the file name and return a os.stat_result or
FakeStatResult().
"""
if self.isfile(name):
return make_stat(type=stat.S_IFREG)
else:
return make_stat(type=stat.S_IFDIR)
def log(self, msg):
"""logs/prints a message to self.logger."""
if self.logger is not None:
self.logger(msg)
def calc_mode(
sticky=False,
isuid=True,
isgid=True,
type=stat.S_IFREG,
owner_read=True,
owner_write=True,
owner_exec=True,
group_read=True,
group_write=True,
group_exec=True,
other_read=True,
other_write=True,
other_exec=True,
):
"""helper function to calculate the mode bits of a file."""
mode = 0
if owner_read:
mode |= stat.S_IRUSR
if owner_write:
mode |= stat.S_IWUSR
if owner_exec:
mode |= stat.S_IXUSR
if group_read:
mode |= stat.S_IRGRP
if group_write:
mode |= stat.S_IWGRP
if group_exec:
mode |= stat.S_IXGRP
if other_read:
mode |= stat.S_IROTH
if other_write:
mode |= stat.S_IWOTH
if other_exec:
mode |= stat.S_IXOTH
if sticky:
mode |= stat.S_ISVTX
if isuid:
mode |= stat.ST_UID
if isgid:
mode |= stat.ST_GID
mode |= type
return mode
DEFAULT_MODE = calc_mode()
def make_stat(
mode=DEFAULT_MODE,
inode=None,
dev=None,
nlinks=1,
gid=None,
uid=None,
size=0,
atime=None,
mtime=None,
ctime=None,
blocks=1,
blksize=None,
rdev=stat.S_IFREG,
flags=0,
):
"""helper function to generate os.stat results."""
if inode is None:
inode = random.randint(1000, 9999999)
if dev is None:
dev = os.makedev(64, random.randint(1, 100))
if uid is None:
uid = os.getuid()
if gid is None:
uid2 = os.getuid()
gid = pwd.getpwuid(uid2).pw_gid
if atime is None:
atime = time.time()
if mtime is None:
mtime = time.time()
if ctime is None:
ctime = time.time()
if os.stat_float_times():
ctime = float(ctime)
mtime = float(mtime)
atime = float(atime)
else:
ctime = int(ctime)
atime = int(atime)
mtime = int(mtime)
if blksize is None:
blksize = max(size, 2048)
s = os.stat_result(
(
mode,
inode,
dev,
nlinks,
gid,
uid,
size,
atime,
mtime,
ctime,
),
{
"st_blocks": blocks,
"st_blksize": blksize,
"st_rdev": rdev,
"st_flags": flags,
}
)
return s
| mit | 2dae80246a1f03322538f998b39e577b | 23.713615 | 80 | 0.56478 | 3.822803 | false | false | false | false |
ywangd/stash | bin/ping.py | 1 | 8143 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Usage:
ping [-c <count>] [-i <interval>] [-W <timeout>] <destination>
Options:
-c <count>, --count=<count> [default: 5]
-i <interval>, --interval=<interval> [default: 1.0]
Wait interval seconds between sending each packet. The default is to wait for one second between each packet normally.
-W <timeout>, --timeout=<timeout> [default: 2.]
Time to wait for a response, in seconds. The option affects only timeout in absense of any responses, otherwise ping waits for two RTTs.
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Derived from ping.c distributed in Linux's netkit. That code is
copyright (c) 1989 by The Regents of the University of California.
That code is in turn derived from code written by Mike Muuss of the
US Army Ballistic Research Laboratory in December, 1983 and
placed in the public domain. They have my thanks.
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word - size dependenceies here.
Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>.
Distributable under the terms of the GNU General Public License
version 2. Provided with no warranties of any sort.
Original Version from Matthew Dixon Cowles:
-> ftp://ftp.visi.com/users/mdc/ping.py
Rewrite by Jens Diemer:
-> http://www.python-forum.de/post-69122.html#69122
Revision history
~~~~~~~~~~~~~~~~
August 18, 2016
changes by J. Bain
- implemented interface for pythonista stash
March 11, 2010
changes by Samuel Stauffer:
- replaced time.clock with default_timer which is set to
time.clock on windows and time.time on other systems.
May 30, 2007
little rewrite by Jens Diemer:
- change socket asterisk import to a normal import
- replace time.time() with time.clock()
- delete "return None" (or change to "return" only)
- in checksum() rename "str" to "source_string"
November 22, 1997
Initial hack. Doesn't do much, but rather than try to guess
what features I (or others) will want in the future, I've only
put in what I need now.
December 16, 1997
For some reason, the checksum bytes are in the wrong order when
this is run under Solaris 2.X for SPARC but it works right under
Linux x86. Since I don't know just what's wrong, I'll swap the
bytes always and then do an htons().
December 4, 2000
Changed the struct.pack() calls to pack the checksum and ID as
unsigned. My thanks to Jerome Poincheval for the fix.
Januari 27, 2015
Changed receive response to not accept ICMP request messages.
It was possible to receive the very request that was sent.
Last commit info:
~~~~~~~~~~~~~~~~~
$LastChangedDate: $
$Rev: $
$Author: $
"""
from __future__ import print_function
import os
import select
import socket
import struct
import sys
import time
import argparse
from six.moves import xrange
# On Windows, the best timer is time.clock()
# On most other platforms the best timer is time.time()
default_timer = time.clock if sys.platform == "win32" else time.time
# From /usr/include/linux/icmp.h; your mileage may vary.
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris.
def checksum(source_string):
"""
I'm not too confident that this is right but testing seems
to suggest that it gives the same answers as in_cksum in ping.c
"""
sum = 0
    countTo = (len(source_string) // 2) * 2
count = 0
while count < countTo:
v1 = source_string[count + 1]
if not isinstance(v1, int):
v1 = ord(v1)
v2 = source_string[count]
if not isinstance(v2, int):
v2 = ord(v2)
thisVal = v1 * 256 + v2
sum = sum + thisVal
sum = sum & 0xffffffff # Necessary?
count = count + 2
    if countTo < len(source_string):
        # On Python 3, indexing bytes yields an int already; only call ord()
        # on str-like data.
        v = source_string[len(source_string) - 1]
        if not isinstance(v, int):
            v = ord(v)
        sum = sum + v
        sum = sum & 0xffffffff  # Necessary?
sum = (sum >> 16) + (sum & 0xffff)
sum = sum + (sum >> 16)
answer = ~sum
answer = answer & 0xffff
# Swap bytes. Bugger me if I know why.
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
def receive_one_ping(my_socket, ID, timeout):
"""
receive the ping from the socket.
"""
timeLeft = timeout
while True:
startedSelect = default_timer()
whatReady = select.select([my_socket], [], [], timeLeft)
howLongInSelect = (default_timer() - startedSelect)
if whatReady[0] == []: # Timeout
return
timeReceived = default_timer()
recPacket, addr = my_socket.recvfrom(1024)
icmpHeader = recPacket[20:28]
type, code, checksum, packetID, sequence = struct.unpack(b"bbHHh", icmpHeader)
# Filters out the echo request itself.
# This can be tested by pinging 127.0.0.1
# You'll see your own request
if type != 8 and packetID == ID:
bytesInDouble = struct.calcsize(b"d")
timeSent = struct.unpack(b"d", recPacket[28:28 + bytesInDouble])[0]
return timeReceived - timeSent
timeLeft = timeLeft - howLongInSelect
if timeLeft <= 0:
return
def send_one_ping(my_socket, dest_addr, ID):
"""
Send one ping to the given >dest_addr<.
"""
dest_addr = socket.gethostbyname(dest_addr)
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
my_checksum = 0
# Make a dummy heder with a 0 checksum.
header = struct.pack(b"bbHHh", ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)
bytesInDouble = struct.calcsize("d")
data = (192 - bytesInDouble) * b"Q"
data = struct.pack("d", default_timer()) + data
# Calculate the checksum on the data and the dummy header.
my_checksum = checksum(header + data)
# Now that we have the right checksum, we put that in. It's just easier
# to make up a new header than to stuff it into the dummy.
header = struct.pack(b"bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1)
packet = header + data
my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1
def do_one(dest_addr, timeout):
"""
Returns either the delay (in seconds) or none on timeout.
"""
icmp = socket.getprotobyname("icmp")
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, icmp)
my_ID = os.getpid() & 0xFFFF
send_one_ping(my_socket, dest_addr, my_ID)
delay = receive_one_ping(my_socket, my_ID, timeout)
my_socket.close()
return delay
def verbose_ping(dest_addr, timeout=2, count=4, interval=1.0):
"""
Send >count< ping to >dest_addr< with the given >timeout< and display
the result.
"""
ping_succeeded = False
for i in xrange(count):
print("ping %s..." % dest_addr, end=' ')
try:
delay = do_one(dest_addr, timeout)
except socket.gaierror as e:
print("failed. (socket error: '%s')" % e[1])
break
        if delay is None:
            print("failed. (timeout within %ssec.)" % timeout)
        else:
            time.sleep(max(0, interval - delay))
print("got ping in %0.4fms\n" % (delay * 1000))
ping_succeeded = True
return ping_succeeded
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="send ICMP ECHO_REQUEST to network hosts")
parser.add_argument("destination", help="host to ping")
parser.add_argument("-W", "--timeout", help="specify a timeout", type=float, default=2)
parser.add_argument("-c", "--count", help="stop after sending this much ECHO_REQUEST packkets", type=int, default=5)
parser.add_argument("-i", "--interval", help="Wait the specified time between each ping", type=float, default=1.0)
ns = parser.parse_args()
s = verbose_ping(ns.destination, ns.timeout, ns.count, ns.interval)
if s:
sys.exit(0)
else:
sys.exit(1)
| mit | 9c684c9afe73e0b8adf7debf4f09e884 | 32.372951 | 144 | 0.635392 | 3.604692 | false | false | false | false |
pyvisa/pyvisa | pyvisa/resources/gpib.py | 1 | 7862 | # -*- coding: utf-8 -*-
"""High level wrapper for GPIB resources.
This file is part of PyVISA.
:copyright: 2014-2022 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from time import perf_counter
from typing import Tuple
from .. import attributes, constants
from ..attributes import Attribute
from .messagebased import ControlRenMixin, MessageBasedResource
from .resource import Resource
class _GPIBMixin(ControlRenMixin):
"""Common attributes and methods of GPIB Instr and Interface."""
#: Primary address of the GPIB device used by the given session.
primary_address: Attribute[int] = attributes.AttrVI_ATTR_GPIB_PRIMARY_ADDR()
#: Secondary address of the GPIB device used by the given session.
secondary_address: Attribute[int] = attributes.AttrVI_ATTR_GPIB_SECONDARY_ADDR()
#: Current state of the GPIB REN (Remote ENable) interface line.
remote_enabled: Attribute[
constants.LineState
] = attributes.AttrVI_ATTR_GPIB_REN_STATE()
@Resource.register(constants.InterfaceType.gpib, "INSTR")
class GPIBInstrument(_GPIBMixin, MessageBasedResource):
"""Communicates with to devices of type GPIB::<primary address>[::INSTR]
More complex resource names can be specified with the following grammar:
GPIB[board]::primary address[::secondary address][::INSTR]
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Whether to unaddress the device (UNT and UNL) after each read or write operation.
enable_unaddressing: Attribute[bool] = attributes.AttrVI_ATTR_GPIB_UNADDR_EN()
#: Whether to use repeat addressing before each read or write operation.
enable_repeat_addressing: Attribute[bool] = attributes.AttrVI_ATTR_GPIB_READDR_EN()
def wait_for_srq(self, timeout: int = 25000) -> None:
"""Wait for a serial request (SRQ) coming from the instrument.
Note that this method is not ended when *another* instrument signals an
SRQ, only *this* instrument.
Parameters
----------
timeout : int
            Maximum waiting time in milliseconds. Default: 25000 (milliseconds).
None means waiting forever if necessary.
"""
self.enable_event(
constants.EventType.service_request, constants.EventMechanism.queue
)
if timeout and not (0 <= timeout <= 4294967295):
raise ValueError("timeout value is invalid")
starting_time = perf_counter()
while True:
if timeout is None:
adjusted_timeout = constants.VI_TMO_INFINITE
else:
adjusted_timeout = int(
(starting_time + timeout / 1e3 - perf_counter()) * 1e3
)
if adjusted_timeout < 0:
adjusted_timeout = 0
self.wait_on_event(constants.EventType.service_request, adjusted_timeout)
if self.stb & 0x40:
break
self.discard_events(
constants.EventType.service_request, constants.EventMechanism.queue
)
@Resource.register(constants.InterfaceType.gpib, "INTFC")
class GPIBInterface(_GPIBMixin, MessageBasedResource):
"""Communicates with to devices of type GPIB::INTFC
More complex resource names can be specified with the following grammar:
GPIB[board]::INTFC
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Is the specified GPIB interface currently the system controller.
is_system_controller: Attribute[
bool
] = attributes.AttrVI_ATTR_GPIB_SYS_CNTRL_STATE()
#: Is the specified GPIB interface currently CIC (Controller In Charge).
is_controller_in_charge: Attribute[bool] = attributes.AttrVI_ATTR_GPIB_CIC_STATE()
#: Current state of the GPIB ATN (ATtentioN) interface line.
atn_state: Attribute[constants.LineState] = attributes.AttrVI_ATTR_GPIB_ATN_STATE()
#: Current state of the GPIB NDAC (Not Data ACcepted) interface line.
ndac_state: Attribute[
constants.LineState
] = attributes.AttrVI_ATTR_GPIB_NDAC_STATE()
#: Is the GPIB interface currently addressed to talk or listen, or is not addressed.
address_state: Attribute[
constants.LineState
] = attributes.AttrVI_ATTR_GPIB_ADDR_STATE()
def send_command(self, data: bytes) -> Tuple[int, constants.StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
data : bytes
Command to write.
Returns
-------
int
Number of bytes written
constants.StatusCode
Return value of the library call.
"""
return self.visalib.gpib_command(self.session, data)
def control_atn(self, mode: constants.ATNLineOperation) -> constants.StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
mode : constants.ATNLineOperation
Specifies the state of the ATN line and optionally the local active
controller state.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return self.visalib.gpib_control_atn(self.session, mode)
def pass_control(
self, primary_address: int, secondary_address: int
) -> constants.StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address,
this parameter should contain the value Constants.NO_SEC_ADDR.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return self.visalib.gpib_pass_control(
self.session, primary_address, secondary_address
)
def send_ifc(self) -> constants.StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
"""
return self.visalib.gpib_send_ifc(self.session)
def group_execute_trigger(
self, *resources: GPIBInstrument
) -> Tuple[int, constants.StatusCode]:
"""
Parameters
----------
resources : GPIBInstrument
GPIB resources to which to send the group trigger.
Returns
-------
int
Number of bytes written as part of sending the GPIB commands.
constants.StatusCode
Return value of the library call.
"""
for resource in resources:
if not isinstance(resource, GPIBInstrument):
                raise ValueError('%r is not a GPIBInstrument' % (resource,))
# TODO: check that all resources are in the same board.
if not self.is_controller_in_charge:
self.send_ifc()
command = [
0x40,
0x20 + 31,
] # broadcast TAD#0 and "UNL" (don't listen) to all devices
for resource in resources:
            # tell each target device to listen
command.append(0x20 + resource.primary_address)
# send GET ('group execute trigger')
command.append(0x08)
return self.send_command(bytes(command))
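# --- Illustrative usage (editor's addition, not part of the original module) ---
# A hedged sketch of a group execute trigger; the resource names are
# hypothetical, and real GPIB hardware plus a VISA backend are required:
#
#   import pyvisa
#   rm = pyvisa.ResourceManager()
#   intfc = rm.open_resource('GPIB0::INTFC')
#   inst_a = rm.open_resource('GPIB0::11::INSTR')
#   inst_b = rm.open_resource('GPIB0::12::INSTR')
#   intfc.group_execute_trigger(inst_a, inst_b)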
| mit | f9a2a4a7de0d81ac5f090782a6c306d4 | 32.033613 | 88 | 0.641313 | 4.310307 | false | false | false | false |
ywangd/stash | bin/sort.py | 1 | 1035 | # -*- coding: utf-8 -*-
"""Sort standard input or given files to standard output"""
from __future__ import print_function
import os
import sys
import fileinput
import argparse
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('files', nargs='*', help='files to sort')
ap.add_argument('-r', '--reverse', action='store_true', default=False, help='reverse the result of comparisons')
ns = ap.parse_args(args)
def _print(lines):
if lines is not None:
lines = sorted(lines)
if ns.reverse:
lines = lines[::-1]
print(''.join(lines))
fileinput.close() # in case it is not closed
try:
lines = None
for line in fileinput.input(ns.files, openhook=fileinput.hook_encoded("utf-8")):
if fileinput.isfirstline():
_print(lines)
lines = []
lines.append(line)
_print(lines)
finally:
fileinput.close()
if __name__ == '__main__':
main(sys.argv[1:])
| mit | a49d857a17870af0972553e1fbc20c4d | 25.538462 | 116 | 0.573913 | 3.90566 | false | false | false | false |
ywangd/stash | bin/zip.py | 1 | 1603 | # -*- coding: utf-8 -*-
""" Package and compress (archive) files and directories """
from __future__ import print_function
import os
import sys
import argparse
import zipfile
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('zipfile', help='')
ap.add_argument('list', nargs='+', help='')
ap.add_argument('-v', '--verbose', action='store_true', help='be more chatty')
ns = ap.parse_args(args)
relroot = os.path.abspath(os.path.dirname(ns.zipfile))
with zipfile.ZipFile(ns.zipfile, "w", zipfile.ZIP_DEFLATED) as outs:
for path in ns.list:
if os.path.isfile(path):
if ns.verbose:
print(path)
arcname = os.path.relpath(path, relroot)
outs.write(path, arcname=arcname)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
this_relroot = os.path.relpath(root, relroot)
# add directory (needed for empty dirs)
outs.write(root, arcname=this_relroot)
if ns.verbose:
print(this_relroot)
for f in files:
filename = os.path.join(root, f)
if os.path.isfile(filename): # regular files only
if ns.verbose:
print(filename)
arcname = os.path.join(this_relroot, f)
outs.write(filename, arcname=arcname)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | ceab8b7bebfa60e6bae6eb722e01f203 | 34.622222 | 82 | 0.520274 | 4.174479 | false | false | false | false |
pyvisa/pyvisa | pyvisa/resources/usb.py | 1 | 5017 | # -*- coding: utf-8 -*-
"""High level wrapper for USB resources.
This file is part of PyVISA.
:copyright: 2014-2022 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .. import attributes, constants
from ..attributes import Attribute
from .messagebased import ControlRenMixin, MessageBasedResource
class USBCommon(MessageBasedResource):
"""Common class for USB resources."""
#: USB interface number used by the given session.
interface_number: Attribute[int] = attributes.AttrVI_ATTR_USB_INTFC_NUM()
#: USB serial number of this device.
serial_number: Attribute[str] = attributes.AttrVI_ATTR_USB_SERIAL_NUM()
#: USB protocol used by this USB interface.
usb_protocol: Attribute[int] = attributes.AttrVI_ATTR_USB_PROTOCOL()
#: Maximum size of data that will be stored by any given USB interrupt.
maximum_interrupt_size: Attribute[int] = attributes.AttrVI_ATTR_USB_MAX_INTR_SIZE()
#: Manufacturer name.
manufacturer_name: Attribute[str] = attributes.AttrVI_ATTR_MANF_NAME()
#: Manufacturer identification number of the device.
manufacturer_id: Attribute[int] = attributes.AttrVI_ATTR_MANF_ID()
#: Model name of the device.
model_name: Attribute[str] = attributes.AttrVI_ATTR_MODEL_NAME()
#: Model code for the device.
model_code: Attribute[int] = attributes.AttrVI_ATTR_MODEL_CODE()
@MessageBasedResource.register(constants.InterfaceType.usb, "INSTR")
class USBInstrument(ControlRenMixin, USBCommon):
"""USB INSTR resources USB::manufacturer ID::model code::serial number
More complex resource names can be specified with the following grammar:
USB[board]::manufacturer ID::model code::serial number[::USB interface number][::INSTR]
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Whether the device is 488.2 compliant.
is_4882_compliant: Attribute[bool] = attributes.AttrVI_ATTR_4882_COMPLIANT()
def control_in(
self,
request_type_bitmap_field: int,
request_id: int,
request_value: int,
index: int,
length: int = 0,
) -> bytes:
"""Performs a USB control pipe transfer from the device.
Parameters
----------
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
length : int
wLength parameter of the setup stage of a USB control transfer.
This value also specifies the size of the data buffer to receive
the data from the optional data stage of the control transfer.
Returns
-------
bytes
The data buffer that receives the data from the optional data stage
of the control transfer.
"""
return self.visalib.usb_control_in(
self.session,
request_type_bitmap_field,
request_id,
request_value,
index,
length,
)[0]
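
    # Illustrative sketch only -- every numeric value below is an assumption
    # for a hypothetical vendor-specific request, not a documented device API:
    #
    #   status = instr.control_in(
    #       request_type_bitmap_field=0xC0,  # device-to-host | vendor | device recipient
    #       request_id=0x01,
    #       request_value=0x0000,
    #       index=0x0000,
    #       length=1,
    #   )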
def control_out(
self,
request_type_bitmap_field: int,
request_id: int,
request_value: int,
index: int,
data: bytes = b"",
):
"""Performs a USB control pipe transfer to the device.
Parameters
----------
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
data : bytes
The data buffer that sends the data in the optional data stage of
the control transfer.
"""
return self.visalib.usb_control_out(
self.session,
request_type_bitmap_field,
request_id,
request_value,
index,
data,
)
@MessageBasedResource.register(constants.InterfaceType.usb, "RAW")
class USBRaw(USBCommon):
"""USB RAW resources: USB::manufacturer ID::model code::serial number::RAW
More complex resource names can be specified with the following grammar:
USB[board]::manufacturer ID::model code::serial number[::USB interface number]::RAW
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
| mit | 0b5e94e8fe36aeb61a7a4edca9a26cff | 33.363014 | 95 | 0.646203 | 4.412489 | false | false | false | false |
arogozhnikov/einops | einops/einops.py | 1 | 33737 | import functools
import itertools
import string
import typing
from collections import OrderedDict
from typing import Set, Tuple, List, Dict, Union, Callable, Optional, TypeVar, cast, Any
if typing.TYPE_CHECKING:
import numpy as np
from . import EinopsError
from ._backends import get_backend
from .parsing import ParsedExpression, _ellipsis, AnonymousAxis
Tensor = TypeVar('Tensor')
ReductionCallable = Callable[[Tensor, Tuple[int, ...]], Tensor]
Reduction = Union[str, ReductionCallable]
_reductions = ('min', 'max', 'sum', 'mean', 'prod')
# magic integers are required to stay within
# traceable subset of language
_ellipsis_not_in_parenthesis: List[int] = [-999]
_unknown_axis_length = -999999
def is_ellipsis_not_in_parenthesis(group: List[int]) -> bool:
if len(group) != 1:
return False
return group[0] == -999
def _product(sequence: List[int]) -> int:
""" minimalistic product that works both with numbers and symbols. Supports empty lists """
result = 1
for element in sequence:
result *= element
return result
def _reduce_axes(tensor, reduction_type: Reduction, reduced_axes: List[int], backend):
if callable(reduction_type):
# custom callable
return reduction_type(tensor, tuple(reduced_axes))
else:
# one of built-in operations
if len(reduced_axes) == 0:
return tensor
assert reduction_type in _reductions
if reduction_type == 'mean':
if not backend.is_float_type(tensor):
raise NotImplementedError('reduce_mean is not available for non-floating tensors')
return backend.reduce(tensor, reduction_type, tuple(reduced_axes))
def _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes):
# 'collapses' neighboring axes if those participate in the result pattern in the same order
# TODO add support for added_axes
assert len(axes_reordering) + len(reduced_axes) == len(init_shapes)
# joining consecutive axes that will be reduced
# possibly we can skip this if all backends can optimize this (not sure)
reduced_axes = tuple(sorted(reduced_axes))
for i in range(len(reduced_axes) - 1)[::-1]:
if reduced_axes[i] + 1 == reduced_axes[i + 1]:
removed_axis = reduced_axes[i + 1]
removed_length = init_shapes[removed_axis]
init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1:]
init_shapes[removed_axis - 1] *= removed_length
reduced_axes = reduced_axes[:i + 1] + tuple(axis - 1 for axis in reduced_axes[i + 2:])
# removing axes that are moved together during reshape
def build_mapping():
init_to_final = {}
for axis in range(len(init_shapes)):
if axis in reduced_axes:
init_to_final[axis] = None
else:
after_reduction = sum(x is not None for x in init_to_final.values())
init_to_final[axis] = list(axes_reordering).index(after_reduction)
return init_to_final
init_axis_to_final_axis = build_mapping()
for init_axis in range(len(init_shapes) - 1)[::-1]:
if init_axis_to_final_axis[init_axis] is None:
continue
if init_axis_to_final_axis[init_axis + 1] is None:
continue
if init_axis_to_final_axis[init_axis] + 1 == init_axis_to_final_axis[init_axis + 1]:
removed_axis = init_axis + 1
removed_length = init_shapes[removed_axis]
removed_axis_after_reduction = sum(x not in reduced_axes for x in range(removed_axis))
reduced_axes = tuple(axis if axis < removed_axis else axis - 1 for axis in reduced_axes)
init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1:]
init_shapes[removed_axis - 1] *= removed_length
old_reordering = axes_reordering
axes_reordering = []
for axis in old_reordering:
if axis == removed_axis_after_reduction:
pass
elif axis < removed_axis_after_reduction:
axes_reordering.append(axis)
else:
axes_reordering.append(axis - 1)
init_axis_to_final_axis = build_mapping()
return init_shapes, reduced_axes, axes_reordering, final_shapes
CookedRecipe = Tuple[List[int], List[int], List[int], Dict[int, int], List[int]]
class TransformRecipe:
"""
Recipe describes actual computation pathway.
Recipe can be applied to a tensor or variable.
"""
# structure is non-mutable. In future, this can be non-mutable dataclass (python 3.7+)
def __init__(self,
# list of expressions (or just sizes) for elementary axes as they appear in left expression.
# this is what (after computing unknown parts) will be a shape after first transposition.
# If ellipsis is present, it forms one dimension here (in the right position).
elementary_axes_lengths: List[int],
# each dimension in input can help to reconstruct length of one elementary axis
# or verify one of dimensions. Each element points to element of elementary_axes_lengths
input_composite_axes: List[Tuple[List[int], List[int]]],
# indices of axes to be squashed
reduced_elementary_axes: List[int],
# in which order should axes be reshuffled after reduction
axes_permutation: List[int],
# at which positions which of elementary axes should appear
added_axes: Dict[int, int],
# ids of axes as they appear in result, again pointers to elementary_axes_lengths,
# only used to infer result dimensions
output_composite_axes: List[List[int]],
# positions of ellipsis in lhs and rhs of expression
ellipsis_position_in_lhs: Optional[int] = None,
):
self.elementary_axes_lengths: List[int] = elementary_axes_lengths
self.input_composite_axes: List[Tuple[List[int], List[int]]] = input_composite_axes
self.output_composite_axes: List[List[int]] = output_composite_axes
self.axes_permutation: List[int] = axes_permutation
self.added_axes: Dict[int, int] = added_axes
# This is redundant information, but more convenient to use
self.reduced_elementary_axes: List[int] = reduced_elementary_axes
# setting to a large number to avoid handling Nones in reconstruct_from_shape
self.ellipsis_position_in_lhs: int = ellipsis_position_in_lhs if ellipsis_position_in_lhs is not None else 10000
def _reconstruct_from_shape_uncached(self: TransformRecipe, shape: List[int]) -> CookedRecipe:
"""
Reconstruct all actual parameters using shape.
Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet)
known axes can be integers or symbols, but not Nones.
"""
axes_lengths: List[int] = list(self.elementary_axes_lengths)
if self.ellipsis_position_in_lhs != 10000:
if len(shape) < len(self.input_composite_axes) - 1:
raise EinopsError('Expected at least {} dimensions, got {}'.format(
len(self.input_composite_axes) - 1, len(shape)))
else:
if len(shape) != len(self.input_composite_axes):
raise EinopsError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
ellipsis_shape: List[int] = []
for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composite_axes):
before_ellipsis = input_axis
after_ellipsis = input_axis + len(shape) - len(self.input_composite_axes)
if input_axis == self.ellipsis_position_in_lhs:
assert len(known_axes) == 0 and len(unknown_axes) == 1
unknown_axis: int = unknown_axes[0]
ellipsis_shape = shape[before_ellipsis:after_ellipsis + 1]
for d in ellipsis_shape:
if d is None:
raise EinopsError("Couldn't infer shape for one or more axes represented by ellipsis")
total_dim_size: int = _product(ellipsis_shape)
axes_lengths[unknown_axis] = total_dim_size
else:
if input_axis < self.ellipsis_position_in_lhs:
length = shape[before_ellipsis]
else:
length = shape[after_ellipsis]
known_product = 1
for axis in known_axes:
known_product *= axes_lengths[axis]
if len(unknown_axes) == 0:
if isinstance(length, int) and isinstance(known_product, int) and length != known_product:
raise EinopsError('Shape mismatch, {} != {}'.format(length, known_product))
# this is enforced when recipe is created
# elif len(unknown_axes) > 1:
# raise EinopsError(
# "Lengths of two or more axes in parenthesis not provided (dim={}), can't infer dimensions".
# format(known_product)
# )
else:
if isinstance(length, int) and isinstance(known_product, int) and length % known_product != 0:
raise EinopsError("Shape mismatch, can't divide axis of length {} in chunks of {}".format(
length, known_product))
unknown_axis = unknown_axes[0]
inferred_length: int = length // known_product
axes_lengths[unknown_axis] = inferred_length
# at this point all axes_lengths are computed (either have values or variables, but not Nones)
# TODO more readable expression
init_shapes = axes_lengths[:len(axes_lengths) - len(self.added_axes)]
final_shapes: List[int] = []
for output_axis, grouping in enumerate(self.output_composite_axes):
if is_ellipsis_not_in_parenthesis(grouping):
final_shapes.extend(ellipsis_shape)
else:
lengths = [axes_lengths[elementary_axis] for elementary_axis in grouping]
final_shapes.append(_product(lengths))
reduced_axes = self.reduced_elementary_axes
axes_reordering = self.axes_permutation
added_axes: Dict[int, int] = {
pos: axes_lengths[pos_in_elementary] for pos, pos_in_elementary in self.added_axes.items()}
# if optimize:
# assert len(self.added_axes) == 0
# return _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes)
return init_shapes, reduced_axes, axes_reordering, added_axes, final_shapes
_reconstruct_from_shape = functools.lru_cache(1024)(_reconstruct_from_shape_uncached)
def _apply_recipe(recipe: TransformRecipe, tensor: Tensor, reduction_type: Reduction) -> Tensor:
    # this method works for all backends, but is not compilable
backend = get_backend(tensor)
init_shapes, reduced_axes, axes_reordering, added_axes, final_shapes = \
_reconstruct_from_shape(recipe, backend.shape(tensor))
tensor = backend.reshape(tensor, init_shapes)
tensor = _reduce_axes(tensor, reduction_type=reduction_type, reduced_axes=reduced_axes, backend=backend)
tensor = backend.transpose(tensor, axes_reordering)
if len(added_axes) > 0:
tensor = backend.add_axes(tensor, n_axes=len(axes_reordering) + len(added_axes), pos2len=added_axes)
return backend.reshape(tensor, final_shapes)
@functools.lru_cache(256)
def _prepare_transformation_recipe(pattern: str,
operation: Reduction,
axes_lengths: Tuple[Tuple, ...]) -> TransformRecipe:
""" Perform initial parsing of pattern and provided supplementary info
axes_lengths is a tuple of tuples (axis_name, axis_length)
"""
left_str, rght_str = pattern.split('->')
left = ParsedExpression(left_str)
rght = ParsedExpression(rght_str)
# checking that axes are in agreement - new axes appear only in repeat, while disappear only in reduction
if not left.has_ellipsis and rght.has_ellipsis:
raise EinopsError('Ellipsis found in right side, but not left side of a pattern {}'.format(pattern))
if left.has_ellipsis and left.has_ellipsis_parenthesized:
        raise EinopsError('Ellipsis inside parenthesis on the left side is not allowed: {}'.format(pattern))
if operation == 'rearrange':
difference = set.symmetric_difference(left.identifiers, rght.identifiers)
if left.has_non_unitary_anonymous_axes or rght.has_non_unitary_anonymous_axes:
raise EinopsError('Non-unitary anonymous axes are not supported in rearrange (exception is length 1)')
if len(difference) > 0:
raise EinopsError('Identifiers only on one side of expression (should be on both): {}'.format(difference))
elif operation == 'repeat':
difference = set.difference(left.identifiers, rght.identifiers)
if len(difference) > 0:
raise EinopsError('Unexpected identifiers on the left side of repeat: {}'.format(difference))
axes_without_size = set.difference({ax for ax in rght.identifiers if not isinstance(ax, AnonymousAxis)},
{*left.identifiers, *(ax for ax, _ in axes_lengths)})
if len(axes_without_size) > 0:
raise EinopsError('Specify sizes for new axes in repeat: {}'.format(axes_without_size))
elif operation in _reductions or callable(operation):
difference = set.difference(rght.identifiers, left.identifiers)
if len(difference) > 0:
raise EinopsError('Unexpected identifiers on the right side of reduce {}: {}'.format(operation, difference))
else:
raise EinopsError('Unknown reduction {}. Expect one of {}.'.format(operation, _reductions))
# parsing all dimensions to find out lengths
axis_name2known_length: Dict[Union[str, AnonymousAxis], int] = OrderedDict()
for composite_axis in left.composition:
for axis_name in composite_axis:
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = _unknown_axis_length
# axis_ids_after_first_reshape = range(len(axis_name2known_length)) at this point
repeat_axes_names = []
for axis_name in rght.identifiers:
if axis_name not in axis_name2known_length:
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = _unknown_axis_length
repeat_axes_names.append(axis_name)
axis_name2position = {name: position for position, name in enumerate(axis_name2known_length)}
reduced_axes: List[int] = [position for axis, position in axis_name2position.items() if
axis not in rght.identifiers]
reduced_axes = list(sorted(reduced_axes))
for elementary_axis, axis_length in axes_lengths:
if not ParsedExpression.check_axis_name(elementary_axis):
raise EinopsError('Invalid name for an axis', elementary_axis)
if elementary_axis not in axis_name2known_length:
raise EinopsError('Axis {} is not used in transform'.format(elementary_axis))
axis_name2known_length[elementary_axis] = axis_length
input_axes_known_unknown = []
# some of shapes will be inferred later - all information is prepared for faster inference
for composite_axis in left.composition:
known: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] != _unknown_axis_length}
unknown: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] == _unknown_axis_length}
if len(unknown) > 1:
raise EinopsError('Could not infer sizes for {}'.format(unknown))
assert len(unknown) + len(known) == len(composite_axis)
input_axes_known_unknown.append(
([axis_name2position[axis] for axis in known],
[axis_name2position[axis] for axis in unknown])
)
axis_position_after_reduction: Dict[str, int] = {}
for axis_name in itertools.chain(*left.composition):
if axis_name in rght.identifiers:
axis_position_after_reduction[axis_name] = len(axis_position_after_reduction)
result_axes_grouping: List[List[int]] = []
for composite_axis in rght.composition:
if composite_axis == _ellipsis:
result_axes_grouping.append(_ellipsis_not_in_parenthesis)
else:
result_axes_grouping.append([axis_name2position[axis] for axis in composite_axis])
ordered_axis_right = list(itertools.chain(*rght.composition))
axes_permutation = [
axis_position_after_reduction[axis] for axis in ordered_axis_right if axis in left.identifiers]
added_axes = {i: axis_name2position[axis_name] for i, axis_name in enumerate(ordered_axis_right)
if axis_name not in left.identifiers}
ellipsis_left = None if _ellipsis not in left.composition else left.composition.index(_ellipsis)
return TransformRecipe(
elementary_axes_lengths=list(axis_name2known_length.values()),
input_composite_axes=input_axes_known_unknown,
reduced_elementary_axes=reduced_axes,
axes_permutation=axes_permutation,
added_axes=added_axes,
output_composite_axes=result_axes_grouping,
ellipsis_position_in_lhs=ellipsis_left,
)
def reduce(tensor: Tensor, pattern: str, reduction: Reduction, **axes_lengths: int) -> Tensor:
"""
einops.reduce provides combination of reordering and reduction using reader-friendly notation.
Examples for reduce operation:
```python
>>> x = np.random.randn(100, 32, 64)
# perform max-reduction on the first axis
>>> y = reduce(x, 't b c -> b c', 'max')
# same as previous, but with clearer axes meaning
>>> y = reduce(x, 'time batch channel -> batch channel', 'max')
>>> x = np.random.randn(10, 20, 30, 40)
# 2d max-pooling with kernel size = 2 * 2 for image processing
>>> y1 = reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h2=2, w2=2)
# if one wants to go back to the original height and width, depth-to-space trick can be applied
>>> y2 = rearrange(y1, 'b (c h2 w2) h1 w1 -> b c (h1 h2) (w1 w2)', h2=2, w2=2)
>>> assert parse_shape(x, 'b _ h w') == parse_shape(y2, 'b _ h w')
# Adaptive 2d max-pooling to 3 * 4 grid
>>> reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h1=3, w1=4).shape
(10, 20, 3, 4)
# Global average pooling
>>> reduce(x, 'b c h w -> b c', 'mean').shape
(10, 20)
# Subtracting mean over batch for each channel
>>> y = x - reduce(x, 'b c h w -> () c () ()', 'mean')
# Subtracting per-image mean for each channel
>>> y = x - reduce(x, 'b c h w -> b c () ()', 'mean')
```
Parameters:
        tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch, mxnet.ndarray).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, reduction pattern
reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
alternatively, a callable f(tensor, reduced_axes) -> tensor can be provided.
This allows using various reductions, examples: np.max, tf.reduce_logsumexp, torch.var, etc.
axes_lengths: any additional specifications for dimensions
Returns:
tensor of the same type as input
"""
try:
hashable_axes_lengths = tuple(sorted(axes_lengths.items()))
recipe = _prepare_transformation_recipe(pattern, reduction, axes_lengths=hashable_axes_lengths)
return _apply_recipe(recipe, tensor, reduction_type=reduction)
except EinopsError as e:
message = ' Error while processing {}-reduction pattern "{}".'.format(reduction, pattern)
if not isinstance(tensor, list):
message += '\n Input tensor shape: {}. '.format(get_backend(tensor).shape(tensor))
else:
message += '\n Input is list. '
message += 'Additional info: {}.'.format(axes_lengths)
raise EinopsError(message + '\n {}'.format(e))
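
# Illustrative sketch: `reduction` may also be a callable taking (tensor, axes);
# einops passes the reduced axes as a tuple (see _reduce_axes above).
# np.median is just an example of a reduction that is not in _reductions:
#
#   import numpy as np
#   y = reduce(x, 'b c h w -> b c', lambda t, axes: np.median(t, axis=axes))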
def rearrange(tensor: Union[Tensor, List[Tensor]], pattern: str, **axes_lengths) -> Tensor:
"""
einops.rearrange is a reader-friendly smart element reordering for multidimensional tensors.
This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
stack, concatenate and other operations.
Examples for rearrange operation:
```python
# suppose we have a set of 32 images in "h w c" format (height-width-channel)
>>> images = [np.random.randn(30, 40, 3) for _ in range(32)]
# stack along first (batch) axis, output is a single array
>>> rearrange(images, 'b h w c -> b h w c').shape
(32, 30, 40, 3)
# concatenate images along height (vertical axis), 960 = 32 * 30
>>> rearrange(images, 'b h w c -> (b h) w c').shape
(960, 40, 3)
# concatenated images along horizontal axis, 1280 = 32 * 40
>>> rearrange(images, 'b h w c -> h (b w) c').shape
(30, 1280, 3)
# reordered axes to "b c h w" format for deep learning
>>> rearrange(images, 'b h w c -> b c h w').shape
(32, 3, 30, 40)
# flattened each image into a vector, 3600 = 30 * 40 * 3
>>> rearrange(images, 'b h w c -> b (c h w)').shape
(32, 3600)
# split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
>>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
(128, 15, 20, 3)
# space-to-depth operation
>>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
(32, 15, 20, 12)
```
When composing axes, C-order enumeration used (consecutive elements have different last axis)
Find more examples in einops tutorial.
Parameters:
tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch, mxnet.ndarray).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, rearrangement pattern
axes_lengths: any additional specifications for dimensions
Returns:
tensor of the same type as input. If possible, a view to the original tensor is returned.
"""
if isinstance(tensor, list):
if len(tensor) == 0:
raise TypeError("Rearrange can't be applied to an empty list")
tensor = get_backend(tensor[0]).stack_on_zeroth_dimension(tensor)
return reduce(cast(Tensor, tensor), pattern, reduction='rearrange', **axes_lengths)
def repeat(tensor: Tensor, pattern: str, **axes_lengths) -> Tensor:
"""
einops.repeat allows reordering elements and repeating them in arbitrary combinations.
This operation includes functionality of repeat, tile, broadcast functions.
Examples for repeat operation:
```python
# a grayscale image (of shape height x width)
>>> image = np.random.randn(30, 40)
# change it to RGB format by repeating in each channel
>>> repeat(image, 'h w -> h w c', c=3).shape
(30, 40, 3)
# repeat image 2 times along height (vertical axis)
>>> repeat(image, 'h w -> (repeat h) w', repeat=2).shape
(60, 40)
# repeat image 2 time along height and 3 times along width
>>> repeat(image, 'h w -> (h2 h) (w3 w)', h2=2, w3=3).shape
(60, 120)
# convert each pixel to a small square 2x2. Upsample image by 2x
>>> repeat(image, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
(60, 80)
# pixelate image first by downsampling by 2x, then upsampling
>>> downsampled = reduce(image, '(h h2) (w w2) -> h w', 'mean', h2=2, w2=2)
>>> repeat(downsampled, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
(30, 40)
```
When composing axes, C-order enumeration used (consecutive elements have different last axis)
Find more examples in einops tutorial.
Parameters:
tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch, mxnet.ndarray).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, rearrangement pattern
axes_lengths: any additional specifications for dimensions
Returns:
Tensor of the same type as input. If possible, a view to the original tensor is returned.
"""
return reduce(tensor, pattern, reduction='repeat', **axes_lengths)
def parse_shape(x, pattern: str) -> dict:
"""
Parse a tensor shape to dictionary mapping axes names to their lengths.
```python
# Use underscore to skip the dimension in parsing.
>>> x = np.zeros([2, 3, 5, 7])
>>> parse_shape(x, 'batch _ h w')
{'batch': 2, 'h': 5, 'w': 7}
# `parse_shape` output can be used to specify axes_lengths for other operations:
>>> y = np.zeros([700])
>>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape
(2, 10, 5, 7)
```
For symbolic frameworks may return symbols, not integers.
Parameters:
x: tensor of any of supported frameworks
pattern: str, space separated names for axes, underscore means skip axis
Returns:
dict, maps axes names to their lengths
"""
exp = ParsedExpression(pattern, allow_underscore=True)
shape = get_backend(x).shape(x)
if exp.has_composed_axes():
raise RuntimeError("Can't parse shape with composite axes: {pattern} {shape}".format(
pattern=pattern, shape=shape))
if len(shape) != len(exp.composition):
if exp.has_ellipsis:
if len(shape) < len(exp.composition) - 1:
raise RuntimeError("Can't parse shape with this number of dimensions: {pattern} {shape}".format(
pattern=pattern, shape=shape))
else:
raise RuntimeError("Can't parse shape with different number of dimensions: {pattern} {shape}".format(
pattern=pattern, shape=shape))
if exp.has_ellipsis:
ellipsis_idx = exp.composition.index(_ellipsis)
composition = (exp.composition[:ellipsis_idx] +
['_'] * (len(shape) - len(exp.composition) + 1) +
exp.composition[ellipsis_idx + 1:])
else:
composition = exp.composition
result = {}
for (axis_name,), axis_length in zip(composition, shape): # type: ignore
if axis_name != '_':
result[axis_name] = axis_length
return result
# this one is probably not needed in the public API
def _enumerate_directions(x):
"""
For an n-dimensional tensor, returns tensors to enumerate each axis.
```python
x = np.zeros([2, 3, 4]) # or any other tensor
i, j, k = _enumerate_directions(x)
result = i + 2*j + 3*k
```
    `result[i, j, k] = i + 2j + 3k`, and `result` has the same shape as `x`
Works very similarly to numpy.ogrid (open indexing grid)
"""
backend = get_backend(x)
shape = backend.shape(x)
result = []
for axis_id, axis_length in enumerate(shape):
shape = [1] * len(shape)
shape[axis_id] = axis_length
result.append(backend.reshape(backend.arange(0, axis_length), shape))
return result
# to avoid importing numpy
np_ndarray = Any
def asnumpy(tensor) -> np_ndarray:
"""
Convert a tensor of an imperative framework (i.e. numpy/cupy/torch/gluon/etc.) to `numpy.ndarray`
Parameters:
tensor: tensor of any of known imperative framework
Returns:
`numpy.ndarray`, converted to numpy
"""
return get_backend(tensor).to_numpy(tensor)
def _validate_einsum_axis_name(axis_name):
if len(axis_name) == 0:
raise NotImplementedError("Singleton () axes are not yet supported in einsum.")
if len(axis_name) > 1:
raise NotImplementedError("Shape rearrangement is not yet supported in einsum.")
axis_name = axis_name[0]
if isinstance(axis_name, AnonymousAxis):
raise NotImplementedError("Anonymous axes are not yet supported in einsum.")
if len(axis_name) == 0:
raise RuntimeError("Encountered empty axis name in einsum.")
if not isinstance(axis_name, str):
raise RuntimeError("Axis name in einsum must be a string.")
@functools.lru_cache(256)
def _compactify_pattern_for_einsum(pattern: str) -> str:
if "->" not in pattern:
# numpy allows this, so make sure users
# don't accidentally do something like this.
raise ValueError("Einsum pattern must contain '->'.")
lefts_str, right_str = pattern.split('->')
lefts = [
ParsedExpression(left, allow_underscore=True, allow_duplicates=True)
for left in lefts_str.split(',')
]
right = ParsedExpression(right_str, allow_underscore=True)
# Start from 'a' and go up to 'Z'
output_axis_names = string.ascii_letters
i = 0
axis_name_mapping = {}
left_patterns = []
for left in lefts:
left_pattern = ""
for raw_axis_name in left.composition:
if raw_axis_name == _ellipsis:
left_pattern += '...'
continue
_validate_einsum_axis_name(raw_axis_name)
axis_name = raw_axis_name[0]
if axis_name not in axis_name_mapping:
if i >= len(output_axis_names):
raise RuntimeError("Too many axes in einsum.")
axis_name_mapping[axis_name] = output_axis_names[i]
i += 1
left_pattern += axis_name_mapping[axis_name]
left_patterns.append(left_pattern)
compact_pattern = ",".join(left_patterns) + "->"
for raw_axis_name in right.composition:
if raw_axis_name == _ellipsis:
compact_pattern += '...'
continue
_validate_einsum_axis_name(raw_axis_name)
axis_name = raw_axis_name[0]
if axis_name not in axis_name_mapping:
raise EinopsError(f"Unknown axis {axis_name} on right side of einsum {pattern}.")
compact_pattern += axis_name_mapping[axis_name]
return compact_pattern
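
# For illustration, assuming the pattern 'batch chan, chan out -> batch out':
# axis names are assigned letters in order of first appearance
# (batch -> 'a', chan -> 'b', out -> 'c'), so the function returns the compact
# numpy-style pattern 'ab,bc->ac'.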
# dunders in overloads turn arguments into positional-only.
# After python 3.7 EOL this should be replaced with '/' as the last argument.
@typing.overload
def einsum(__tensor: Tensor, __pattern: str) -> Tensor: ...
@typing.overload
def einsum(__tensor1: Tensor, __tensor2: Tensor, __pattern: str) -> Tensor: ...
@typing.overload
def einsum(__tensor1: Tensor, __tensor2: Tensor, __tensor3: Tensor, __pattern: str) -> Tensor: ...
@typing.overload
def einsum(__tensor1: Tensor, __tensor2: Tensor, __tensor3: Tensor, __tensor4: Tensor, __pattern: str) -> Tensor: ...
def einsum(*tensors_and_pattern: Union[Tensor, str]) -> Tensor:
"""
einops.einsum calls einsum operations with einops-style named
axes indexing, computing tensor products with an arbitrary
number of tensors. Unlike typical einsum syntax, here you must
pass tensors first, and then the pattern.
Also, note that rearrange operations such as `"(batch chan) out"`,
or singleton axes `()`, are not currently supported.
Examples:
For a given pattern such as:
```python
>>> x, y, z = np.random.randn(3, 20, 20, 20)
>>> output = einsum(x, y, z, "a b c, c b d, a g k -> a b k")
```
the following formula is computed:
```tex
output[a, b, k] =
\sum_{c, d, g} x[a, b, c] * y[c, b, d] * z[a, g, k]
```
where the summation over `c`, `d`, and `g` is performed
because those axes names do not appear on the right-hand side.
Let's see some additional examples:
```python
# Filter a set of images:
>>> batched_images = np.random.randn(128, 16, 16)
>>> filters = np.random.randn(16, 16, 30)
>>> result = einsum(batched_images, filters,
... "batch h w, h w channel -> batch channel")
>>> result.shape
(128, 30)
# Matrix multiplication, with an unknown input shape:
>>> batch_shape = (50, 30)
>>> data = np.random.randn(*batch_shape, 20)
>>> weights = np.random.randn(10, 20)
>>> result = einsum(weights, data,
... "out_dim in_dim, ... in_dim -> ... out_dim")
>>> result.shape
(50, 30, 10)
# Matrix trace on a single tensor:
>>> matrix = np.random.randn(10, 10)
>>> result = einsum(matrix, "i i ->")
>>> result.shape
()
```
Parameters:
tensors_and_pattern:
tensors: tensors of any supported library (numpy, tensorflow, pytorch, jax).
pattern: string, einsum pattern, with commas
separating specifications for each tensor.
pattern should be provided after all tensors.
Returns:
Tensor of the same type as input, after processing with einsum.
"""
if len(tensors_and_pattern) <= 1:
raise ValueError(
"`einops.einsum` takes at minimum two arguments: the tensors (at least one),"
" followed by the pattern."
)
pattern = tensors_and_pattern[-1]
if not isinstance(pattern, str):
raise ValueError(
"The last argument passed to `einops.einsum` must be a string,"
" representing the einsum pattern."
)
tensors = tensors_and_pattern[:-1]
pattern = _compactify_pattern_for_einsum(pattern)
return get_backend(tensors[0]).einsum(pattern, *tensors)
| mit | 052edfb206dfbc3436f01eeda05ccccb | 41.543506 | 120 | 0.632925 | 3.812952 | false | false | false | false |
ywangd/stash | bin/tar.py | 1 | 4605 | # -*- coding: utf-8 -*-
'''
Create and extract tar, gzip, bz2 archives.
Examples:
Create a gzip compressed archive:
tar -czvf test.tar.gz your_directory file1.py file2.py
Create a tar archive:
tar -cvf test.tar your_directory file1.py file2.py
Unpack a gzip archive:
tar -xzvf test.tar.gz
List Contents of gzip:
tar -tzf test.tar.gz
usage: tar.py [-h] [-c] [-v] [-t] [-j] [-z] [-x] [-f FILE] [-C DIRECTORY] [files [files ...]]
positional arguments:
files Create: Files/Dirs to add to archive. Extract:
Specific Files/Dirs to extract, default: all
optional arguments:
-h, --help show this help message and exit
-c, --create Creates a new archive
-v, --verbose Verbose output print.
-t, --list List Contents
-j, --bz2 Compress as bz2 format
-z, --gzip Compress as gzip format
-x, --extract Extract an archive.
  -f FILE, --file FILE  Archive filename.
  -C DIRECTORY, --directory DIRECTORY
                        Change to directory before processing remaining files
'''
from __future__ import print_function
import argparse
import os
import tarfile
def output_print(msg):
if args.verbose:
print(msg)
class MyFileObject(tarfile.ExFileObject):
def read(self, size, *args):
if self.position == self.size:
output_print("Extracting: %s" % self.name)
return tarfile.ExFileObject.read(self, size, *args)
def extract_members(members, extract):
for tarinfo in members:
for path in extract:
if tarinfo.name == path or tarinfo.name.startswith(path):
yield tarinfo
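
# For illustration: passing members=['docs'] to extract_all below extracts only
# the entries whose names are 'docs' or start with 'docs' (e.g. 'docs/readme.md'),
# since extract_members matches exact names and name prefixes.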
def extract_all(filename, members=None, directory=''):
if args.gzip:
output_print('Reading gzip file.')
tar = tarfile.open(filename, "r:gz")
elif args.bz2:
output_print('Reading bz2 file.')
tar = tarfile.open(filename, "r:bz2")
else:
output_print('Reading tar file.')
tar = tarfile.open(filename, "r:")
output_print('Extracting files.')
# check for specific file extraction
if members:
tar.extractall(path=directory, members=extract_members(tar, members))
else:
tar.extractall(path=directory)
tar.close()
print('Archive extracted.')
def create_tar(filename, files):
# Progress filter
def tar_filter(tarinfo):
output_print('Adding: %s' % tarinfo.name)
return tarinfo
if args.gzip:
output_print('Creating gzip file.')
tar = tarfile.open(filename, "w:gz")
elif args.bz2:
output_print('Creating bz2 file.')
tar = tarfile.open(filename, "w:bz2")
else:
output_print('Creating tar file.')
tar = tarfile.open(filename, "w")
    for name in files:
        # tar_filter reports each added member, so no separate print is needed here
        tar.add(name, filter=tar_filter)
tar.close()
print('Archive Created.')
def list_tar(filename):
if args.gzip:
tar = tarfile.open(filename, "r:gz")
elif args.bz2:
tar = tarfile.open(filename, "r:bz2")
else:
tar = tarfile.open(filename, "r:")
tar.list()
tar.close()
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-c', '--create', action='store_true', default=False, help='Creates a new archive')
ap.add_argument('-v', '--verbose', action='store_true', default=False, help='Verbose output print.')
ap.add_argument('-t', '--list', action='store_true', default=False, help='List Contents')
ap.add_argument('-j', '--bz2', action='store_true', default=False, help='Compress as bz2 format')
ap.add_argument('-z', '--gzip', action='store_true', default=False, help='Compress as gzip format')
ap.add_argument('-x', '--extract', action='store_true', default=False, help='Extract an archive.')
ap.add_argument('-f', '--file', action='store', help='Archive filename.')
ap.add_argument(
'-C',
'--directory',
action='store',
default='',
help='Change to directory before processing remaining files'
)
ap.add_argument(
'files',
action='store',
default=[],
nargs='*',
help='Create: Files/Dirs to add to archive.\nExtract: Specific Files/Dirs to extract, default: all',
)
args = ap.parse_args()
tarfile.TarFile.fileobject = MyFileObject
if args.list:
list_tar(os.path.expanduser(args.file))
elif args.create:
create_tar(os.path.expanduser(args.file), args.files)
elif args.extract:
extract_all(os.path.expanduser(args.file), args.files, directory=args.directory)
| mit | ebbb610f6ca49e5b246a78abcc383310 | 31.202797 | 108 | 0.60456 | 3.710717 | false | false | false | false |
ywangd/stash | lib/stashutils/extensions.py | 1 | 2520 | # -*- coding: utf-8 -*-
"""This module defines functions to interact with stash extensions."""
import os
import shutil
import io
from stash.system.shcommon import _STASH_EXTENSION_BIN_PATH as EBP
from stash.system.shcommon import _STASH_EXTENSION_MAN_PATH as EMP
from stash.system.shcommon import _STASH_EXTENSION_FSI_PATH as EFP
from stash.system.shcommon import _STASH_EXTENSION_PATCH_PATH as EPP
from stashutils.core import load_from_dir
from six import text_type, binary_type
# alias load_from_dir (so you can access it trough this namespace)
load_from_dir = load_from_dir
def create_file(dest, content):
"""
Creates a file at dest with content.
If content is a string or unicode, use it as the content.
Otherwise, use content.read() as the content.
"""
if not isinstance(content, (binary_type, text_type)):
content = content.read()
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
if isinstance(content, binary_type):
with io.open(dest, "wb") as f:
f.write(content)
elif isinstance(content, text_type):
with io.open(dest, "w", encoding="utf-8") as f:
f.write(content)
return dest
def create_page(name, content):
"""
Creates a manpage with name filled with content.
If content is a list or tuple, instead create a dir and fill it with pages
created from the elements of this list.
The list should consist of tuples of (ending, content)
"""
path = os.path.join(EMP, name)
if isinstance(content, (list, tuple)):
# create a bunch of pages
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for n, element in enumerate(content, 1):
ending, elementcontent = element
pagename = "{b}/page_{n}.{e}".format(n=n, e=ending, b=path)
create_page(pagename, elementcontent)
return path
else:
return create_file(path, content)
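
# For illustration (names are assumptions): create_page('mytool',
# [('txt', 'first page'), ('txt', 'second page')]) creates the directory
# <man-path>/mytool containing page_1.txt and page_2.txt.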
def create_command(name, content):
"""creates a script named name filled with content"""
path = os.path.join(EBP, name)
return create_file(path, content)
def create_fsi_file(name, content):
"""creates a fsi extension named name filled with content"""
path = os.path.join(EFP, name)
return create_file(path, content)
def create_patch_file(name, content):
"""creates a patch extension named name filled with content"""
path = os.path.join(EPP, name)
    return create_file(path, content)
| mit | 4ad313f03ce9f8a6916abfb9359f92f1 | 31.74026 | 78 | 0.668651 | 3.631124 | false | false | false | false |
ywangd/stash | lib/git/git-branch.py | 1 | 11974 | #coding: utf-8
""" git branch [-r | -a] [--abbrev=n | --no-abbrev\n
git branch [--set-upstream | --track | --no-track] [-l][-f] <branchname> <startpoint>
git branch (-m | -M) [<oldbranch>] <newbranch>
git branch (-d | -D) [-r] <branchname>…
git branch --edit-description [<branchname>]"""
from __future__ import print_function
import argparse
import os
import sys
from git.gitutils import (GitError, _get_repo, any_one, count_commits_between, find_revision_sha, get_remote_tracking_branch)
from six import iteritems
from six.moves import input
def branch(args):
repo = _get_repo()
parser = argparse.ArgumentParser(prog='git branch', description="List, create, or delete branches")
#list
list_grp = parser.add_mutually_exclusive_group(required=False)
    list_grp.add_argument('-r', '--remotes', action='store_true', help='list or delete remote-tracking branches')
list_grp.add_argument('-a', '--all', action='store_true', help='list both remote and local branches')
# move type commands
move_type = parser.add_mutually_exclusive_group(required=False)
move_type.add_argument(
'-m',
'--move',
nargs='+',
metavar=('[oldbranch]',
'newbranch'),
help='move/rename oldbranch or HEAD'
)
move_type.add_argument(
'-M',
nargs='+',
metavar=('[oldbranch]',
'newbranch'),
help='move/rename even if branch already exists'
)
# delete type commands
delete_flags = parser.add_mutually_exclusive_group(required=False)
delete_flags.add_argument(
'-d',
'--delete',
nargs=1,
metavar=('branchname'),
        help='delete branchname (TODO: require branch to be fully merged with upstream)'
)
delete_flags.add_argument('-D', nargs=1, metavar=('branchname'), help='Delete a branch irrespective of its merged status.')
# misc flags
parser.add_argument(
'-v',
'--verbose',
action='count',
help='When in list mode, show sha1 and commit subject line for each head, along with relationship to upstream branch (if any). If given twice, print the name of the upstream branch, as well (see also git remote show <remote>).'
)
parser.add_argument(
'-f',
'--force',
action='store_true',
help='Reset <branchname> to <startpoint> if <branchname> exists already. Without -f git branch refuses to change an existing branch.'
)
abbrevgrp = parser.add_mutually_exclusive_group()
abbrevgrp.add_argument(
'--abbrev',
action='store',
nargs='?',
help='set number of characters to display in sha',
type=int,
default=7
)
abbrevgrp.add_argument('--no-abbrev', action='store_const', help='do not abbreviate sha ', const=40, dest='abbrev')
track_flags = parser.add_mutually_exclusive_group(required=False)
track_flags.add_argument(
'--set-upstream',
action='store',
nargs=2,
metavar=('branchname',
'upstream'),
help='set branchname to track upstream'
)
track_flags.add_argument(
'--no-track',
nargs='+',
metavar=('branchname',
'startpoint'),
        help='set existing branch to not track, or create new branch that does not track'
)
# add_branch
parser.add_argument('branchname', nargs='?')
parser.add_argument('startpoint', nargs='?')
parser.add_argument('--edit_description', action='store', nargs='?', metavar='branchname', const=repo.active_branch)
result = parser.parse_args(args)
# combine args
edit_description = result.edit_description
delete_branchname = result.delete or result.D
move_branchname = result.move or result.M
no_track = result.no_track
add_branchname = (result.branchname, result.startpoint or repo.active_branch)
set_upstream = result.set_upstream
force = result.force or result.D or result.M
mutual_exclusive_list = (delete_branchname, move_branchname, edit_description, result.branchname, set_upstream, no_track)
list_flag = not any_one(mutual_exclusive_list)
if not any_one((list_flag, ) + mutual_exclusive_list):
        raise GitError('too many options specified.\n' + parser.format_help())
if list_flag:
branch_list(result)
elif delete_branchname:
delete_branch(delete_branchname[0], force, result.remotes, result.verbose)
elif move_branchname:
move_branch(move_branchname, force, result.verbose)
elif add_branchname[0]:
create_branch(add_branchname[0], add_branchname[1], force, False)
elif edit_description:
edit_branch_description(edit_description)
elif set_upstream:
add_tracking(set_upstream[0], *(['origin'] + set_upstream[1].split('/'))[-2:])
print(set_upstream[0], format_tracking_branch_desc(repo, set_upstream[0]))
elif no_track:
if len(no_track) == 1:
remove_tracking(no_track[0])
else:
create_branch(no_track[0], no_track[1], force, True)
#print result
def format_tracking_branch_desc(repo, branchname):
try:
remote = get_remote_tracking_branch(repo, branchname)
mysha = repo.branches[branchname]
theirsha = repo.remote_branches[remote]
ahead, behind = count_commits_between(repo, mysha, theirsha)
return '+{}/-{} relative to {} ({})'.format(ahead, behind, remote, theirsha)
except KeyError:
return ''
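
# For illustration: for a branch that is 3 commits ahead of and 1 commit behind
# origin/master, this returns a string like '+3/-1 relative to origin/master (<sha>)'.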
def edit_branch_description(branchname, description=None):
description = description or input('enter description:')
config = _get_repo().repo.get_config()
    if branchname not in _get_repo().branches:
        raise GitError('{} is not an existing branch'.format(branchname))
config.set(('branch', branchname), 'description', description)
config.write_to_path()
def branch_list(result):
# TODO: tracking branches
N = result.abbrev
repo = _get_repo()
if not result.remotes:
for key, value in iteritems(repo.branches):
dispval = value[0:N] #todo, --abbrev=n
commitmsg = (repo[value].message if result.verbose else '').strip()
tracking = get_remote_tracking_branch(repo, key)
trackmsg = ''
diffmsg = trackingsha = ''
if tracking:
trackingsha = repo.remote_branches[tracking]
ahead, behind = count_commits_between(repo, value, trackingsha)
                diffmsg = '+{}/-{} compared to'.format(ahead, behind) if result.verbose else ''
trackmsg = '[{} {} {}]'.format(diffmsg, tracking, trackingsha[0:N])
print(' '.join([('* ' if repo.active_branch == key else '') + key, dispval, commitmsg]))
if result.remotes or result.all:
for key, value in iteritems(repo.remote_branches):
dispval = value[0:N] #todo, --abbrev=n
commitmsg = (repo[value].message if result.verbose else '').strip()
print(' '.join([('* ' if repo.active_branch == key else '') + key, dispval, commitmsg]))
def delete_branch(delete_branchname, force=False, remote=None, verbose=0):
'''delete a branch.
if remote=True, then look in refs/remotes, otherwise check refs/heads
for local, check if it has a remote tracking branch, and only allow delete if upstream has merged
'''
print('delete', delete_branchname, force, remote)
repo = _get_repo()
if remote:
qualified_branch = repo._format_ref_remote(delete_branchname)
else:
qualified_branch = repo._format_ref_branch(delete_branchname)
    if delete_branchname == repo.active_branch:
        raise GitError('Cannot delete the active branch.')
remote_tracking_branch = get_remote_tracking_branch(repo, delete_branchname)
if remote_tracking_branch and not force:
#see if local is ahead of remote
commits_ahead = count_commits_between(repo,
repo.refs[qualified_branch],
repo.remote_branches[remote_tracking_branch])[0]
if commits_ahead:
raise GitError(
'{0} is ahead of {1} by {2} commits.\nuse git branch -D\n'.format(
delete_branchname,
remote_tracking_branch,
commits_ahead
)
)
print('removing {} (was {})\n'.format(delete_branchname, repo.refs[qualified_branch]))
del repo.repo.refs[qualified_branch]
if not remote:
remove_tracking(delete_branchname)
#todo reflog
def move_branch(movebranch, force, verbose):
'''move oldbranch (or active_branch) to newbranch. update config if needed'''
repo = _get_repo()
oldbranch, newbranch = ([repo.active_branch] + movebranch)[-2:]
if oldbranch not in repo.branches:
raise GitError('{} does not exist in branches'.format(oldbranch))
if newbranch in repo.branches and not force:
raise GitError('{} already exists. use -M to force overwriting'.format(newbranch))
if newbranch != oldbranch:
print('Renaming {} ({}) to {}\n'.format(oldbranch, repo.branches[oldbranch], newbranch))
repo.add_ref(repo._format_ref_branch(newbranch), repo._format_ref_branch(oldbranch))
del repo.repo.refs[repo._format_ref_branch(oldbranch)]
#todo: reflog
if oldbranch == repo.active_branch:
repo.active_branch = newbranch
def remove_tracking(branchname):
'''remove branch entry from config'''
# Get repo's config
config = _get_repo().repo.get_config()
try:
del config[('branch', branchname)]['remote']
del config[('branch', branchname)]['merge']
if not config[('branch', branchname)]:
del config[('branch', branchname)]
except KeyError:
pass
# Write to disk
config.write_to_path()
def add_tracking(branchname, remote, remotebranch):
# Get repo's config
config = _get_repo().repo.get_config()
# Add new entries for remote
config.set(('branch', branchname), 'remote', remote)
config.set(('branch', branchname), 'merge', 'refs/heads/' + remotebranch)
# Write to disk
config.write_to_path()
def create_branch(new_branch, base_rev, force=False, no_track=False):
"""Try creating a new branch which tracks the given remote
if such a branch does not exist then branch off a local branch
"""
repo = _get_repo()
# Already exists
if new_branch in repo.branches:
if not force:
raise GitError("branch %s already exists\n use --force to overwrite anyway" % new_branch)
# fork with new sha
new_ref = repo._format_ref_branch(new_branch)
base_sha = find_revision_sha(repo, base_rev)
repo.repo.refs[new_ref] = base_sha
#handle tracking, only if this was a remote
tracking, remote_branch = (['origin'] + base_rev.split('/'))[-2:] #branch-> origin/branch. remote/branch stays as is
qualified_remote_branch = os.path.sep.join([tracking, remote_branch])
if qualified_remote_branch in repo.remote_branches and not base_rev in repo.branches:
if not no_track:
add_tracking(new_branch, tracking, remote_branch)
else:
remove_tracking(new_branch)
#todo reflog
return new_ref
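
# For illustration (branch and remote names are assumptions):
# create_branch('feature', 'origin/master') writes refs/heads/feature at the sha
# of origin/master and, because the base is a remote branch, records tracking
# information in the repo config via add_tracking.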
def test():
import os
os.chdir('../..')
def run(cmd):
print('branch ', cmd)
branch(cmd.split())
print('')
#run('-d test')
run('')
run('-f test origin/master')
run('')
print('delete test: should delete')
run('-d test')
print('set to remote')
run('test origin/master')
run('-v')
try:
run('test dev')
except GitError:
pass
else:
print('did not error!')
run('-f test dev')
run('-v')
run('-m test test2')
if __name__ == '__main__':
branch(sys.argv[1:])
| mit | 3c3b28c62efabded20851ed0f131a9ed | 35.389058 | 235 | 0.619195 | 3.842105 | false | true | false | false |
arogozhnikov/einops | einops/layers/__init__.py | 1 | 2887 | __author__ = 'Alex Rogozhnikov'
import functools
from typing import Any
from einops.einops import _apply_recipe
from ..einops import TransformRecipe, _prepare_transformation_recipe
from .. import EinopsError
class RearrangeMixin:
"""
Rearrange layer behaves identically to einops.rearrange operation.
:param pattern: str, rearrangement pattern
:param axes_lengths: any additional specification of dimensions
    See einops.rearrange for examples.
"""
def __init__(self, pattern: str, **axes_lengths: Any) -> None:
super().__init__()
self.pattern = pattern
self.axes_lengths = axes_lengths
self._recipe = self.recipe() # checking parameters
def __repr__(self) -> str:
params = repr(self.pattern)
for axis, length in self.axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{}({})'.format(self.__class__.__name__, params)
@functools.lru_cache(maxsize=1024)
def recipe(self) -> TransformRecipe:
try:
hashable_lengths = tuple(sorted(self.axes_lengths.items()))
return _prepare_transformation_recipe(self.pattern, operation='rearrange', axes_lengths=hashable_lengths)
except EinopsError as e:
raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
def _apply_recipe(self, x):
return _apply_recipe(self._recipe, x, reduction_type='rearrange')
class ReduceMixin:
"""
Reduce layer behaves identically to einops.reduce operation.
:param pattern: str, rearrangement pattern
:param reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
:param axes_lengths: any additional specification of dimensions
    See einops.reduce for examples.
"""
def __init__(self, pattern: str, reduction: str, **axes_lengths: Any):
super().__init__()
self.pattern = pattern
self.reduction = reduction
self.axes_lengths = axes_lengths
self._recipe = self.recipe() # checking parameters
def __repr__(self):
params = '{!r}, {!r}'.format(self.pattern, self.reduction)
for axis, length in self.axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{}({})'.format(self.__class__.__name__, params)
@functools.lru_cache(maxsize=1024)
def recipe(self) -> TransformRecipe:
try:
hashable_lengths = tuple(sorted(self.axes_lengths.items()))
return _prepare_transformation_recipe(
self.pattern, operation=self.reduction, axes_lengths=hashable_lengths)
except EinopsError as e:
raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
def _apply_recipe(self, x):
return _apply_recipe(self._recipe, x, reduction_type=self.reduction)
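
# Illustrative sketch: the framework-specific layers (einops.layers.torch,
# einops.layers.keras, ...) combine these mixins with a framework module, e.g.:
#
#   from einops.layers.torch import Rearrange
#   pool_and_flatten = Rearrange('b c h w -> b (c h w)')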
| mit | 8773d2572b5f056f103e931e07e825e1 | 35.0875 | 117 | 0.638379 | 3.943989 | false | false | false | false |
ywangd/stash | lib/stashutils/fsi/zip.py | 1 | 7023 | # -*- coding: utf-8 -*-
"""The FSI for zipfiles"""
import zipfile
import os
import tempfile
import time
import shutil
import datetime
import stat
from io import BytesIO
from stashutils.fsi import base
from stashutils.fsi import errors
# TODO: check filename bug when writing
class ZipfileFSI(base.BaseFSI):
"""FSI for zipfiles"""
def __init__(self, logger):
base.BaseFSI.__init__(self, logger)
self.logger = logger
self.path = "/"
self.zf = None
self.is_new = True
self.dirs = ["/"] # list of dirs with no files in them
self.log("Warning: The ZipfileFSI has some unfixed bugs!\n")
# ^^^ These bugs are beyond my abilities (and they seem to be case
# dependent)
def abspath(self, path):
"""returns the absolute path for path."""
p = os.path.join(self.path, path)
while p.startswith("/"):
p = p[1:]
return p
def _getdirs(self):
"""returns a list of all dirs"""
dirs = ["/"] + self.dirs
for name in self.zf.namelist():
dirpath = os.path.dirname(name)
if dirpath not in dirs:
dirs.append(dirpath)
return dirs
def _update(self, remove=[]):
"""create a new zipfile with some changes"""
nzfp = os.path.join(tempfile.gettempdir(), "tempzip_{t}.zip".format(t=time.time()))
op = self.zf.fp.name
pswd = self.zf.pwd
comment = self.zf.comment
nzf = zipfile.ZipFile(nzfp, "w", self.zf.compression, True)
infos = self.zf.infolist()
for zipinfo in infos:
add = True
for rm in remove:
if zipinfo.filename.startswith(rm):
add = False
break
if not add:
continue
ofo = self.zf.open(zipinfo)
nzf.writestr(zipinfo, ofo.read())
self.zf.close()
os.remove(op)
nzf.close()
shutil.copy(nzfp, op)
self.zf = zipfile.ZipFile(op, "a", zipfile.ZIP_DEFLATED, True)
self.zf.setpassword(pswd)
self.zf.comment = comment
def connect(self, *args):
"""open the zipfile"""
		if len(args) not in (1, 2):
			return "expected one or two arguments!"
ap = os.path.abspath(args[0])
if os.path.exists(ap):
if not zipfile.is_zipfile(ap):
return "not a zipfile"
try:
self.zf = zipfile.ZipFile(ap, "a", zipfile.ZIP_DEFLATED, True)
self.is_new = False
except Exception as e:
return e.message
if len(args) == 2:
self.zf.setpassword(args[1])
return True
else:
try:
self.zf = zipfile.ZipFile(ap, "w", zipfile.ZIP_DEFLATED, True)
self.is_new = True
except Exception as e:
return e.message
return True
def repr(self):
"""returns a string representing this fsi"""
template = "{inz} Zipfile at '{p}'"
inz = "New" if self.is_new else "Open"
return template.format(inz=inz, p=self.zf.fp.name)
def listdir(self, path="."):
ap = self.abspath(path)
dirlist = self._getdirs()
namelist = self.zf.namelist()
names = dirlist + namelist
content = []
for name in names:
dirname = os.path.dirname(name)
if dirname == ap:
content.append(name.replace(dirname, ""))
return content
def cd(self, path):
np = self.abspath(path)
dirs = self._getdirs()
if np not in dirs:
raise errors.OperationFailure("Dir does not exists!")
self.path = np
def get_path(self):
return self.path
def remove(self, path):
ap = self.abspath(path)
self._update(remove=[ap])
def mkdir(self, name):
ap = self.abspath(name)
self.dirs.append(ap)
def close(self):
self.zf.close()
def isdir(self, name):
ap = self.abspath(name)
return ((ap in self._getdirs()) and not self.isfile(name))
def isfile(self, name):
ap = self.abspath(name)
return (ap in self.zf.namelist())
def stat(self, name):
ap = self.abspath(name)
self.log("stat: {ap}\n".format(ap=ap))
isdir = self.isdir(name)
isfile = self.isfile(name)
if not (isdir or isfile):
self.log("stat-target not found.\n")
raise errors.OperationFailure("Not found!")
if isdir:
size = 1
mtime = None
else:
zipinfo = self.zf.getinfo(ap)
size = zipinfo.file_size
timestamp = zipinfo.date_time
dt = datetime.datetime(*timestamp)
mtime = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
type_ = (stat.S_IFREG if isfile else stat.S_IFDIR)
mode = base.calc_mode(type=type_)
self.log("stat return\n")
return base.make_stat(size=size, mtime=mtime, ctime=mtime, mode=mode)
def open(self, name, mode="r", buffering=0):
ap = self.abspath(name)
self.log("open {ap} with mode {m}\n".format(ap=ap, m=mode))
if "r" in mode:
try:
reader = ZipReader(self, ap, mode, buffering)
			except Exception:
raise errors.OperationFailure("Not found!")
else:
return reader
elif "w" in mode:
if ap in self.zf.namelist():
self._update(remove=[ap])
return ZipWriter(self, ap, mode, buffering)
else:
raise errors.OperationFailure("Unsupported mode!")
class ZipWriter(object):
"""utility class used for writing to a ZipFile."""
def __init__(self, root, fp, mode, buffering):
self.root = root
self.fp = fp
self.name = fp
self.buffering = buffering
self.mode = mode
self.sio = BytesIO()
self.closed = False
def close(self):
"""called on file close"""
if self.closed:
return
self.closed = True
content = self.sio.getvalue()
self.sio.close()
self.root.zf.writestr(self.fp, content)
def __getattr__(self, name):
return getattr(self.sio, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __del__(self):
self.close()
class ZipReader(ZipWriter):
"""utility class for reading a file from a zip."""
def __init__(self, root, fp, mode, buffering):
self.root = root
self.fp = fp
self.name = fp
self.buffering = buffering
self.mode = mode
self.sio = BytesIO(self.root.zf.read(fp))
self.closed = False
def close(self):
if self.closed:
return
self.closed = True
self.sio.close()
| mit | a17c2544118a3b4f856fc653197c570e | 28.758475 | 91 | 0.537804 | 3.901667 | false | false | false | false |
pyvisa/pyvisa | pyvisa/testsuite/keysight_assisted_tests/test_tcpip_resources.py | 1 | 4530 | # -*- coding: utf-8 -*-
"""Test the TCPIP based resources.
"""
import pytest
from pyvisa import constants, errors
from . import copy_func, require_virtual_instr
from .messagebased_resource_utils import (
EventAwareMessagebasedResourceTestCaseMixin,
LockableMessagedBasedResourceTestCaseMixin,
MessagebasedResourceTestCase,
)
@require_virtual_instr
class TestTCPIPInstr(
LockableMessagedBasedResourceTestCaseMixin,
EventAwareMessagebasedResourceTestCaseMixin,
MessagebasedResourceTestCase,
):
"""Test pyvisa against a TCPIP INSTR resource."""
#: Type of resource being tested in this test case.
#: See RESOURCE_ADDRESSES in the __init__.py file of this package for
#: acceptable values
RESOURCE_TYPE = "TCPIP::INSTR"
#: Minimal timeout value accepted by the resource. When setting the timeout
#: to VI_TMO_IMMEDIATE, Visa (Keysight at least) may actually use a
#: different value depending on the values supported by the resource.
MINIMAL_TIMEOUT = 1
def test_io_prot_attr(self):
"""Test getting/setting the io prot attribute.
We would need to spy on the transaction to ensure we are sending a
string instead of using the lower level mechanism.
"""
try:
self.instr.read_stb()
            # XXX not sure what the actual issue is here
with pytest.raises(errors.VisaIOError):
self.instr.set_visa_attribute(
constants.VI_ATTR_IO_PROT, constants.IOProtocol.hs488
)
# self.instr.read_stb()
# assert (
# self.instr.get_visa_attribute(constants.VI_ATTR_IO_PROT)
# == constants.IOProtocol.hs488
# )
finally:
self.instr.set_visa_attribute(
constants.VI_ATTR_IO_PROT, constants.IOProtocol.normal
)
@require_virtual_instr
class TestTCPIPSocket(MessagebasedResourceTestCase):
"""Test pyvisa against a TCPIP SOCKET resource."""
#: Type of resource being tested in this test case.
#: See RESOURCE_ADDRESSES in the __init__.py file of this package for
#: acceptable values
RESOURCE_TYPE = "TCPIP::SOCKET"
#: Minimal timeout value accepted by the resource. When setting the timeout
#: to VI_TMO_IMMEDIATE, Visa (Keysight at least) may actually use a
#: different value depending on the values supported by the resource.
MINIMAL_TIMEOUT = 1
# Copy functions since the marker is applied in-place
test_write_raw_read_bytes = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_write_raw_read_bytes)
)
test_write_raw_read_raw = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_write_raw_read_raw)
)
test_write_read = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_write_read)
)
test_write_ascii_values = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_write_ascii_values)
)
test_write_binary_values = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_write_binary_values)
)
test_read_ascii_values = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_read_ascii_values)
)
test_read_binary_values = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_read_binary_values)
)
test_read_query_binary_values_invalid_header = pytest.mark.xfail(
copy_func(
MessagebasedResourceTestCase.test_read_query_binary_values_invalid_header
)
)
test_read_binary_values_unreported_length = pytest.mark.xfail(
copy_func(
MessagebasedResourceTestCase.test_read_binary_values_unreported_length
)
)
test_read_binary_values_empty = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_read_binary_values_empty)
)
test_delay_in_query_ascii = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_delay_in_query_ascii)
)
test_instrument_wide_delay_in_query_binary = pytest.mark.xfail(
copy_func(
MessagebasedResourceTestCase.test_instrument_wide_delay_in_query_binary
)
)
test_delay_args_in_query_binary = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_delay_args_in_query_binary)
)
test_no_delay_args_in_query_binary = pytest.mark.xfail(
copy_func(MessagebasedResourceTestCase.test_no_delay_args_in_query_binary)
)
| mit | cef2ecdd04d7456cfdfdb4baea117109 | 36.131148 | 85 | 0.683885 | 3.878425 | false | true | false | false |
arogozhnikov/einops | einops/experimental/indexing.py | 1 | 14777 | """
Indexing one array with the other(s).
Concept for discussion.
Notation targets hard cases, not simple ones, like indexing of 1d-array with another 1d-array
(notation supports that, but you can't simplify arr[ind], and there is no reason to)
Examples
1. query for every token in sequence a token in the image. Images and sequences are paired
einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt])
this is equivalent, so you can pass indexers independently or together
einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt]))
after some thinking I decided that having first axis for indexing variable is not too restrictive,
but should simplify mapping of such cases.
For this reason [...] part should always go first in indexer.
This makes the largest difference with einindex https://github.com/malmaud/einindex,
which has almost identical grammar, but puts special dimension last, while we put it first.
This trick allows naturally decomposing a multiindex into individual dimensions or vice versa.
2. query for every token in the video the most suitable word in a (matching) sentence
einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw])
note that only one indexer is used, but it still has to be enclosed in the list.
That's the price for being generic. Alternatively, a leading singleton dimension can be added.
3. (not supported now, future planning)
for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them
indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t')
selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt)
While the current question is 'how do we index',
it is important to pre-align it with the question 'what are natural ways to get indices'.
Most common are min/max; less common options: topk (works here) and random sampling.
Some important properties of this notation:
- support for multiple indexers, including using a single tensor to keep multiple indexers
- 'batch' indexing, when some axes of indexer and array should be matched
- universal (one-indexing-to-rule-them-all)
- extensible for (named) ellipses, including variadic number of indexers
- extensible for einops-style compositions and decompositions
- extensible for outer indexing when indexers are not aligned
The current implementation is based on the python array api and uses loops,
because no appropriate indexing is available in the standard.
"""
from typing import List, Union, TypeVar, Tuple
from einops import EinopsError
T = TypeVar('T')
class CompositionDecomposition:
def __init__(
self,
decomposed_shape: List[str],
composed_shape: List[List[str]],
):
flat_shape = []
for x in composed_shape:
flat_shape.extend(x)
self.compose_transposition: Tuple[int, ...] = tuple([decomposed_shape.index(x) for x in flat_shape])
self.decompose_transposition: Tuple[int, ...] = tuple([flat_shape.index(x) for x in decomposed_shape])
self.composed_shape = composed_shape
self.decomposed_shape = decomposed_shape
def decompose(self, x, known_axes_lengths: dict[str, int]):
xp = x.__array_namespace__()
shape = x.shape
flat_shape = []
for i, axis_group in enumerate(self.composed_shape):
unknown_axis_name = None
known_sizes_prod = 1
for axis_name in axis_group:
if axis_name in known_axes_lengths:
known_sizes_prod *= known_axes_lengths[axis_name]
else:
if unknown_axis_name is None:
unknown_axis_name = axis_name
else:
raise EinopsError("Can't infer the size")
if unknown_axis_name is None:
assert shape[i] == known_sizes_prod
else:
known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod
for axis in axis_group:
flat_shape.append(known_axes_lengths[axis])
x = xp.reshape(x, flat_shape)
return xp.permute_dims(x, self.decompose_transposition)
def compose(self, x, known_axes_lengths: dict[str, int]):
xp = x.__array_namespace__()
for axis_len, axis_name in zip(x.shape, self.decomposed_shape):
if axis_name in known_axes_lengths:
assert known_axes_lengths[axis_name] == axis_len
else:
known_axes_lengths[axis_name] = axis_len
x = xp.permute_dims(x, self.compose_transposition)
new_shape = []
for axis_group in self.composed_shape:
composed_axis_size = 1
for axis_name in axis_group:
composed_axis_size *= known_axes_lengths[axis_name]
new_shape.append(composed_axis_size)
return xp.reshape(x, tuple(new_shape))
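# A minimal round-trip sketch for CompositionDecomposition (this mirrors
# test_composition_and_decomposition below; `x` is any 4d array and `np`
# is numpy.array_api, as in the tests):
#   comp = CompositionDecomposition(decomposed_shape=['a', 'b', 'c', 'd'],
#                                   composed_shape=[['a', 'b'], ['c', 'd']])
#   axes = {}
#   y = comp.compose(x, axes)   # shape (a*b, c*d); axis lengths recorded in `axes`
#   assert np.all(comp.decompose(y, axes) == x)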
def arange_at_position(xp, n_axes, axis, axis_len, device=None):
x = xp.arange(axis_len, dtype=xp.int64, device=device)
shape = [1] * n_axes
shape[axis] = axis_len
x = xp.reshape(x, shape)
return x
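# e.g. arange_at_position(xp, n_axes=3, axis=1, axis_len=4) returns an int64
# array of shape (1, 4, 1) containing [0, 1, 2, 3] along axis 1 -- handy for
# building the batch offsets used below.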
class IndexingFormula:
def __init__(self, pattern: str):
"""
:param pattern: example 'b t c <- b hsel wsel c, [hsel, wsel] b t'
"""
self.pattern = pattern
left, right = pattern.split('<-')
arg_split = right.index(',')
arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1:]
ind_pattern = ind_pattern.strip()
# print(
# arr_pattern, '\n',
# ind_pattern,
# )
assert ind_pattern.startswith('['), 'composition axis should go first in indexer (second argument) [h w] i j k'
composition_start = ind_pattern.index('[')
composition_end = ind_pattern.index(']')
composition = ind_pattern[composition_start + 1: composition_end]
ind_other_axes = ind_pattern[composition_end + 1:]
self.result_axes_names = left.split()
self.array_axes_names = arr_pattern.split()
self.indexing_axes_names = [x.strip() for x in composition.split(',')]
self.indexer_other_axes_names = ind_other_axes.split()
for group_name, group in [
('result', self.result_axes_names),
('array', self.array_axes_names),
('indexer', self.indexing_axes_names + self.indexer_other_axes_names),
]:
if len(set(group)) != len(group):
# need more verbosity, which axis, raise
raise EinopsError(f'{group_name} pattern ({group}) contains a duplicated axis')
axis_groups = [
self.result_axes_names,
self.array_axes_names,
self.indexing_axes_names,
self.indexer_other_axes_names,
]
all_axes = set()
for group in axis_groups:
all_axes.update(group)
self.indexer_axes = []
self.batch_axes = []
self.result_and_index_axes = []
self.result_and_array_axes = []
for axis in all_axes:
presence = tuple(axis in g for g in axis_groups)
# want match-case here. sweet dreams
if presence == (False, True, True, False):
self.indexer_axes.append(axis)
elif presence[2]:
raise EinopsError(f'Wrong usage of indexer variable {axis}')
elif presence == (True, True, False, True):
self.batch_axes.append(axis)
elif presence == (True, False, False, True):
self.result_and_index_axes.append(axis)
elif presence == (True, True, False, False):
self.result_and_array_axes.append(axis)
else:
# TODO better categorization of wrong usage patterns
raise EinopsError(f'{axis} is used incorrectly in {pattern}')
assert set(self.indexer_axes) == set(self.indexing_axes_names)
# order of these variables matters, since we can't lose mapping here
self.indexer_axes = self.indexing_axes_names
self.array_composition = CompositionDecomposition(
decomposed_shape=self.array_axes_names,
composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes],
)
self.index_composition = CompositionDecomposition(
decomposed_shape=self.indexer_other_axes_names,
# single axis after composition
composed_shape=[self.batch_axes + self.result_and_index_axes],
)
self.result_composition = CompositionDecomposition(
decomposed_shape=self.result_axes_names,
composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes],
)
def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]):
known_axes_sizes: dict[str, int] = {}
xp = arr.__array_namespace__()
if not isinstance(ind, list):
ind = [ind[i, ...] for i in range(ind.shape[0])]
for indexer in ind:
assert len(indexer.shape) == len(self.indexer_other_axes_names)
# step 1. transpose, reshapes of arr; learn its dimensions
arr_2d = self.array_composition.compose(arr, known_axes_sizes)
# step 2. compute shifts and create an actual indexing array
shift = 1
full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device)
# original order: [*batch-like axes, *indexing_axes,]
# now we need to traverse them in the opposite direction
for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]:
full_index = full_index + shift * (indexer % known_axes_sizes[axis_name])
shift *= known_axes_sizes[axis_name]
for axis_name in self.batch_axes[::-1]:
axis_id = self.indexer_other_axes_names.index(axis_name)
full_index = full_index + arange_at_position(
xp, len(self.indexer_other_axes_names), axis=axis_id, axis_len=known_axes_sizes[axis_name],
device=arr.device,
) * shift
shift *= known_axes_sizes[axis_name]
assert shift == arr_2d.shape[0]
# step 3. Flatten index
full_index = self.index_composition.compose(full_index, known_axes_sizes)
# step 4. indexing
# python array api lacks any integer indexing, so... I use loops.
# did you know that there is conceptual programming ... just like art?
# result_2d = arr_2d[full_index]
result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])])
# step 5. decompose the 2d result back into the requested axes
result = self.result_composition.decompose(result_2d, known_axes_sizes)
return result
def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]):
"""
Demonstrates how einindex should work.
Supports data-api compliant arrays.
"""
formula = IndexingFormula(pattern)
return formula.apply_to_array_api(arr, ind)
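# A minimal usage sketch (mirrors test_simple_indexing below; assumes a
# data-api compliant namespace such as numpy.array_api):
#   arr = np.reshape(np.arange(5 * 7), (5, 7))
#   ind = np.arange(7) % 5
#   picked = einindex('j <- i j, [i] j', arr, [ind])  # picked[j] == arr[ind[j], j]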
def test_composition_and_decomposition():
import numpy.array_api as np
x = np.arange(2 * 3 * 5 * 7)
x = np.reshape(x, (2, 3, 5, 7))
comp = CompositionDecomposition(
decomposed_shape=['a', 'b', 'c', 'd'],
composed_shape=[['a', 'b'], ['c', 'd']],
)
assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7)
y = CompositionDecomposition(
decomposed_shape=['a', 'b', 'c', 'd'],
composed_shape=[['a', 'b'], [], ['c', 'd']],
).compose(x, {})
assert y.shape == (2 * 3, 1, 5 * 7)
assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,)))
comp = CompositionDecomposition(
decomposed_shape=['a', 'b', 'e', 'c', 'd'],
composed_shape=[['e', 'c'], ['b'], ['a', 'd']],
)
x = np.arange(2 * 3 * 5 * 7 * 3)
x = np.reshape(x, (2, 3, 5, 7, 3))
axes = {}
y = comp.compose(x, axes)
x2 = comp.decompose(y, axes)
assert np.all(x == x2)
def test_simple_indexing():
import numpy.array_api as np
# simple 2d test
arr = np.reshape(np.arange(5 * 7), (5, 7))
ind = np.arange(7) % 5
x = einindex('j <- i j, [i] j', arr, [ind])
for j, i in enumerate(ind):
assert arr[i, j] == x[j]
y = einindex('j <- j i, [i] j', np.permute_dims(arr, (1, 0)), [ind])
for j, i in enumerate(ind):
assert arr[i, j] == y[j]
def test_multidimensional_indexing():
import numpy.array_api as np
embedding_bhwc = (
+ arange_at_position(np, 4, 0, 2) * 1000
+ arange_at_position(np, 4, 1, 3) * 100
+ arange_at_position(np, 4, 2, 5) * 10
+ arange_at_position(np, 4, 3, 7) * 1
)
hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3
windices_bt = np.reshape(np.arange(6), (2, 3)) % 5
# imagine that you have pairs of image <> sentence
# your goal is to get the most suitable token from the image for every token in the sentence
# thus for every token in sentence you compute best k and v
result = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, [hindices_bt, windices_bt])
# example of using a single array for indexing multiple axes
hw_indices_bt = np.stack([hindices_bt, windices_bt])
result2 = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, hw_indices_bt)
assert np.all(result == result2)
# check vs manual element computation
result_manual = result * 0
for b in range(2):
for t in range(3):
for c in range(7):
h = hindices_bt[b, t]
w = windices_bt[b, t]
result_manual[c, t, b] = embedding_bhwc[b, h, w, c]
assert np.all(result == result_manual)
def test_reverse_indexing():
import numpy.array_api as np
C, T, B = 2, 3, 5
# G = GPU, batch-like variable
G = 4
H = 7
W = 9
arr_gtbc = (
+ arange_at_position(np, 4, 0, G) * 1000
+ arange_at_position(np, 4, 1, T) * 100
+ arange_at_position(np, 4, 2, B) * 10
+ arange_at_position(np, 4, 3, C) * 1
)
t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T
result = einindex('g b c h w <- g t b c, [t] g b h w', arr_gtbc, [t_indices_gbhw])
result_manual = result * 0
for g in range(G):
for b in range(B):
for c in range(C):
for h in range(H):
for w in range(W):
t = t_indices_gbhw[g, b, h, w]
result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c]
assert np.all(result == result_manual)
| mit | feeba9ba6e9a8528b26c2484c11e94a0 | 36.600509 | 121 | 0.595046 | 3.506645 | false | false | false | false |
ywangd/stash | bin/version.py | 1 | 3242 | # -*- coding: utf-8 -*-
""" Show information about this StaSh installation.
"""
from __future__ import print_function
import os
import io
import sys
import time
import platform
import plistlib
_stash = globals()['_stash']
try:
collapseuser = _stash.libcore.collapseuser
except AttributeError:
collapseuser = lambda p: p
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
# Following functions for getting Pythonista and iOS version information are adapted from
# https://github.com/cclauss/Ten-lines-or-less/blob/master/pythonista_version.py
def pythonista_version(): # 2.0.1 (201000)
try:
path = os.path.abspath(os.path.join(sys.executable, '..', 'Info.plist'))
with io.open(path, "rb") as fin:
plist = plistlib.load(fin)
return '{CFBundleShortVersionString} ({CFBundleVersion})'.format(**plist)
except Exception as e:
return "UNKNOWN ({e})".format(e=repr(e))
def ios_version(): # 9.2 (64-bit iPad5,4)
try:
ios_ver, _, machine_model = platform.mac_ver()
except Exception as e:
return "UNKNOWN ({e})".format(e=repr(e))
else:
bit = platform.architecture()[0].rstrip('bit') + '-bit'
return '{} ({} {})'.format(ios_ver, bit, machine_model)
def print_stash_info():
"""
Print general StaSh information.
"""
STASH_ROOT = os.environ['STASH_ROOT']
print(_stash.text_style('StaSh v%s' % globals()['_stash'].__version__, {'color': 'blue', 'traits': ['bold']}))
print(u'{} {} ({})'.format(_stash.text_bold('Python'), os.environ['STASH_PY_VERSION'], platform.python_implementation()))
print(u'{} {}'.format(_stash.text_bold('UI'), _stash.ui.__module__))
print(u'{}: {}'.format(_stash.text_bold('root'), collapseuser(STASH_ROOT)))
_stat = os.stat(os.path.join(STASH_ROOT, 'core.py'))
last_modified = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(_stat.st_mtime))
print(u'{}: {}'.format(_stash.text_bold('core.py'), last_modified))
print(u'{}: {}'.format(_stash.text_bold('SELFUPDATE_TARGET'), os.environ['SELFUPDATE_TARGET']))
def print_pythonista_info():
"""
Print pythonista related informations.
"""
print(u'{} {}'.format(_stash.text_bold('Pythonista'), pythonista_version()))
print(u'{} {}'.format(_stash.text_bold('iOS'), ios_version()))
def print_paths():
"""
Print path related informations
"""
print(_stash.text_bold('BIN_PATH:'))
for p in os.environ['BIN_PATH'].split(':'):
print(' {}'.format(collapseuser(p)))
print(_stash.text_bold('PYTHONPATH:'))
for p in os.environ['PYTHONPATH'].split(':'):
print(' {}'.format(collapseuser(p)))
def print_machine_info():
"""
Print information about the current machine.
"""
if IN_PYTHONISTA:
print_pythonista_info()
print(u"{} {}".format(_stash.text_bold("Platform"), platform.platform()))
def print_libs():
"""
Print loaded libs.
"""
print(_stash.text_bold("Loaded libraries:"))
for an in dir(_stash):
if an.startswith("lib"):
print(u" {}".format(an))
def main():
print_stash_info()
print_machine_info()
print_paths()
print_libs()
if __name__ == '__main__':
main()
| mit | 2fb38108a3ada71f16686621de27bc5c | 29.584906 | 125 | 0.612893 | 3.370062 | false | false | false | false |
ywangd/stash | lib/stashutils/core.py | 1 | 1149 | # -*- coding: utf-8 -*-
"""core utilities for StaSh-scripts"""
import threading
import imp
import os
from stash.system import shthreads
def get_stash():
"""
returns the currently active StaSh-instance.
returns None if it can not be found.
This is useful for modules.
"""
if "_stash" in globals():
return globals()["_stash"]
for thr in threading.enumerate():
if isinstance(thr, shthreads.ShBaseThread):
ct = thr
while not ct.is_top_level():
ct = ct.parent
return ct.parent.stash
return None
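# Usage sketch from a StaSh-aware module. text_bold is one of the rendering
# helpers StaSh exposes (it is used e.g. in bin/version.py); the message is
# made up:
#   _stash = get_stash()
#   if _stash is not None:
#       print(_stash.text_bold("running inside StaSh"))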
def load_from_dir(dirpath, varname):
"""
returns a list of all variables named 'varname' in .py files in the directory 'dirpath'.
"""
if not os.path.isdir(dirpath):
return []
ret = []
for fn in os.listdir(dirpath):
fp = os.path.join(dirpath, fn)
if not os.path.isfile(fp):
continue
with open(fp, "r") as fin:
mod = imp.load_source(fn[:fn.index(".")], fp, fin)
if not hasattr(mod, varname):
continue
else:
ret.append(getattr(mod, varname))
return ret
| mit | 0563660dec4e505a3643a854b386bf44 | 25.113636 | 87 | 0.583986 | 3.767213 | false | false | false | false |
pyvisa/pyvisa | pyvisa/resources/serial.py | 1 | 3638 | # -*- coding: utf-8 -*-
"""High level wrapper for Serial resources.
This file is part of PyVISA.
:copyright: 2014-2022 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .. import attributes, constants
from ..attributes import Attribute
from .messagebased import MessageBasedResource
@MessageBasedResource.register(constants.InterfaceType.asrl, "INSTR")
class SerialInstrument(MessageBasedResource):
"""Communicates with devices of type ASRL<board>[::INSTR]
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Baud rate of the interface. The default value is 9600.
baud_rate: Attribute[int] = attributes.AttrVI_ATTR_ASRL_BAUD()
#: Number of data bits contained in each frame (from 5 to 8). The default value is 8.
data_bits: Attribute[int] = attributes.AttrVI_ATTR_ASRL_DATA_BITS()
#: Parity used with every frame transmitted and received. The default value is
#: `constants.Parity.none` (VI_ASRL_PAR_NONE).
parity: Attribute[constants.Parity] = attributes.AttrVI_ATTR_ASRL_PARITY()
#: Number of stop bits used to indicate the end of a frame. The default value is
#: `constants.StopBits.one` (VI_ASRL_STOP_ONE).
stop_bits: Attribute[constants.StopBits] = attributes.AttrVI_ATTR_ASRL_STOP_BITS()
#: Indicates the type of flow control used by the transfer mechanism. The default value
#: is `constants.ControlFlow.none` (VI_ASRL_FLOW_NONE).
flow_control: Attribute[
constants.ControlFlow
] = attributes.AttrVI_ATTR_ASRL_FLOW_CNTRL()
#: Number of bytes available in the low-level I/O receive buffer.
bytes_in_buffer: Attribute[int] = attributes.AttrVI_ATTR_ASRL_AVAIL_NUM()
#: If set to True, NUL characters are discarded. The default is False.
discard_null: Attribute[bool] = attributes.AttrVI_ATTR_ASRL_DISCARD_NULL()
#: Manually control transmission. The default value is True.
allow_transmit: Attribute[bool] = attributes.AttrVI_ATTR_ASRL_ALLOW_TRANSMIT()
#: Method used to terminate read operations. The default value is
#: `constants.SerialTermination.termination_char` (VI_ASRL_END_TERMCHAR).
end_input: Attribute[
constants.SerialTermination
] = attributes.AttrVI_ATTR_ASRL_END_IN()
#: Method used to terminate write operations. The default value is
#: `constants.SerialTermination.none` (VI_ASRL_END_NONE) and terminates
#: when all requested data is transferred or when an error occurs.
end_output: Attribute[
constants.SerialTermination
] = attributes.AttrVI_ATTR_ASRL_END_OUT()
#: Duration (in milliseconds) of the break signal. The default value is 250.
break_length: Attribute[int] = attributes.AttrVI_ATTR_ASRL_BREAK_LEN()
#: Manually control the assertion state of the break signal. The default state is
#: `constants.LineState.unasserted` (VI_STATE_UNASSERTED).
break_state: Attribute[
constants.LineState
] = attributes.AttrVI_ATTR_ASRL_BREAK_STATE()
#: Character to be used to replace incoming characters that arrive with errors.
#: The default character is '\0'.
replace_char: Attribute[str] = attributes.AttrVI_ATTR_ASRL_REPLACE_CHAR()
#: XOFF character used for XON/XOFF flow control (both directions).
#: The default character is '0x13'.
xoff_char: Attribute[str] = attributes.AttrVI_ATTR_ASRL_XOFF_CHAR()
#: XON character used for XON/XOFF flow control (both directions).
#: The default character is '0x11'.
xon_char: Attribute[str] = attributes.AttrVI_ATTR_ASRL_XON_CHAR()
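# A typical configuration sketch (the ASRL resource address below is an
# assumption; substitute the one your system reports):
#   import pyvisa
#   rm = pyvisa.ResourceManager()
#   inst = rm.open_resource("ASRL1::INSTR")
#   inst.baud_rate = 115200
#   inst.data_bits = 8
#   inst.stop_bits = constants.StopBits.one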
| mit | dd6194dcc192bacba284d099bdf7bb80 | 41.8 | 91 | 0.718802 | 3.700916 | false | false | false | false |
ywangd/stash | system/shhistory.py | 1 | 6418 | #-*- coding: utf-8 -*-
"""
StaSh input history
"""
from io import open
import json
from .shcommon import ShEventNotFound
class ShHistory(object):
"""
This class is responsible for input history.
:param stash: the StaSh core
:type stash: StaSh
"""
ENCODING = "utf-8"
DEFAULT = "_default"
def __init__(self, stash):
self.stash = stash
self._histories = {}
self._current = self.DEFAULT
self.allow_double = self.stash.config.getboolean("history", "allow_double_lines")
self.hide_whitespace = self.stash.config.getboolean("history", "hide_whitespace_lines")
self.ipython_style_history_search = self.stash.config.getboolean('history', 'ipython_style_history_search')
self.maxsize = self.stash.config.getint("history", "maxsize")
self.templine = ""
self.idx = -1
@classmethod
def load(cls, path, stash):
"""
Load the history from a path.
:param path: path to load from.
:type path: str
:param stash: the StaSh core
:type stash: StaSh
:return: the history loaded from the file
:rtype: ShHistory
"""
shh = cls(stash)
try:
with open(path, "r", encoding=cls.ENCODING) as fin:
h = json.loads(u"" + fin.read())
except ValueError:
h = {"StaSh.runtime": cls.load_old_format(path)}
shh._histories = h
return shh
@classmethod
def load_old_format(cls, path):
"""
Load the content of an old-style history.
:param path: path to load from
:type path: str
:return: the lines loaded from the file
:rtype: list of str
"""
with open(path, "r", encoding=cls.ENCODING) as fin:
lines = [line.strip() for line in fin.readlines()]
return lines
def save(self, path):
"""
Save the history to a path.
:param path: path to save to.
:type path: str
"""
with open(path, "w", encoding=self.ENCODING) as fout:
s = json.dumps(self._histories)
fout.write(u"" + s) # ensure unicode
def clear(self, target=None):
"""
Clear the history
:param target: history to clear or None for current
:type target: str or None
"""
if target is None:
target = self._current
if target in self._histories:
del self._histories[target]
def clear_all(self):
"""
Clear all histories.
"""
self._histories = {}
def swap(self, target):
"""
Swap the history
:param target: identifier to get the history for
:type target: str or None
"""
self._current = target
def add(self, line, always=False):
"""
Add a line to the history.
:param line: line to add to history
:type line: str
:param always: always add this line, regardless of config
:type always: bool
"""
if self._current not in self._histories:
self._histories[self._current] = []
stripped = line.strip()
last_line = (self._histories[self._current][-1] if len(self._histories[self._current]) > 0 else None)
if not always:
# check if this line should be added
if stripped == last_line and not self.allow_double:
# prevent double lines
return
if line.startswith(" ") and self.hide_whitespace:
# hide lines starting with a whitespace
return
self._histories[self._current].append(stripped)
# ensure maxsize
while len(self._histories[self._current]) > max(0, self.maxsize):
self._histories[self._current].pop(0)
# reset index
self.reset_idx()
def getlist(self):
"""
Return a list of the current history.
:return: list of current history entries
:rtype: list of str
"""
if self._current not in self._histories:
self._histories[self._current] = []
return self._histories[self._current][::-1]
def search(self, tok):
"""
Search the history.
:param tok: history event designator, e.g. "!!" (most recent entry),
"!0" (oldest entry) or "!prefix" (most recent entry starting with "prefix")
:type tok: str
:return: last entry in history matching the search
:rtype: str
"""
history = self.getlist()
search_string = tok[1:]
if search_string == '':
return ''
if search_string == '!':
return history[0]
try:
idx = int(search_string)
try:
return history[::-1][idx]
except IndexError:
raise ShEventNotFound(tok)
except ValueError:
for entry in history:
if entry.startswith(search_string):
return entry
raise ShEventNotFound(tok)
def reset_idx(self):
"""
Reset the index of the current position in the history
"""
self.idx = -1
def up(self):
"""
Move upwards in the history.
"""
# Save the unfinished line the user is typing before showing entries from history
history = self.getlist()
if self.idx == -1:
self.templine = self.stash.mini_buffer.modifiable_string.rstrip()
self.idx += 1
if self.idx >= len(history):
self.idx = len(history) - 1
else:
entry = history[self.idx]
# If moving up away from an unfinished input line, try searching the history
# for a line that starts with the unfinished line
if self.idx == 0 and self.ipython_style_history_search:
for idx, hs in enumerate(history):
if hs.startswith(self.templine):
entry = hs
self.idx = idx
break
self.stash.mini_buffer.feed(None, entry)
def down(self):
"""
Move downwards in the history.
"""
history = self.getlist()
self.idx -= 1
if self.idx < -1:
self.idx = -1
else:
if self.idx == -1:
entry = self.templine
else:
entry = history[self.idx]
self.stash.mini_buffer.feed(None, entry)
| mit | a4926bd397b9d35c9fda315212b98934 | 29.417062 | 115 | 0.53459 | 4.290107 | false | false | false | false |
ywangd/stash | tests/misc/test_pwd.py | 1 | 1194 | # -*- coding: utf-8 -*-
"""tests for the 'pwd' command."""
import os
from stash.tests.stashtest import StashTestCase
class PwdTests(StashTestCase):
"""tests for the 'pwd' command."""
cwd = os.path.expanduser("~")
def test_help(self):
"""test 'pwd --help'."""
output = self.run_command("pwd --help")
self.assertIn("pwd", output)
self.assertIn("-h", output)
self.assertIn("--help", output)
self.assertIn("-b", output)
self.assertIn("--basename", output)
self.assertIn("-f", output)
self.assertIn("--fullname", output)
def test_pwd_collapseuser(self):
"""tests 'pwd'."""
output = self.run_command("pwd").replace("\n", "").replace("/", "")
self.assertEqual(output, "~")
def test_pwd_fullname(self):
"""tests 'pwd --fullname'."""
output = self.run_command("pwd --fullname").replace("\n", "")
self.assertEqual(output, os.path.abspath(os.getcwd()))
def test_pwd_basename(self):
"""tests 'pwd --basename'."""
output = self.run_command("pwd --basename").replace("\n", "")
self.assertEqual(output, os.path.basename(os.getcwd()))
| mit | b6ff144cd8c8216008c6f453e0476061 | 32.166667 | 75 | 0.572864 | 3.864078 | false | true | false | false |
ywangd/stash | lib/mlpatches/tl_patches.py | 1 | 1462 | # -*- coding: utf-8 -*-
"""patches for making some vars thread-local"""
import threading
import copy
from mlpatches import base
class ThreadLocalVar(object):
"""creates a proxy to a thread-local version of passee var."""
# todo: maybe add lock?
def __init__(self, var):
self.__var = var
self.__local = threading.local()
self.__setattr__ = self.__setattr_ # set __setattr__ here
def __getattr__(self, name):
try:
v = self.__local.var
except AttributeError:
v = self.__local.var = copy.deepcopy(self.__var)
return getattr(v, name)
def __setattr_(self, name, value): # keep missing "_"
try:
v = self.__local.var
except AttributeError:
v = self.__local.var = copy.deepcopy(self.__var)
return setattr(v, name, value)
def __delattr__(self, name):
try:
v = self.__local.var
except AttributeError:
v = self.__local.var = copy.deepcopy(self.__var)
return delattr(v, name)
def __del__(self):
try:
del self.__local.var
except AttributeError:
pass
# define patches
class ThreadLocalArgv(base.FunctionPatch):
"""Patches sys.argv to be thread-local."""
PY2 = True
PY3 = True
module = "sys"
function = "argv"
replacement = ThreadLocalVar([])
# create patch instances
TL_ARGV_PATCH = ThreadLocalArgv()
| mit | 463946cfa89fd69be337e3cda047c574 | 24.206897 | 67 | 0.569767 | 3.994536 | false | false | false | false |
pyvisa/pyvisa | pyvisa/typing.py | 1 | 1444 | # -*- coding: utf-8 -*-
"""Type aliases allowing to narrow down definition and reduce duplication
This file is part of PyVISA.
:copyright: 2020-2022 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from typing import Any, Callable, NewType
from . import constants
#: Type alias used to identify VISA resource manager sessions
VISARMSession = NewType("VISARMSession", int)
#: Type alias used to identify VISA resource sessions
VISASession = NewType("VISASession", int)
#: Type alias used to identify an event context (created when handling an event)
VISAEventContext = NewType("VISAEventContext", int)
#: Type alias used to identify a job id created during an asynchronous operation
#: JobID should always be treated as opaque objects since their exact behavior
#: may depend on the backend in use.
VISAJobID = NewType("VISAJobID", object)
#: Type alias used to identify a memory address in a register based resource after
#: it has been mapped
VISAMemoryAddress = NewType("VISAMemoryAddress", int)
#: Type for event handler passed to the VISA library. The last argument is the
#: user handle specified when registering the handler. The value that will be
#: passed to the handler is the value as interpreted by the backend and returned
#: by the install_visa_handler method of the library object.
VISAHandler = Callable[[VISASession, constants.EventType, VISAEventContext, Any], None]
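# A handler matching this alias would look like the following sketch (names
# are illustrative only):
#   def on_event(session: VISASession, event_type: constants.EventType,
#                context: VISAEventContext, user_handle: Any) -> None:
#       ...  # react to the event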
| mit | bbba9e3f8c8041c8bc4408aed738c0d9 | 39.111111 | 87 | 0.774238 | 3.945355 | false | false | false | false |
mozillazg/python-pinyin | gen_pinyin_dict.py | 1 | 1118 | # -*- coding: utf-8 -*-
import sys
def main(in_fp, out_fp):
out_fp.write('''# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Warning: Auto-generated file, don't edit.
pinyin_dict = {
''')
for line in in_fp.readlines():
line = line.strip()
if line.startswith('#') or not line:
continue
else:
# line is U+4E2D: zhōng,zhòng # 中
# raw_line U+4E2D: zhōng,zhòng
raw_line = line.split('#')[0].strip()
# 0x4E2D: zhōng,zhòng
new_line = raw_line.replace('U+', '0x')
# 0x4E2D: 'zhōng,zhòng
new_line = new_line.replace(': ', ": '")
# 0x4E2D: 'zhōng,zhòng'\n
new_line = " {new_line}',\n".format(new_line=new_line)
out_fp.write(new_line)
out_fp.write('}\n')
if __name__ == '__main__':
if len(sys.argv) < 3:  # need both INPUT and OUTPUT arguments
print('python gen_pinyin_dict.py INPUT OUTPUT')
sys.exit(1)
in_f = sys.argv[1]
out_f = sys.argv[2]
with open(in_f) as in_fp, open(out_f, 'w') as out_fp:
main(in_fp, out_fp)
| mit | 957c6c7c41f33323f227c0ce497ecf91 | 28.105263 | 69 | 0.506329 | 2.704156 | false | false | false | false |
mozillazg/python-pinyin | pypinyin/contrib/uv.py | 1 | 1334 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class V2UMixin(object):
"""无声调相关拼音风格下的结果使用 ``ü`` 代替原来的 ``v``
使用方法::
from pypinyin import lazy_pinyin, Style
from pypinyin.contrib.uv import V2UMixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
# The original result uses ``v`` to represent ``ü``
print(lazy_pinyin('战略'))
# Output: ['zhan', 'lve']
class MyConverter(V2UMixin, DefaultConverter):
pass
my_pinyin = Pinyin(MyConverter())
pinyin = my_pinyin.pinyin
lazy_pinyin = my_pinyin.lazy_pinyin
# The new result uses ``ü`` instead of the original ``v``
print(lazy_pinyin('战略'))
# Output: ['zhan', 'lüe']
print(pinyin('战略', style=Style.NORMAL))
# Output: [['zhan'], ['lüe']]
"""
def post_convert_style(self, han, orig_pinyin, converted_pinyin,
style, strict, **kwargs):
pre_data = super(V2UMixin, self).post_convert_style(
han, orig_pinyin, converted_pinyin, style, strict, **kwargs)
if pre_data is not None:
converted_pinyin = pre_data
return converted_pinyin.replace('v', 'ü')
| mit | dcd3e1d8a98402adeb16ccedde7fc768 | 25.8 | 72 | 0.565506 | 2.674058 | false | false | false | false |
mozillazg/python-pinyin | pypinyin/style/wadegiles.py | 1 | 12194 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypinyin.constants import Style
from pypinyin.style import register
from pypinyin.style._utils import replace_symbol_to_no_symbol
from pypinyin.style._utils import get_initials
# https://en.wikipedia.org/wiki/Wade%E2%80%93Giles
# https://web.archive.org/web/20070128065433/http://polaris.gseis.ucla.edu/skozerow/wadegiles.htm
# https://www.lib.uchicago.edu/about/directory/departments/eastasia/find/wade-giles-pinyin-conversion-table/
# https://www.zdic.net/ts/fulu/2019/05/18.html
_convert_table = [
['a', 'a'],
['ai', 'ai'],
['an', 'an'],
['ang', 'ang'],
['ao', 'ao'],
['zha', 'cha'],
['cha', 'ch\'a'],
['zhai', 'chai'],
['chai', 'ch\'ai'],
['zhan', 'chan'],
['chan', 'ch\'an'],
['zhang', 'chang'],
['chang', 'ch\'ang'],
['zhao', 'chao'],
['chao', 'ch\'ao'],
['zhe', 'che'],
['che', 'ch\'e'],
['zhen', 'chen'],
['chen', 'ch\'en'],
['zheng', 'cheng'],
['cheng', 'ch\'eng'],
['ji', 'chi'],
['qi', 'ch\'i'],
['jia', 'chia'],
['qia', 'ch\'ia'],
['jiang', 'chiang'],
['qiang', 'ch\'iang'],
['jiao', 'chiao'],
['qiao', 'ch\'iao'],
['jie', 'chieh'],
['qie', 'ch\'ieh'],
['jian', 'chien'],
['qian', 'ch\'ien'],
['zhi', 'chih'],
['chi', 'ch\'ih'],
['jin', 'chin'],
['qin', 'ch\'in'],
['jing', 'ching'],
['qing', 'ch\'ing'],
['jiu', 'chiu'],
['qiu', 'ch\'iu'],
['jiong', 'chiung'],
['qiong', 'ch\'iung'],
['zhuo', 'cho'],
['chuo', 'ch\'o'],
['zhou', 'chou'],
['chou', 'ch\'ou'],
['zhu', 'chu'],
['chu', 'ch\'u'],
['ju', 'chü'],
['qu', 'ch\'ü'],
['zhua', 'chua'],
['zhuai', 'chuai'],
['chuai', 'ch\'uai'],
['zhuan', 'chuan'],
['chuan', 'ch\'uan'],
['juan', 'chüan'],
['quan', 'ch\'üan'],
['zhuang', 'chuang'],
['chuang', 'ch\'uang'],
['jue', 'chüeh'],
['que', 'ch\'üeh'],
['zhui', 'chui'],
['chui', 'ch\'ui'],
['zhun', 'chun'],
['chun', 'ch\'un'],
['jun', 'chün'],
['qun', 'ch\'ün'],
['zhong', 'chung'],
['chong', 'ch\'ung'],
['en', 'en'],
['er', 'erh'],
['fa', 'fa'],
['fan', 'fan'],
['fang', 'fang'],
['fei', 'fei'],
['fen', 'fen'],
['feng', 'feng'],
['fo', 'fo'],
['fou', 'fou'],
['fu', 'fu'],
['ha', 'ha'],
['hai', 'hai'],
['han', 'han'],
['hang', 'hang'],
['hao', 'hao'],
['hei', 'hei'],
['hen', 'hen'],
['heng', 'heng'],
['he', 'ho'],
['hou', 'hou'],
['xi', 'hsi'],
['xia', 'hsia'],
['xiang', 'hsiang'],
['xiao', 'hsiao'],
['xie', 'hsieh'],
['xian', 'hsien'],
['xin', 'hsin'],
['xing', 'hsing'],
['xiu', 'hsiu'],
['xiong', 'hsiung'],
['xu', 'hsü'],
['xuan', 'hsüan'],
['xue', 'hsüeh'],
['xun', 'hsün'],
['hu', 'hu'],
['hua', 'hua'],
['huai', 'huai'],
['huan', 'huan'],
['huang', 'huang'],
['hui', 'hui'],
['hun', 'hun'],
['hong', 'hung'],
['huo', 'huo'],
['yi', 'i'],
['ran', 'jan'],
['rang', 'jang'],
['rao', 'jao'],
['re', 'je'],
['ren', 'jen'],
['reng', 'jeng'],
['ri', 'jih'],
['ruo', 'jo'],
['rou', 'jou'],
['ru', 'ju'],
['ruan', 'juan'],
['rui', 'jui'],
['run', 'jun'],
['rong', 'jung'],
['ga', 'ka'],
['ka', 'k\'a'],
['gai', 'kai'],
['kai', 'k\'ai'],
['gan', 'kan'],
['kan', 'k\'an'],
['gang', 'kang'],
['kang', 'k\'ang'],
['gao', 'kao'],
['kao', 'k\'ao'],
['gen', 'ken'],
['ken', 'k\'en'],
['geng', 'keng'],
['keng', 'k\'eng'],
['ge', 'ko'],
['ke', 'k\'o'],
['gou', 'kou'],
['kou', 'k\'ou'],
['gu', 'ku'],
['ku', 'k\'u'],
['gua', 'kua'],
['kua', 'k\'ua'],
['guai', 'kuai'],
['kuai', 'k\'uai'],
['guan', 'kuan'],
['kuan', 'k\'uan'],
['guang', 'kuang'],
['kuang', 'k\'uang'],
['gui', 'kuei'],
['kui', 'k\'uei'],
['gun', 'kun'],
['kun', 'k\'un'],
['gong', 'kung'],
['kong', 'k\'ung'],
['guo', 'kuo'],
['kuo', 'k\'uo'],
['la', 'la'],
['lai', 'lai'],
['lan', 'lan'],
['lang', 'lang'],
['lao', 'lao'],
['le', 'le'],
['lei', 'lei'],
['leng', 'leng'],
['li', 'li'],
['liang', 'liang'],
['liao', 'liao'],
['lie', 'lieh'],
['lian', 'lien'],
['lin', 'lin'],
['ling', 'ling'],
['liu', 'liu'],
['luo', 'lo'],
['lou', 'lou'],
['lu', 'lu'],
['lü', 'lü'],
['luan', 'luan'],
['luan', 'lüan'],
['lue', 'lüeh'],
['lun', 'lun'],
['long', 'lung'],
['ma', 'ma'],
['mai', 'mai'],
['man', 'man'],
['mang', 'mang'],
['mao', 'mao'],
['mei', 'mei'],
['men', 'men'],
['meng', 'meng'],
['mi', 'mi'],
['miao', 'miao'],
['mie', 'mieh'],
['mian', 'mien'],
['min', 'min'],
['ming', 'ming'],
['miu', 'miu'],
['mo', 'mo'],
['mou', 'mou'],
['mu', 'mu'],
['na', 'na'],
['nai', 'nai'],
['nan', 'nan'],
['nang', 'nang'],
['nao', 'nao'],
['nei', 'nei'],
['nen', 'nen'],
['neng', 'neng'],
['ni', 'ni'],
['niang', 'niang'],
['niao', 'niao'],
['nie', 'nieh'],
['nian', 'nien'],
['nin', 'nin'],
['ning', 'ning'],
['niu', 'niu'],
['nuo', 'no'],
['nou', 'nou'],
['nu', 'nu'],
['nü', 'nü'],
['nuan', 'nuan'],
['nue', 'nüeh'],
['nong', 'nung'],
['e', 'o'],
['ou', 'ou'],
['ba', 'pa'],
['pa', 'p\'a'],
['bai', 'pai'],
['pai', 'p\'ai'],
['ban', 'pan'],
['pan', 'p\'an'],
['bang', 'pang'],
['pang', 'p\'ang'],
['bao', 'pao'],
['pao', 'p\'ao'],
['bei', 'pei'],
['pei', 'p\'ei'],
['ben', 'pen'],
['pen', 'p\'en'],
['beng', 'peng'],
['peng', 'p\'eng'],
['bi', 'pi'],
['pi', 'p\'i'],
['biao', 'piao'],
['piao', 'p\'iao'],
['bie', 'pieh'],
['pie', 'p\'ieh'],
['bian', 'pien'],
['pian', 'p\'ien'],
['bin', 'pin'],
['pin', 'p\'in'],
['bing', 'ping'],
['ping', 'p\'ing'],
['bo', 'po'],
['po', 'p\'o'],
['pou', 'p\'ou'],
['bu', 'pu'],
['pu', 'p\'u'],
['sa', 'sa'],
['sai', 'sai'],
['san', 'san'],
['sang', 'sang'],
['sao', 'sao'],
['se', 'se'],
['sen', 'sen'],
['seng', 'seng'],
['sha', 'sha'],
['shai', 'shai'],
['shan', 'shan'],
['shang', 'shang'],
['shao', 'shao'],
['she', 'she'],
['shen', 'shen'],
['sheng', 'sheng'],
['shi', 'shih'],
['shou', 'shou'],
['shu', 'shu'],
['shua', 'shua'],
['shuai', 'shuai'],
['shuan', 'shuan'],
['shuang', 'shuang'],
['shui', 'shui'],
['shun', 'shun'],
['shuo', 'shuo'],
['suo', 'so'],
['sou', 'sou'],
['si', 'ssu'],
['su', 'su'],
['suan', 'suan'],
['sui', 'sui'],
['sun', 'sun'],
['song', 'sung'],
['da', 'ta'],
['ta', 't\'a'],
['dai', 'tai'],
['tai', 't\'ai'],
['dan', 'tan'],
['tan', 't\'an'],
['dang', 'tang'],
['tang', 't\'ang'],
['dao', 'tao'],
['tao', 't\'ao'],
['de', 'te'],
['te', 't\'e'],
['deng', 'teng'],
['teng', 't\'eng'],
['di', 'ti'],
['ti', 't\'i'],
['diao', 'tiao'],
['tiao', 't\'iao'],
['die', 'tieh'],
['tie', 't\'ieh'],
['dian', 'tien'],
['tian', 't\'ien'],
['ding', 'ting'],
['ting', 't\'ing'],
['diu', 'tiu'],
['duo', 'to'],
['tuo', 't\'o'],
['dou', 'tou'],
['tou', 't\'ou'],
['du', 'tu'],
['tu', 't\'u'],
['duan', 'tuan'],
['tuan', 't\'uan'],
['dui', 'tui'],
['tui', 't\'ui'],
['dun', 'tun'],
['tun', 't\'un'],
['dong', 'tung'],
['tong', 't\'ung'],
['za', 'tsa'],
['ca', 'ts\'a'],
['zai', 'tsai'],
['cai', 'ts\'ai'],
['zan', 'tsan'],
['can', 'ts\'an'],
['zang', 'tsang'],
['cang', 'ts\'ang'],
['zao', 'tsao'],
['cao', 'ts\'ao'],
['ze', 'tse'],
['ce', 'ts\'e'],
['zei', 'tsei'],
['zen', 'tsen'],
['cen', 'ts\'en'],
['zeng', 'tseng'],
['ceng', 'ts\'eng'],
['zuo', 'tso'],
['cuo', 'ts\'o'],
['zou', 'tsou'],
['cou', 'ts\'ou'],
['zu', 'tsu'],
['cu', 'ts\'u'],
['zuan', 'tsuan'],
['cuan', 'ts\'uan'],
['zui', 'tsui'],
['cui', 'ts\'ui'],
['zun', 'tsun'],
['cun', 'ts\'un'],
['zong', 'tsung'],
['cong', 'ts\'ung'],
['zi', 'tzu'],
['ci', 'tz\'u'],
['wa', 'wa'],
['wai', 'wai'],
['wan', 'wan'],
['wang', 'wang'],
['wei', 'wei'],
['wen', 'wen'],
['weng', 'weng'],
['wo', 'wo'],
['wu', 'wu'],
['ya', 'ya'],
['yai', 'yai'],
['yang', 'yang'],
['yao', 'yao'],
['ye', 'yeh'],
['yan', 'yen'],
['yin', 'yin'],
['ying', 'ying'],
['yo', 'yo'],
['you', 'yu'],
['yu', 'yü'],
['yuan', 'yüan'],
['yue', 'yüeh'],
['yun', 'yün'],
['yong', 'yung'],
['chua', 'ch`ua'],
['dei', 'tei'],
['den', 'ten'],
['diang', 'tiang'],
['ei', 'ei'],
['eng', 'eng'],
['gei', 'kei'],
['lia', 'lia'],
['lo', 'lo'],
['lüan', 'lüan'],
['lvan', 'lüan'],
['lüe', 'lüeh'],
['lve', 'lüeh'],
['lün', 'lün'],
['lvn', 'lün'],
['me', 'me'],
['ne', 'ne'],
['nia', 'nia'],
['nun', 'nun'],
['nüe', 'nüeh'],
['nve', 'nüeh'],
['o', 'o'],
['sei', 'sei'],
['shei', 'shei'],
['shong', 'shung'],
['zhei', 'chei'],
]
_convert_table.sort(key=lambda x: len(x[0]), reverse=True)
# https://www.mysmth.net/nForum/#!article/Linguistics/4991
_initial_table = [
['b', 'p'],
['p', 'p\''],
['m', 'm'],
['f', 'f'],
['d', 't'],
['t', 't\''],
['n', 'n'],
['l', 'l'],
['g', 'g'],
['k', 'k\''],
['h', 'h'],
['j', 'ch'],
['q', 'ch\''],
['x', 'hs'],
['zh', 'ch'],
['ch', 'ch\''],
['sh', 'sh'],
['r', 'j'],
['z', 'ts'],
['c', 'ts\''],
['s', 's'],
]
_initial_table.sort(key=lambda x: len(x[0]), reverse=True)
_tone_table = [
['i', 'i'],
['u', 'u'],
['ü', 'ü'],
['v', 'ü'],
['a', 'a'],
['ia', 'ia'],
['ua', 'ua'],
['o', 'o'],
['uo', 'o'],
['e', 'e'],
['ie', 'ieh'],
['üe', 'üeh'],
['ve', 'üeh'],
['ai', 'ei'],
['uei', 'ui'],
['ao', 'ao'],
['iao', 'iao'],
['ou', 'ou'],
['iou', 'iu'],
['an', 'an'],
['ian', 'ien'],
['uan', 'uan'],
['üan', 'üan'],
['van', 'üan'],
['en', 'en'],
['in', 'in'],
['uen', 'un'],
['ün', 'ün'],
['vn', 'ün'],
['ang', 'ang'],
['iang', 'iang'],
['uang', 'uang'],
['eng', 'eng'],
['ing', 'ing'],
['ueng', 'ueng'],
['ong', 'ung'],
['iong', 'iung'],
['ê', 'eh'],
['er', 'erh'],
]
_tone_table.sort(key=lambda x: len(x[0]), reverse=True)
_except_table = [
['zhi', 'chih'],
['chi', 'ch\'ih'],
['shi', 'shih'],
['ri', 'jih'],
['zi', 'tzu'],
['ci', 'tz\'u'],
['si', 'ssu'],
['guo', 'guo'],
['kuo', 'k\'uo'],
['huo', 'huo'],
['luo', 'luo'],
['jue', 'chüeh'],
['que', 'ch\'üeh'],
['xue', 'hsüeh'],
]
_except_table.sort(key=lambda x: len(x[0]), reverse=True)
def to_wade_glides(pinyin, **kwargs):
pinyin = replace_symbol_to_no_symbol(pinyin).replace('v', 'ü')
whole_converted = _convert_whole(pinyin, _except_table)
if whole_converted != pinyin:
return _fixed_result(whole_converted)
whole_converted = _convert_whole(pinyin, _convert_table)
if whole_converted != pinyin:
return _fixed_result(whole_converted)
initials = get_initials(pinyin, strict=False)
tones = pinyin[len(initials):]
initials = _convert_whole(initials, _initial_table)
tones = _convert_whole(tones, _tone_table)
return _fixed_result('{}{}'.format(initials, tones))
def _fixed_result(pinyin):
return pinyin.replace('ü', 'v')
def _convert_whole(chars, table):
for pair in table:
f, r = pair
if f == chars:
return r
return chars
register(Style.WADEGILES, func=to_wade_glides)
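# Usage sketch once the style is registered (goes through pypinyin's public
# API; the sample word is arbitrary):
#   from pypinyin import lazy_pinyin, Style
#   lazy_pinyin('中国', style=Style.WADEGILES)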
| mit | 3dbf3aa5115b1246d60a6260e7dbfc7e | 20.719141 | 108 | 0.363809 | 2.469189 | false | false | false | false |
pgmpy/pgmpy | pgmpy/estimators/StructureScore.py | 2 | 15672 | #!/usr/bin/env python
import numpy as np
from scipy.special import gammaln
from math import lgamma, log
from pgmpy.estimators import BaseEstimator
class StructureScore(BaseEstimator):
def __init__(self, data, **kwargs):
"""
Abstract base class for structure scoring classes in pgmpy. Use any of the derived classes
K2Score, BDeuScore, or BicScore. Scoring classes are
used to measure how well a model is able to describe the given data set.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
Reference
---------
Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3
"""
super(StructureScore, self).__init__(data, **kwargs)
def score(self, model):
"""
Computes a score to measure how well the given `BayesianNetwork` fits
to the data set. (This method relies on the `local_score`-method that
is implemented in each subclass.)
Parameters
----------
model: BayesianNetwork instance
The Bayesian network that is to be scored. Nodes of the BayesianNetwork need to coincide
with column names of data set.
Returns
-------
score: float
A number indicating the degree of fit between data and model
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.estimators import K2Score
>>> # create random data sample with 3 variables, where B and C are identical:
>>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
>>> data['C'] = data['B']
>>> K2Score(data).score(BayesianNetwork([['A','B'], ['A','C']]))
-24242.367348745247
>>> K2Score(data).score(BayesianNetwork([['A','B'], ['B','C']]))
-16273.793897051042
"""
score = 0
for node in model.nodes():
score += self.local_score(node, model.predecessors(node))
score += self.structure_prior(model)
return score
def structure_prior(self, model):
"""A (log) prior distribution over models. Currently unused (= uniform)."""
return 0
def structure_prior_ratio(self, operation):
"""Return the log ratio of the prior probabilities for a given proposed change to the DAG.
Currently unused (=uniform)."""
return 0
class K2Score(StructureScore):
def __init__(self, data, **kwargs):
"""
Class for Bayesian structure scoring for BayesianNetworks with Dirichlet priors.
The K2 score is the result of setting all Dirichlet hyperparameters/pseudo_counts to 1.
The `score`-method measures how well a model is able to describe the given data set.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
References
---------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3.4-18.3.6 (esp. page 806)
[2] AM Carvalho, Scoring functions for learning Bayesian networks,
http://www.lx.it.pt/~asmc/pub/talks/09-TA/ta_pres.pdf
"""
super(K2Score, self).__init__(data, **kwargs)
def local_score(self, variable, parents):
"""Computes a score that measures how much a given variable is
"influenced" by a given list of potential parents."""
var_states = self.state_names[variable]
var_cardinality = len(var_states)
state_counts = self.state_counts(variable, parents)
num_parents_states = float(state_counts.shape[1])
counts = np.asarray(state_counts)
log_gamma_counts = np.zeros_like(counts, dtype=float)
# Compute log(gamma(counts + 1))
gammaln(counts + 1, out=log_gamma_counts)
# Compute the log-gamma conditional sample size
log_gamma_conds = np.sum(counts, axis=0, dtype=float)
gammaln(log_gamma_conds + var_cardinality, out=log_gamma_conds)
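# The expression below is the K2 log marginal likelihood (all Dirichlet
# pseudo-counts equal to 1):
#   sum_{j,k} lgamma(N_jk + 1) - sum_j lgamma(N_j + r) + q * lgamma(r)
# with r = var_cardinality and q = num_parents_states.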
score = (
np.sum(log_gamma_counts)
- np.sum(log_gamma_conds)
+ num_parents_states * lgamma(var_cardinality)
)
return score
class BDeuScore(StructureScore):
def __init__(self, data, equivalent_sample_size=10, **kwargs):
"""
Class for Bayesian structure scoring for BayesianNetworks with Dirichlet priors.
The BDeu score is the result of setting all Dirichlet hyperparameters/pseudo_counts to
`equivalent_sample_size/variable_cardinality`.
The `score`-method measures how well a model is able to describe the given data set.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
equivalent_sample_size: int (default: 10)
The equivalent/imaginary sample size (of uniform pseudo samples) for the dirichlet hyperparameters.
The score is sensitive to this value, runs with different values might be useful.
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
References
---------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3.4-18.3.6 (esp. page 806)
[2] AM Carvalho, Scoring functions for learning Bayesian networks,
http://www.lx.it.pt/~asmc/pub/talks/09-TA/ta_pres.pdf
"""
self.equivalent_sample_size = equivalent_sample_size
super(BDeuScore, self).__init__(data, **kwargs)
def get_number_of_parent_states(self, state_counts):
return float(state_counts.shape[1])
def local_score(self, variable, parents):
"""Computes a score that measures how much a given variable is
"influenced" by a given list of potential parents."""
var_states = self.state_names[variable]
var_cardinality = len(var_states)
state_counts = self.state_counts(variable, parents)
num_parents_states = self.get_number_of_parent_states(state_counts)
counts = np.asarray(state_counts)
log_gamma_counts = np.zeros_like(counts, dtype=float)
alpha = self.equivalent_sample_size / num_parents_states
beta = self.equivalent_sample_size / counts.size
# Compute log(gamma(counts + beta))
gammaln(counts + beta, out=log_gamma_counts)
# Compute the log-gamma conditional sample size
log_gamma_conds = np.sum(counts, axis=0, dtype=float)
gammaln(log_gamma_conds + alpha, out=log_gamma_conds)
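# The expression below is the BDeu log marginal likelihood with
# alpha = equivalent_sample_size / num_parents_states and
# beta = equivalent_sample_size / counts.size:
#   sum_{j,k} lgamma(N_jk + beta) - sum_j lgamma(N_j + alpha)
#   + num_parents_states * lgamma(alpha) - counts.size * lgamma(beta)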
score = (
np.sum(log_gamma_counts)
- np.sum(log_gamma_conds)
+ num_parents_states * lgamma(alpha)
- counts.size * lgamma(beta)
)
return score
class BDsScore(BDeuScore):
def __init__(self, data, equivalent_sample_size=10, **kwargs):
"""
Class for Bayesian structure scoring for BayesianNetworks with
Dirichlet priors. The BDs score is the result of setting all Dirichlet
hyperparameters/pseudo_counts to
`equivalent_sample_size/modified_variable_cardinality` where for the
modified_variable_cardinality only the number of parent configurations
where there were observed variable counts are considered. The
`score`-method measures how well a model is able to describe the given
data set.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
equivalent_sample_size: int (default: 10)
The equivalent/imaginary sample size (of uniform pseudo samples) for the dirichlet
hyperparameters.
The score is sensitive to this value, runs with different values might be useful.
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
References
---------
[1] Scutari, Marco. An Empirical-Bayes Score for Discrete Bayesian Networks.
Journal of Machine Learning Research, 2016, pp. 438–48
"""
super(BDsScore, self).__init__(data, equivalent_sample_size, **kwargs)
def get_number_of_parent_states(self, state_counts):
return float(len(np.where(state_counts.sum(axis=0) > 0)[0]))
def structure_prior_ratio(self, operation):
"""Return the log ratio of the prior probabilities for a given proposed change to
the DAG.
"""
if operation == "+":
return -log(2.0)
if operation == "-":
return log(2.0)
return 0
def structure_prior(self, model):
"""
Implements the marginal uniform prior for the graph structure where each arc
is independent with the probability of an arc for any two nodes in either direction
is 1/4 and the probability of no arc between any two nodes is 1/2."""
nedges = float(len(model.edges()))
nnodes = float(len(model.nodes()))
possible_edges = nnodes * (nnodes - 1) / 2.0
score = -(nedges + possible_edges) * log(2.0)
return score
class BicScore(StructureScore):
def __init__(self, data, **kwargs):
"""
Class for Bayesian structure scoring for BayesianNetworks with
Dirichlet priors. The BIC/MDL score ("Bayesian Information Criterion",
also "Minimal Descriptive Length") is a log-likelihood score with an
additional penalty for network complexity, to avoid overfitting. The
`score`-method measures how well a model is able to describe the given
data set.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
References
---------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3.4-18.3.6 (esp. page 802)
[2] AM Carvalho, Scoring functions for learning Bayesian networks,
http://www.lx.it.pt/~asmc/pub/talks/09-TA/ta_pres.pdf
"""
super(BicScore, self).__init__(data, **kwargs)
def local_score(self, variable, parents):
"""Computes a score that measures how much a given variable is
"influenced" by a given list of potential parents."""
var_states = self.state_names[variable]
var_cardinality = len(var_states)
state_counts = self.state_counts(variable, parents)
sample_size = len(self.data)
num_parents_states = float(state_counts.shape[1])
counts = np.asarray(state_counts)
log_likelihoods = np.zeros_like(counts, dtype=float)
# Compute the log-counts
np.log(counts, out=log_likelihoods, where=counts > 0)
# Compute the log-conditional sample size
log_conditionals = np.sum(counts, axis=0, dtype=float)
np.log(log_conditionals, out=log_conditionals, where=log_conditionals > 0)
# Compute the log-likelihoods
log_likelihoods -= log_conditionals
log_likelihoods *= counts
score = np.sum(log_likelihoods)
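        # Subtract the BIC complexity penalty: (log N / 2) times the number of
        # free parameters, i.e. (var_cardinality - 1) free probabilities per
        # parent configuration.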
score -= 0.5 * log(sample_size) * num_parents_states * (var_cardinality - 1)
return score
| mit | 2eb7c912acf3a3a1f1fa5d92e51e0e7c | 43.140845 | 111 | 0.637652 | 4.214632 | false | false | false | false |
pgmpy/pgmpy | pgmpy/tests/test_inference/test_mplp.py | 2 | 2448 | import unittest
import numpy as np
from pgmpy.inference.mplp import Mplp
from pgmpy.readwrite import UAIReader
class TestMplp(unittest.TestCase):
def setUp(self):
reader_file = UAIReader(
"pgmpy/tests/test_readwrite/testdata/grid4x4_with_triplets.uai"
)
self.markov_model = reader_file.get_model()
for factor in self.markov_model.factors:
factor.values = np.log(factor.values)
self.mplp = Mplp(self.markov_model)
class TightenTripletOff(TestMplp):
# Query when tighten triplet is OFF
def test_query_tighten_triplet_off(self):
query_result = self.mplp.map_query(tighten_triplet=False)
        # Expected results from the Sontag code for an MPLP run without tightening:
expected_result = {
"var_1": 1,
"var_0": 1,
"var_2": 0,
"var_3": 0,
"var_4": 1,
"var_5": 0,
"var_6": 1,
"var_7": 0,
"var_8": 0,
"var_9": 0,
"var_10": 1,
"var_11": 1,
"var_12": 1,
"var_13": 0,
"var_14": 1,
"var_15": 0,
}
self.assertEqual(query_result, expected_result)
# The final Integrality gap after solving for the present case
int_gap = self.mplp.get_integrality_gap()
self.assertAlmostEqual(64.59, int_gap, places=1)
class TightenTripletOn(TestMplp):
# Query when tighten triplet is ON
def test_query_tighten_triplet_on(self):
query_result = self.mplp.map_query(tighten_triplet=True)
        # Expected results from the Sontag code for an MPLP run with tightening:
expected_result = {
"var_0": 1,
"var_1": 0,
"var_2": 1,
"var_3": 0,
"var_4": 1,
"var_5": 0,
"var_6": 0,
"var_7": 0,
"var_8": 0,
"var_9": 0,
"var_10": 0,
"var_11": 0,
"var_12": 1,
"var_13": 0,
"var_14": 1,
"var_15": 1,
}
self.assertEqual(query_result, expected_result)
# The final Integrality gap after solving for the present case
int_gap = self.mplp.get_integrality_gap()
        # Since ties are broken arbitrarily, there are 2 possible solutions, however trivial the difference
self.assertIn(round(int_gap, 2), (7.98, 8.07))
| mit | 93deab2e4321c5b47cfbe42552f1550a | 28.142857 | 107 | 0.530229 | 3.294751 | false | true | false | false |
pgmpy/pgmpy | pgmpy/inference/CausalInference.py | 2 | 25809 | from collections.abc import Iterable
from itertools import chain, product
import networkx as nx
import numpy as np
from tqdm.auto import tqdm
from pgmpy.estimators.LinearModel import LinearEstimator
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.global_vars import SHOW_PROGRESS
from pgmpy.models import BayesianNetwork
from pgmpy.utils.sets import _powerset, _variable_or_iterable_to_set
class CausalInference(object):
"""
This is an inference class for performing Causal Inference over Bayesian Networks or Structural Equation Models.
This class will accept queries of the form: P(Y | do(X)) and utilize its methods to provide an estimand which:
* Identifies adjustment variables
* Backdoor Adjustment
* Front Door Adjustment
* Instrumental Variable Adjustment
Parameters
----------
model: CausalGraph
The model that we'll perform inference over.
set_nodes: list[node:str] or None
A list (or set/tuple) of nodes in the Bayesian Network which have been set to a specific value per the
do-operator.
Examples
--------
Create a small Bayesian Network.
>>> from pgmpy.models import BayesianNetwork
>>> game = BayesianNetwork([('X', 'A'),
... ('A', 'Y'),
... ('A', 'B')])
Load the graph into the CausalInference object to make causal queries.
>>> from pgmpy.inference.CausalInference import CausalInference
>>> inference = CausalInference(game)
>>> inference.get_all_backdoor_adjustment_sets(X="X", Y="Y")
>>> inference.get_all_frontdoor_adjustment_sets(X="X", Y="Y")
References
----------
'Causality: Models, Reasoning, and Inference' - Judea Pearl (2000)
    Many thanks to @ijmbarr for their implementation of causal graphical models. It served as an invaluable
    reference. Available on GitHub: https://github.com/ijmbarr/causalgraphicalmodels
"""
def __init__(self, model, set_nodes=None):
if not isinstance(model, BayesianNetwork):
raise NotImplementedError(
"Causal Inference is only implemented for BayesianNetworks at this time."
)
self.model = model
self.set_nodes = _variable_or_iterable_to_set(set_nodes)
self.observed_variables = frozenset(self.model.nodes()).difference(
model.latents
)
def __repr__(self):
variables = ", ".join(map(str, sorted(self.observed_variables)))
return f"{self.__class__.__name__}({variables})"
def is_valid_backdoor_adjustment_set(self, X, Y, Z=[]):
"""
Test whether Z is a valid backdoor adjustment set for estimating the causal impact of X on Y.
Parameters
----------
X: str
Intervention Variable
Y: str
Target Variable
Z: str or set[str]
Adjustment variables
Returns
-------
Is a valid backdoor adjustment set: bool
True if Z is a valid backdoor adjustment set else False
Examples
--------
>>> game1 = BayesianNetwork([('X', 'A'),
... ('A', 'Y'),
... ('A', 'B')])
>>> inference = CausalInference(game1)
>>> inference.is_valid_backdoor_adjustment_set("X", "Y")
True
"""
Z_ = _variable_or_iterable_to_set(Z)
observed = [X] + list(Z_)
parents_d_sep = []
for p in self.model.predecessors(X):
parents_d_sep.append(not self.model.is_dconnected(p, Y, observed=observed))
return all(parents_d_sep)
def get_all_backdoor_adjustment_sets(self, X, Y):
"""
Returns a list of all adjustment sets per the back-door criterion.
        A set of variables Z satisfies the back-door criterion relative to an ordered pair of variables (Xi, Xj) in a DAG G if:
(i) no node in Z is a descendant of Xi; and
(ii) Z blocks every path between Xi and Xj that contains an arrow into Xi.
TODO:
* Backdoors are great, but the most general things we could implement would be Ilya Shpitser's ID and
IDC algorithms. See [his Ph.D. thesis for a full explanation]
(https://ftp.cs.ucla.edu/pub/stat_ser/shpitser-thesis.pdf). After doing a little reading it is clear
            that we do not need to immediately implement this. However, in order for us to truly account for
unobserved variables, we will need not only these algorithms, but a more general implementation of a DAG.
Most DAGs do not allow for bidirected edges, but it is an important piece of notation which Pearl and
Shpitser use to denote graphs with latent variables.
Parameters
----------
        X: str
            Intervention Variable
        Y: str
            Target Variable
        Returns
        -------
        frozenset: A frozenset of frozensets
Examples
--------
>>> game1 = BayesianNetwork([('X', 'A'),
... ('A', 'Y'),
... ('A', 'B')])
>>> inference = CausalInference(game1)
>>> inference.get_all_backdoor_adjustment_sets("X", "Y")
frozenset()
References
----------
"Causality: Models, Reasoning, and Inference", Judea Pearl (2000). p.79.
"""
try:
assert X in self.observed_variables
assert Y in self.observed_variables
except AssertionError:
raise AssertionError("Make sure both X and Y are observed.")
if self.is_valid_backdoor_adjustment_set(X, Y, Z=frozenset()):
return frozenset()
possible_adjustment_variables = (
set(self.observed_variables)
- {X}
- {Y}
- set(nx.descendants(self.model, X))
)
valid_adjustment_sets = []
for s in _powerset(possible_adjustment_variables):
super_of_complete = []
for vs in valid_adjustment_sets:
super_of_complete.append(vs.intersection(set(s)) == vs)
if any(super_of_complete):
continue
if self.is_valid_backdoor_adjustment_set(X, Y, s):
valid_adjustment_sets.append(frozenset(s))
if len(valid_adjustment_sets) == 0:
raise ValueError(f"No valid adjustment set found for {X} -> {Y}")
return frozenset(valid_adjustment_sets)
def is_valid_frontdoor_adjustment_set(self, X, Y, Z=None):
"""
Test whether Z is a valid frontdoor adjustment set for estimating the causal impact of X on Y via the frontdoor
adjustment formula.
Parameters
----------
X: str
Intervention Variable
Y: str
Target Variable
Z: set
Adjustment variables
Returns
-------
Is valid frontdoor adjustment: bool
True if Z is a valid frontdoor adjustment set.
"""
Z = _variable_or_iterable_to_set(Z)
# 0. Get all directed paths from X to Y. Don't check further if there aren't any.
directed_paths = list(nx.all_simple_paths(self.model, X, Y))
if directed_paths == []:
return False
# 1. Z intercepts all directed paths from X to Y
unblocked_directed_paths = [
path for path in directed_paths if not any(zz in path for zz in Z)
]
if unblocked_directed_paths:
return False
# 2. there is no backdoor path from X to Z
unblocked_backdoor_paths_X_Z = [
zz for zz in Z if not self.is_valid_backdoor_adjustment_set(X, zz)
]
if unblocked_backdoor_paths_X_Z:
return False
# 3. All back-door paths from Z to Y are blocked by X
valid_backdoor_sets = []
for zz in Z:
valid_backdoor_sets.append(self.is_valid_backdoor_adjustment_set(zz, Y, X))
if not all(valid_backdoor_sets):
return False
return True
def get_all_frontdoor_adjustment_sets(self, X, Y):
"""
Identify possible sets of variables, Z, which satisfy the front-door criterion relative to given X and Y.
Z satisfies the front-door criterion if:
(i) Z intercepts all directed paths from X to Y
(ii) there is no backdoor path from X to Z
(iii) all back-door paths from Z to Y are blocked by X
Returns
-------
frozenset: a frozenset of frozensets
References
----------
Causality: Models, Reasoning, and Inference, Judea Pearl (2000). p.82.
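        Examples
        --------
        A sketch on a simple mediation chain (a fully observed stand-in for the
        classic front-door structure; the output shown is illustrative):
        >>> model = BayesianNetwork([("X", "Z"), ("Z", "Y")])
        >>> inference = CausalInference(model)
        >>> inference.get_all_frontdoor_adjustment_sets("X", "Y")  # doctest: +SKIP
        frozenset({frozenset({'Z'})})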
"""
assert X in self.observed_variables
assert Y in self.observed_variables
possible_adjustment_variables = set(self.observed_variables) - {X} - {Y}
valid_adjustment_sets = frozenset(
[
frozenset(s)
for s in _powerset(possible_adjustment_variables)
if self.is_valid_frontdoor_adjustment_set(X, Y, s)
]
)
return valid_adjustment_sets
def get_distribution(self):
"""
Returns a string representing the factorized distribution implied by the CGM.
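        Examples
        --------
        The factor order follows a topological sort, so it can vary between
        runs (illustrative output):
        >>> model = BayesianNetwork([('X', 'A'), ('A', 'Y'), ('A', 'B')])
        >>> CausalInference(model).get_distribution()  # doctest: +SKIP
        'P(X)P(A|X)P(Y|A)P(B|A)'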
"""
products = []
for node in nx.topological_sort(self.model):
if node in self.set_nodes:
continue
parents = list(self.model.predecessors(node))
if not parents:
p = f"P({node})"
else:
parents = [
f"do({n})" if n in self.set_nodes else str(n) for n in parents
]
p = f"P({node}|{','.join(parents)})"
products.append(p)
return "".join(products)
def simple_decision(self, adjustment_sets=[]):
"""
Selects the smallest set from provided adjustment sets.
Parameters
----------
adjustment_sets: iterable
A frozenset or list of valid adjustment sets
Returns
-------
frozenset
"""
adjustment_list = list(adjustment_sets)
if adjustment_list == []:
return frozenset([])
        return adjustment_list[np.argmin([len(s) for s in adjustment_list])]
def estimate_ate(
self,
X,
Y,
data,
estimand_strategy="smallest",
estimator_type="linear",
**kwargs,
):
"""
Estimate the average treatment effect (ATE) of X on Y.
Parameters
----------
X: str
Intervention Variable
Y: str
Target Variable
data: pandas.DataFrame
All observed data for this Bayesian Network.
estimand_strategy: str or frozenset
Either specify a specific backdoor adjustment set or a strategy.
The available options are:
smallest:
Use the smallest estimand of observed variables
all:
Estimate the ATE from each identified estimand
estimator_type: str
The type of model to be used to estimate the ATE.
All of the linear regression classes in statsmodels are available including:
* GLS: generalized least squares for arbitrary covariance
* OLS: ordinary least square of i.i.d. errors
* WLS: weighted least squares for heteroskedastic error
Specify them with their acronym (e.g. "OLS") or simple "linear" as an alias for OLS.
**kwargs: dict
            Keyword arguments specific to the selected estimator.
linear:
missing: str
Available options are "none", "drop", or "raise"
Returns
-------
The average treatment effect: float
Examples
--------
>>> import pandas as pd
>>> game1 = BayesianNetwork([('X', 'A'),
... ('A', 'Y'),
... ('A', 'B')])
>>> data = pd.DataFrame(np.random.randint(2, size=(1000, 4)), columns=['X', 'A', 'B', 'Y'])
>>> inference = CausalInference(model=game1)
>>> inference.estimate_ate("X", "Y", data=data, estimator_type="linear")
"""
        valid_estimators = ["linear"]
        if estimator_type not in valid_estimators:
            raise ValueError(
                f"{estimator_type} is not a valid estimator_type. Please select from {valid_estimators}"
            )
all_simple_paths = nx.all_simple_paths(self.model, X, Y)
all_path_effects = []
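        # Linear path rule: the total effect is the sum over directed paths of
        # the product of the per-edge causal effects along each path.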
for path in all_simple_paths:
causal_effect = []
for (x1, x2) in zip(path, path[1:]):
if isinstance(estimand_strategy, frozenset):
adjustment_set = frozenset({estimand_strategy})
assert self.is_valid_backdoor_adjustment_set(
x1, x2, Z=adjustment_set
)
elif estimand_strategy in ["smallest", "all"]:
adjustment_sets = self.get_all_backdoor_adjustment_sets(x1, x2)
if estimand_strategy == "smallest":
adjustment_sets = frozenset(
{self.simple_decision(adjustment_sets)}
)
if estimator_type == "linear":
self.estimator = LinearEstimator(self.model)
ate = [
self.estimator.fit(X=x1, Y=x2, Z=s, data=data, **kwargs)._get_ate()
for s in adjustment_sets
]
causal_effect.append(np.mean(ate))
all_path_effects.append(np.prod(causal_effect))
return np.sum(all_path_effects)
def get_proper_backdoor_graph(self, X, Y, inplace=False):
"""
Returns a proper backdoor graph for the exposure `X` and outcome `Y`.
A proper backdoor graph is a graph which remove the first edge of every
proper causal path from `X` to `Y`.
Parameters
----------
X: list (array-like)
A list of exposure variables.
Y: list (array-like)
A list of outcome variables
inplace: boolean
            If inplace is True, modifies the object itself. Otherwise returns
a modified copy of self.
Examples
--------
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.inference import CausalInference
>>> model = BayesianNetwork([("x1", "y1"), ("x1", "z1"), ("z1", "z2"),
... ("z2", "x2"), ("y2", "z2")])
>>> c_infer = CausalInference(model)
>>> c_infer.get_proper_backdoor_graph(X=["x1", "x2"], Y=["y1", "y2"])
<pgmpy.models.BayesianNetwork.BayesianNetwork at 0x7fba501ad940>
References
----------
[1] Perkovic, Emilija, et al. "Complete graphical characterization and construction of adjustment sets in Markov equivalence classes of ancestral graphs." The Journal of Machine Learning Research 18.1 (2017): 8132-8193.
"""
for var in chain(X, Y):
if var not in self.model.nodes():
raise ValueError(f"{var} not found in the model.")
model = self.model if inplace else self.model.copy()
edges_to_remove = []
for source in X:
paths = nx.all_simple_edge_paths(model, source, Y)
for path in paths:
edges_to_remove.append(path[0])
model.remove_edges_from(edges_to_remove)
return model
def is_valid_adjustment_set(self, X, Y, adjustment_set):
"""
Method to test whether `adjustment_set` is a valid adjustment set for
identifying the causal effect of `X` on `Y`.
Parameters
----------
X: list (array-like)
The set of cause variables.
Y: list (array-like)
The set of predictor variables.
adjustment_set: list (array-like)
The set of variables for which to test whether they satisfy the
adjustment set criteria.
Returns
-------
Is valid adjustment set: bool
Returns True if `adjustment_set` is a valid adjustment set for
identifying the effect of `X` on `Y`. Else returns False.
Examples
--------
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.inference import CausalInference
>>> model = BayesianNetwork([("x1", "y1"), ("x1", "z1"), ("z1", "z2"),
... ("z2", "x2"), ("y2", "z2")])
>>> c_infer = CausalInference(model)
>>> c_infer.is_valid_adjustment_set(X=['x1', 'x2'], Y=['y1', 'y2'], adjustment_set=['z1', 'z2'])
True
References
----------
[1] Perkovic, Emilija, et al. "Complete graphical characterization and construction of adjustment sets in Markov equivalence classes of ancestral graphs." The Journal of Machine Learning Research 18.1 (2017): 8132-8193.
"""
backdoor_graph = self.get_proper_backdoor_graph(X, Y, inplace=False)
for (x, y) in zip(X, Y):
if backdoor_graph.is_dconnected(start=x, end=y, observed=adjustment_set):
return False
return True
def get_minimal_adjustment_set(self, X, Y):
"""
        Method to find the minimal adjustment set for identifying the causal
        effect of `X` on `Y`.
Parameters
----------
X: str (variable name)
The cause/exposure variables.
Y: str (variable name)
The outcome variable
Returns
-------
Minimal adjustment set: set or None
A set of variables which are the minimal possible adjustment set. If
None, no adjustment set is possible.
Examples
--------
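        A minimal illustrative sketch (toy model; the output shown is for this
        hypothetical structure):
        >>> from pgmpy.models import BayesianNetwork
        >>> from pgmpy.inference import CausalInference
        >>> model = BayesianNetwork([("Z", "X"), ("Z", "Y"), ("X", "Y")])
        >>> infer = CausalInference(model)
        >>> infer.get_minimal_adjustment_set("X", "Y")  # doctest: +SKIP
        {'Z'}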
References
----------
[1] Perkovic, Emilija, et al. "Complete graphical characterization and construction of adjustment sets in Markov equivalence classes of ancestral graphs." The Journal of Machine Learning Research 18.1 (2017): 8132-8193.
"""
backdoor_graph = self.get_proper_backdoor_graph(X, Y, inplace=False)
return backdoor_graph.minimal_dseparator(X, Y)
def query(
self,
variables,
do=None,
evidence=None,
adjustment_set=None,
inference_algo="ve",
show_progress=True,
**kwargs,
):
"""
Performs a query on the model of the form :math:`P(X | do(Y), Z)` where :math:`X`
is `variables`, :math:`Y` is `do` and `Z` is the `evidence`.
Parameters
----------
variables: list
list of variables in the query i.e. `X` in :math:`P(X | do(Y), Z)`.
do: dict (default: None)
Dictionary of the form {variable_name: variable_state} representing
the variables on which to apply the do operation i.e. `Y` in
:math:`P(X | do(Y), Z)`.
evidence: dict (default: None)
            Dictionary of the form {variable_name: variable_state} representing
the conditional variables in the query i.e. `Z` in :math:`P(X |
do(Y), Z)`.
adjustment_set: str or list (default=None)
Specifies the adjustment set to use. If None, uses the parents of the
do variables as the adjustment set.
inference_algo: str or pgmpy.inference.Inference instance
The inference algorithm to use to compute the probability values.
String options are: 1) ve: Variable Elimination 2) bp: Belief
Propagation.
kwargs: Any
            Additional parameters which need to be passed to the inference
            algorithms. Please refer to pgmpy.inference.Inference for
details.
Returns
-------
Queried distribution: pgmpy.factor.discrete.DiscreteFactor
A factor object representing the joint distribution over the variables in `variables`.
Examples
--------
>>> from pgmpy.utils import get_example_model
>>> model = get_example_model('alarm')
>>> infer = CausalInference(model)
>>> infer.query(['HISTORY'], do={'CVP': 'LOW'}, evidence={'HR': 'LOW'})
<DiscreteFactor representing phi(HISTORY:2) at 0x7f4e0874c2e0>
"""
# Step 1: Check if all the arguments are valid and get them to uniform types.
if (not isinstance(variables, Iterable)) or (isinstance(variables, str)):
raise ValueError(
f"variables much be a list (array-like). Got type: {type(variables)}."
)
elif not all([node in self.model.nodes() for node in variables]):
raise ValueError(
f"Some of the variables in `variables` are not in the model."
)
else:
variables = list(variables)
if do is None:
do = {}
elif not isinstance(do, dict):
raise ValueError(
"`do` must be a dict of the form: {variable_name: variable_state}"
)
if evidence is None:
evidence = {}
elif not isinstance(evidence, dict):
raise ValueError(
"`evidence` must be a dict of the form: {variable_name: variable_state}"
)
from pgmpy.inference import Inference
if inference_algo == "ve":
from pgmpy.inference import VariableElimination
inference_algo = VariableElimination
elif inference_algo == "bp":
from pgmpy.inference import BeliefPropagation
inference_algo = BeliefPropagation
elif not isinstance(inference_algo, Inference):
raise ValueError(
f"inference_algo must be one of: 've', 'bp', or an instance of pgmpy.inference.Inference. Got: {inference_algo}"
)
# Step 2: Check if adjustment set is provided, otherwise try calculating it.
if adjustment_set is None:
do_vars = [var for var, state in do.items()]
adjustment_set = set(
chain(*[self.model.predecessors(var) for var in do_vars])
)
if len(adjustment_set.intersection(self.model.latents)) != 0:
raise ValueError(
"Not all parents of do variables are observed. Please specify an adjustment set."
)
infer = inference_algo(self.model)
# Step 3.1: If no do variable specified, do a normal probabilistic inference.
if do == {}:
return infer.query(variables, evidence, show_progress=False)
# Step 3.2: If no adjustment is required, do a normal probabilistic
# inference with do variables as the evidence.
elif len(adjustment_set) == 0:
evidence = {**evidence, **do}
return infer.query(variables, evidence, show_progress=False)
# Step 4: For other cases, compute \sum_{z} p(variables | do, z) p(z)
values = []
# Step 4.1: Compute p_z and states of z to iterate over.
# For computing p_z, if evidence variables also in adjustment set,
# manually do reduce else inference will throw error.
evidence_adj_inter = {
var: state
for var, state in evidence.items()
if var in adjustment_set.intersection(evidence.keys())
}
if len(evidence_adj_inter) != 0:
p_z = infer.query(adjustment_set, show_progress=False).reduce(
[(key, value) for key, value in evidence_adj_inter.items()],
inplace=False,
)
# Since we are doing reduce over some of the variables, they are
# going to be removed from the factor but would be required to get
# values later. A hackish solution to reintroduce those variables in p_z
if set(p_z.variables) != adjustment_set:
p_z = DiscreteFactor(
p_z.variables + list(evidence_adj_inter.keys()),
list(p_z.cardinality) + [1] * len(evidence_adj_inter),
p_z.values,
state_names={
**p_z.state_names,
**{var: [state] for var, state in evidence_adj_inter.items()},
},
)
else:
p_z = infer.query(adjustment_set, evidence=evidence, show_progress=False)
adj_states = []
for var in adjustment_set:
if var in evidence_adj_inter.keys():
adj_states.append([evidence_adj_inter[var]])
else:
adj_states.append(self.model.get_cpds(var).state_names[var])
# Step 4.2: Iterate over states of adjustment set and compute values.
if show_progress and SHOW_PROGRESS:
pbar = tqdm(total=np.prod([len(states) for states in adj_states]))
for state_comb in product(*adj_states):
adj_evidence = {
var: state for var, state in zip(adjustment_set, state_comb)
}
evidence = {**do, **adj_evidence}
values.append(
infer.query(variables, evidence=evidence, show_progress=False)
* p_z.get_value(**adj_evidence)
)
if show_progress and SHOW_PROGRESS:
pbar.update(1)
return sum(values).normalize(inplace=False)
| mit | 91c4f27b77c1c2fcf4fe5c752e3dbe5b | 36.350217 | 227 | 0.563796 | 4.234454 | false | false | false | false |
pgmpy/pgmpy | pgmpy/tests/test_utils/test_optimizer.py | 2 | 2011 | import unittest
import numpy as np
import numpy.testing as npt
from pgmpy.utils import optimize, pinverse
from pgmpy.global_vars import device, dtype
try: # pragma: no cover
import torch
except ImportError: # pragma: no cover
torch = None
class TestOptimize(unittest.TestCase):
"""
self = TestOptimize()
self.setUp()
"""
def setUp(self):
self.A = torch.randn(5, 5, device=device, dtype=dtype, requires_grad=True)
self.B = torch.ones(5, 5, device=device, dtype=dtype, requires_grad=False)
def loss_fn(self, params, loss_params):
A = params["A"]
B = loss_params["B"]
return (A - B).pow(2).sum()
@unittest.skipIf(torch is None, "torch is not installed")
def test_optimize(self):
# TODO: Add tests for other optimizers
for opt in ["adadelta", "adam", "adamax", "asgd", "lbfgs", "rmsprop", "rprop"]:
A = torch.randn(5, 5, device=device, dtype=dtype, requires_grad=True)
B = torch.ones(5, 5, device=device, dtype=dtype, requires_grad=False)
params = optimize(
self.loss_fn,
params={"A": A},
loss_args={"B": B},
opt=opt,
max_iter=int(1e6),
)
npt.assert_almost_equal(
B.data.cpu().numpy(),
params["A"].detach().cpu().numpy().round(),
decimal=1,
)
class Testpinverse(unittest.TestCase):
@unittest.skipIf(torch is None, "torch is not installed")
def test_pinverse(self):
mat = np.random.randn(5, 5)
np_inv = np.linalg.pinv(mat)
inv = pinverse(torch.tensor(mat))
npt.assert_array_almost_equal(np_inv, inv.numpy())
@unittest.skipIf(torch is None, "torch is not installed")
def test_pinverse_zeros(self):
mat = np.zeros((5, 5))
np_inv = np.linalg.pinv(mat)
inv = pinverse(torch.tensor(mat))
npt.assert_array_almost_equal(np_inv, inv)
| mit | a158046e77f93e39292ac4bc35bb27c5 | 30.421875 | 87 | 0.57633 | 3.540493 | false | true | false | false |
pgmpy/pgmpy | pgmpy/estimators/HillClimbSearch.py | 2 | 13550 | #!/usr/bin/env python
from itertools import permutations
from collections import deque
import networkx as nx
from tqdm.auto import trange
from pgmpy.estimators import (
StructureScore,
StructureEstimator,
K2Score,
ScoreCache,
BDeuScore,
BDsScore,
BicScore,
)
from pgmpy.base import DAG
from pgmpy.global_vars import SHOW_PROGRESS
class HillClimbSearch(StructureEstimator):
def __init__(self, data, use_cache=True, **kwargs):
"""
Class for heuristic hill climb searches for DAGs, to learn
network structure from data. `estimate` attempts to find a model with optimal score.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
            Specifies how to deal with missing data, if present. If set to `True`, all rows
            that contain `np.NaN` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
        use_cache: boolean
If True, uses caching of score for faster computation.
Note: Caching only works for scoring methods which are decomposable. Can
give wrong results in case of custom scoring methods.
References
----------
Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.4.3 (page 811ff)
"""
self.use_cache = use_cache
super(HillClimbSearch, self).__init__(data, **kwargs)
def _legal_operations(
self,
model,
score,
structure_score,
tabu_list,
max_indegree,
black_list,
white_list,
fixed_edges,
):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Friedman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered. A list of
edges can optionally be passed as `black_list` or `white_list` to exclude those
edges or to limit the search.
"""
tabu_list = set(tabu_list)
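        # The scores used here are decomposable: each candidate operation only
        # changes the local score of the affected node(s), so score deltas can
        # be computed without rescoring the whole network.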
# Step 1: Get all legal operations for adding edges.
potential_new_edges = (
set(permutations(self.variables, 2))
- set(model.edges())
- set([(Y, X) for (X, Y) in model.edges()])
)
for (X, Y) in potential_new_edges:
# Check if adding (X, Y) will create a cycle.
if not nx.has_path(model, Y, X):
operation = ("+", (X, Y))
if (
(operation not in tabu_list)
and ((X, Y) not in black_list)
and ((X, Y) in white_list)
):
old_parents = model.get_parents(Y)
new_parents = old_parents + [X]
if len(new_parents) <= max_indegree:
score_delta = score(Y, new_parents) - score(Y, old_parents)
score_delta += structure_score("+")
yield (operation, score_delta)
# Step 2: Get all legal operations for removing edges
for (X, Y) in model.edges():
operation = ("-", (X, Y))
if (operation not in tabu_list) and ((X, Y) not in fixed_edges):
old_parents = model.get_parents(Y)
new_parents = old_parents[:]
new_parents.remove(X)
score_delta = score(Y, new_parents) - score(Y, old_parents)
score_delta += structure_score("-")
yield (operation, score_delta)
# Step 3: Get all legal operations for flipping edges
for (X, Y) in model.edges():
# Check if flipping creates any cycles
if not any(
map(lambda path: len(path) > 2, nx.all_simple_paths(model, X, Y))
):
operation = ("flip", (X, Y))
if (
((operation not in tabu_list) and ("flip", (Y, X)) not in tabu_list)
and ((X, Y) not in fixed_edges)
and ((Y, X) not in black_list)
and ((Y, X) in white_list)
):
old_X_parents = model.get_parents(X)
old_Y_parents = model.get_parents(Y)
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if len(new_X_parents) <= max_indegree:
score_delta = (
score(X, new_X_parents)
+ score(Y, new_Y_parents)
- score(X, old_X_parents)
- score(Y, old_Y_parents)
)
score_delta += structure_score("flip")
yield (operation, score_delta)
def estimate(
self,
scoring_method="k2score",
start_dag=None,
fixed_edges=set(),
tabu_length=100,
max_indegree=None,
black_list=None,
white_list=None,
epsilon=1e-4,
max_iter=1e6,
show_progress=True,
):
"""
Performs local hill climb search to estimates the `DAG` structure that
has optimal score, according to the scoring method supplied. Starts at
model `start_dag` and proceeds by step-by-step network modifications
until a local maximum is reached. Only estimates network structure, no
parametrization.
Parameters
----------
scoring_method: str or StructureScore instance
The score to be optimized during structure estimation. Supported
structure scores: k2score, bdeuscore, bdsscore, bicscore. Also accepts a
custom score, but it should be an instance of `StructureScore`.
start_dag: DAG instance
The starting point for the local search. By default, a completely
disconnected network is used.
fixed_edges: iterable
A list of edges that will always be there in the final learned model.
The algorithm will add these edges at the start of the algorithm and
will never change it.
tabu_length: int
If provided, the last `tabu_length` graph modifications cannot be reversed
during the search procedure. This serves to enforce a wider exploration
of the search space. Default value: 100.
max_indegree: int or None
If provided and unequal None, the procedure only searches among models
where all nodes have at most `max_indegree` parents. Defaults to None.
black_list: list or None
If a list of edges is provided as `black_list`, they are excluded from the search
and the resulting model will not contain any of those edges. Default: None
white_list: list or None
If a list of edges is provided as `white_list`, the search is limited to those
edges. The resulting model will then only contain edges that are in `white_list`.
Default: None
epsilon: float (default: 1e-4)
Defines the exit condition. If the improvement in score is less than `epsilon`,
the learned model is returned.
max_iter: int (default: 1e6)
The maximum number of iterations allowed. Returns the learned model when the
number of iterations is greater than `max_iter`.
Returns
-------
Estimated model: pgmpy.base.DAG
A `DAG` at a (local) score maximum.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import HillClimbSearch, BicScore
>>> # create data sample with 9 random variables:
... data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 9)), columns=list('ABCDEFGHI'))
>>> # add 10th dependent variable
... data['J'] = data['A'] * data['B']
>>> est = HillClimbSearch(data)
>>> best_model = est.estimate(scoring_method=BicScore(data))
>>> sorted(best_model.nodes())
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
>>> best_model.edges()
OutEdgeView([('B', 'J'), ('A', 'J')])
>>> # search a model with restriction on the number of parents:
>>> est.estimate(max_indegree=1).edges()
OutEdgeView([('J', 'A'), ('B', 'J')])
"""
# Step 1: Initial checks and setup for arguments
# Step 1.1: Check scoring_method
supported_methods = {
"k2score": K2Score,
"bdeuscore": BDeuScore,
"bdsscore": BDsScore,
"bicscore": BicScore,
}
if (
(
isinstance(scoring_method, str)
and (scoring_method.lower() not in supported_methods)
)
) and (not isinstance(scoring_method, StructureScore)):
raise ValueError(
"scoring_method should either be one of k2score, bdeuscore, bicscore, bdsscore, or an instance of StructureScore"
)
if isinstance(scoring_method, str):
score = supported_methods[scoring_method.lower()](data=self.data)
else:
score = scoring_method
if self.use_cache:
score_fn = ScoreCache.ScoreCache(score, self.data).local_score
else:
score_fn = score.local_score
# Step 1.2: Check the start_dag
if start_dag is None:
start_dag = DAG()
start_dag.add_nodes_from(self.variables)
elif not isinstance(start_dag, DAG) or not set(start_dag.nodes()) == set(
self.variables
):
raise ValueError(
"'start_dag' should be a DAG with the same variables as the data set, or 'None'."
)
# Step 1.3: Check fixed_edges
if not hasattr(fixed_edges, "__iter__"):
raise ValueError("fixed_edges must be an iterable")
else:
fixed_edges = set(fixed_edges)
start_dag.add_edges_from(fixed_edges)
if not nx.is_directed_acyclic_graph(start_dag):
raise ValueError(
"fixed_edges creates a cycle in start_dag. Please modify either fixed_edges or start_dag."
)
# Step 1.4: Check black list and white list
black_list = set() if black_list is None else set(black_list)
white_list = (
set([(u, v) for u in self.variables for v in self.variables])
if white_list is None
else set(white_list)
)
# Step 1.5: Initialize max_indegree, tabu_list, and progress bar
if max_indegree is None:
max_indegree = float("inf")
tabu_list = deque(maxlen=tabu_length)
current_model = start_dag
if show_progress and SHOW_PROGRESS:
iteration = trange(int(max_iter))
else:
iteration = range(int(max_iter))
# Step 2: For each iteration, find the best scoring operation and
# do that to the current model. If no legal operation is
# possible, sets best_operation=None.
for _ in iteration:
best_operation, best_score_delta = max(
self._legal_operations(
current_model,
score_fn,
score.structure_prior_ratio,
tabu_list,
max_indegree,
black_list,
white_list,
fixed_edges,
),
key=lambda t: t[1],
default=(None, None),
)
if best_operation is None or best_score_delta < epsilon:
break
elif best_operation[0] == "+":
current_model.add_edge(*best_operation[1])
tabu_list.append(("-", best_operation[1]))
elif best_operation[0] == "-":
current_model.remove_edge(*best_operation[1])
tabu_list.append(("+", best_operation[1]))
elif best_operation[0] == "flip":
X, Y = best_operation[1]
current_model.remove_edge(X, Y)
current_model.add_edge(Y, X)
tabu_list.append(best_operation)
# Step 3: Return if no more improvements or maximum iterations reached.
return current_model
| mit | b8c87f63ce5df1440b6a9acbe8f3d2b7 | 39.690691 | 129 | 0.554022 | 4.292049 | false | false | false | false |
pgmpy/pgmpy | pgmpy/estimators/SEMEstimator.py | 2 | 14703 | import numpy as np
import pandas as pd
import statsmodels.api as sm
try:
import torch
except ImportError:
torch = None
from pgmpy.models import SEMGraph, SEMAlg, SEM
from pgmpy.global_vars import device, dtype
from pgmpy.utils import optimize, pinverse
class SEMEstimator(object):
"""
Base class of SEM estimators. All the estimators inherit this class.
"""
def __init__(self, model):
if isinstance(model, (SEMGraph, SEM)):
self.model = model.to_lisrel()
elif isinstance(model, SEMAlg):
self.model = model
else:
raise ValueError(
f"Model should be an instance of either SEMGraph or SEMAlg class. Got type: {type(model)}"
)
# Initialize trainable and fixed mask tensors
self.B_mask = torch.tensor(
self.model.B_mask, device=device, dtype=dtype, requires_grad=False
)
self.zeta_mask = torch.tensor(
self.model.zeta_mask, device=device, dtype=dtype, requires_grad=False
)
self.B_fixed_mask = torch.tensor(
self.model.B_fixed_mask, device=device, dtype=dtype, requires_grad=False
)
self.zeta_fixed_mask = torch.tensor(
self.model.zeta_fixed_mask, device=device, dtype=dtype, requires_grad=False
)
self.wedge_y = torch.tensor(
self.model.wedge_y, device=device, dtype=dtype, requires_grad=False
)
self.B_eye = torch.eye(
self.B_mask.shape[0], device=device, dtype=dtype, requires_grad=False
)
def _get_implied_cov(self, B, zeta):
"""
Computes the implied covariance matrix from the given parameters.
"""
B_masked = torch.mul(B, self.B_mask) + self.B_fixed_mask
B_inv = pinverse(self.B_eye - B_masked)
zeta_masked = torch.mul(zeta, self.zeta_mask) + self.zeta_fixed_mask
return self.wedge_y @ B_inv @ zeta_masked @ B_inv.t() @ self.wedge_y.t()
def ml_loss(self, params, loss_args):
r"""
Method to compute the Maximum Likelihood loss function. The optimizer calls this
method after each iteration with updated params to compute the new loss.
The fitting function for ML is:
        .. math:: F_{ML} = \log |\Sigma(\theta)| + tr(S \Sigma^{-1}(\theta)) - \log |S| - (p+q)
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
loss_args contain all the variable which are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
sigma = self._get_implied_cov(params["B"], params["zeta"])
return (
sigma.det().clamp(min=1e-4).log()
+ (S @ pinverse(sigma)).trace()
- S.logdet()
- len(self.model.y)
)
def uls_loss(self, params, loss_args):
r"""
Method to compute the Unweighted Least Squares fitting function. The optimizer calls
this method after each iteration with updated params to compute the new loss.
        The fitting function for ULS is:
.. math:: F_{ULS} = tr[(S - \Sigma(\theta))^2]
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
loss_args contain all the variable which are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
sigma = self._get_implied_cov(params["B"], params["zeta"])
return (S - sigma).pow(2).trace()
def gls_loss(self, params, loss_args):
r"""
Method to compute the Weighted Least Squares fitting function. The optimizer calls
this method after each iteration with updated params to compute the new loss.
        The fitting function for GLS is:
        .. math:: F_{GLS} = tr \{ [(S - \Sigma(\theta)) W^{-1}]^2 \}
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
loss_args contain all the variable which are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
W_inv = pinverse(loss_args["W"])
sigma = self._get_implied_cov(params["B"], params["zeta"])
return ((S - sigma) @ W_inv).pow(2).trace()
def get_init_values(self, data, method):
"""
Computes the starting values for the optimizer.
        References
        ----------
.. [1] Table 4C.1: Bollen, K. (2014). Structural Equations with Latent Variables.
New York, NY: John Wiley & Sons.
"""
# Initialize all the values even if the edge doesn't exist, masks would take care of that.
a = 0.4
scaling_vars = self.model.to_SEMGraph().get_scaling_indicators()
eta, m = self.model.eta, len(self.model.eta)
if method == "random":
B = np.random.rand(m, m)
zeta = np.random.rand(m, m)
elif method == "std":
            # Add observed vars to `scaling_vars` to point to themselves. Trick to keep code short.
for observed_var in self.model.y:
scaling_vars[observed_var] = observed_var
B = np.random.rand(m, m)
for i in range(m):
for j in range(m):
if scaling_vars[eta[i]] == eta[j]:
B[i, j] = 1.0
elif i != j:
B[i, j] = a * (
data.loc[:, scaling_vars[eta[i]]].std()
/ data.loc[:, scaling_vars[eta[j]]].std()
)
zeta = np.random.rand(m, m)
for i in range(m):
zeta[i, i] = a * ((data.loc[:, scaling_vars[eta[i]]].std()) ** 2)
for i in range(m):
for j in range(m):
zeta[i, j] = zeta[j, i] = a * np.sqrt(zeta[i, i] * zeta[j, j])
elif method.lower() == "iv":
raise NotImplementedError("IV initialization not supported yet.")
return B, zeta
def fit(
self,
data,
method,
opt="adam",
init_values="random",
exit_delta=1e-4,
max_iter=1000,
**kwargs,
):
"""
Estimate the parameters of the model from the data.
Parameters
----------
data: pandas DataFrame or pgmpy.data.Data instance
The data from which to estimate the parameters of the model.
method: str ("ml"|"uls"|"gls"|"2sls")
The fitting function to use.
ML : Maximum Likelihood
ULS: Unweighted Least Squares
GLS: Generalized Least Squares
2sls: 2-SLS estimator
init_values: str or dict
Options for str: random | std | iv
dict: dictionary with keys `B` and `zeta`.
**kwargs: dict
Extra parameters required in case of some estimators.
GLS:
W: np.array (n x n) where n is the number of observe variables.
2sls:
x:
y:
Returns
-------
pgmpy.model.SEM instance: Instance of the model with estimated parameters
References
----------
.. [1] Bollen, K. A. (2010). Structural equations with latent variables. New York: Wiley.
"""
# Check if given arguments are valid
if not isinstance(data, pd.DataFrame):
raise ValueError(f"data must be a pandas DataFrame. Got type: {type(data)}")
if not sorted(data.columns) == sorted(self.model.y):
raise ValueError(
f"The column names data do not match the variables in the model. Expected: {sorted(self.model.observed)}. Got: {sorted(data.columns)}"
)
# Initialize the values of parameters as tensors.
if isinstance(init_values, dict):
B_init, zeta_init = init_values["B"], init_values["zeta"]
else:
B_init, zeta_init = self.get_init_values(data, method=init_values.lower())
B = torch.tensor(B_init, device=device, dtype=dtype, requires_grad=True)
zeta = torch.tensor(zeta_init, device=device, dtype=dtype, requires_grad=True)
# Compute the covariance of the data
variable_order = self.model.y
S = data.cov().reindex(variable_order, axis=1).reindex(variable_order, axis=0)
S = torch.tensor(S.values, device=device, dtype=dtype, requires_grad=False)
# Optimize the parameters
if method.lower() == "ml":
params = optimize(
self.ml_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "uls":
params = optimize(
self.uls_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "gls":
W = torch.tensor(
kwargs["W"], device=device, dtype=dtype, requires_grad=False
)
params = optimize(
self.gls_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S, "W": W},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "2sls" or method.lower() == "2-sls":
raise NotImplementedError("2-SLS is not implemented yet")
B = params["B"] * self.B_mask + self.B_fixed_mask
zeta = params["zeta"] * self.zeta_mask + self.zeta_fixed_mask
# Compute goodness of fit statistics.
N = data.shape[0]
sample_cov = S.detach().numpy()
sigma_hat = self._get_implied_cov(B, zeta).detach().numpy()
residual = sample_cov - sigma_hat
norm_residual = np.zeros(residual.shape)
for i in range(norm_residual.shape[0]):
for j in range(norm_residual.shape[1]):
norm_residual[i, j] = (sample_cov[i, j] - sigma_hat[i, j]) / np.sqrt(
((sigma_hat[i, i] * sigma_hat[j, j]) + (sigma_hat[i, j] ** 2)) / N
)
# Compute chi-square value.
likelihood_ratio = -(N - 1) * (
np.log(np.linalg.det(sigma_hat))
+ (np.linalg.inv(sigma_hat) @ S).trace()
- np.log(np.linalg.det(S))
- S.shape[0]
)
if method.lower() == "ml":
error = self.ml_loss(params, loss_args={"S": S})
elif method.lower() == "uls":
error = self.uls_loss(params, loss_args={"S": S})
elif method.lower() == "gls":
error = self.gls_loss(params, loss_args={"S": S, "W": W})
chi_square = likelihood_ratio / error.detach().numpy()
free_params = self.B_mask.sum()
dof = ((S.shape[0] * (S.shape[0] + 1)) / 2) - free_params
summary = {
"Sample Size": N,
"Sample Covariance": sample_cov,
"Model Implied Covariance": sigma_hat,
"Residual": residual,
"Normalized Residual": norm_residual,
"chi_square": chi_square,
"dof": dof,
}
# Update the model with the learned params
        self.model.set_params(
            B=params["B"].detach().numpy(), zeta=params["zeta"].detach().numpy()
        )
return summary
class IVEstimator:
"""
Implements Instrumental Variable (IV) based estimator.
"""
def __init__(self, model):
"""
Initialize IVEstimator object.
Parameters
----------
model: pgmpy.models.SEM
            The model for which estimation needs to be done.
"""
self.model = model
def fit(self, X, Y, data, ivs=None, civs=None):
"""
Estimates the parameter X -> Y.
Parameters
----------
X: str
The covariate variable of the parameter being estimated.
Y: str
The predictor variable of the parameter being estimated.
data: pd.DataFrame
The data from which to learn the parameter.
ivs: List (default: None)
List of variable names which should be used as Instrumental Variables (IV).
If not specified, tries to find the IVs from the model structure, fails if
can't find either IV or Conditional IV.
        civs: List of tuples (tuple form: (var, conditional_var))
List of conditional IVs to use for estimation.
If not specified, tries to find the IVs from the model structure, fails if
can't find either IV or Conditional IVs.
Examples
--------
        >>> from pgmpy.estimators import IVEstimator
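        A minimal sketch (hypothetical model and data; the graph and column
        names below are illustrative, not from a real study):
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import SEM
        >>> model = SEM.from_graph(ebunch=[("Z", "X"), ("X", "Y")], latents=[])
        >>> data = pd.DataFrame(np.random.randn(500, 3), columns=["Z", "X", "Y"])
        >>> estimator = IVEstimator(model)
        >>> param, summary = estimator.fit(X="X", Y="Y", data=data)  # doctest: +SKIP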
"""
if (ivs is None) and (civs is None):
ivs = self.model.get_ivs(X, Y)
civs = self.model.get_conditional_ivs(X, Y)
civs = [civ for civ in civs if civ[0] not in ivs]
reg_covars = []
for var in self.model.graph.predecessors(X):
if var in self.model.observed:
reg_covars.append(var)
# Get CIV conditionals
civ_conditionals = []
for civ in civs:
civ_conditionals.extend(civ[1])
# First stage regression.
params = (
sm.OLS(data.loc[:, X], data.loc[:, reg_covars + civ_conditionals])
.fit()
.params
)
data["X_pred"] = np.zeros(data.shape[0])
for var in reg_covars:
data.X_pred += params[var] * data.loc[:, var]
summary = sm.OLS(
data.loc[:, Y], data.loc[:, ["X_pred"] + civ_conditionals]
).fit()
return summary.params["X_pred"], summary
| mit | 17839329d17070685797449d1b7274a6 | 33.433255 | 150 | 0.536489 | 3.9208 | false | false | false | false |
plotly/dash | components/dash-core-components/tests/integration/input/test_number_input.py | 1 | 2845 | import time
import sys
from selenium.webdriver.common.keys import Keys
def test_inni001_invalid_numbers(ninput_app, dash_dcc):
dash_dcc.start_server(ninput_app)
for invalid_number in ("10e10000", "e+++eeeeeE-", "12-.3"):
for debounce in ("false", "true"):
elem = dash_dcc.find_element(f"#input_{debounce}")
assert not elem.get_attribute("value"), "input should have no initial value"
# onblur
elem.send_keys(invalid_number)
elem.send_keys(Keys.TAB)
dash_dcc.wait_for_text_to_equal(f"#div_{debounce}", "")
# Enter keypress
dash_dcc.clear_input(elem)
elem.send_keys(invalid_number)
elem.send_keys(Keys.ENTER)
dash_dcc.wait_for_text_to_equal(f"#div_{debounce}", "")
dash_dcc.clear_input(elem)
assert dash_dcc.get_logs() == []
def test_inni002_invalid_numbers_ui(dash_dcc, ninput_app):
dash_dcc.start_server(ninput_app)
elem = dash_dcc.find_element("#input_false")
elem.send_keys("5e-325") # smaller than Number.MIN_VALUE
assert dash_dcc.wait_for_text_to_equal("#div_false", "0")
dash_dcc.clear_input(elem)
elem.send_keys("0.0.0")
elem.send_keys(Keys.TAB)
assert dash_dcc.find_element("#div_false").text != "0.0"
time.sleep(0.5)
dash_dcc.percy_snapshot("inni002 - input invalid number")
assert dash_dcc.get_logs() == []
def test_inni003_invalid_numbers_range(dash_dcc, input_range_app):
dash_dcc.start_server(input_range_app) # range [10, 10000] step=3
elem_range = dash_dcc.find_element("#range")
elem_range.send_keys("1999")
assert dash_dcc.find_element("#out").text == "1999"
for invalid_number in ("0.0", "12", "10e10"):
elem_range.send_keys(invalid_number)
dash_dcc.wait_for_text_to_equal("#out", ""), "invalid value should return none"
dash_dcc.clear_input(elem_range)
elem_range.send_keys("-13")
dash_dcc.wait_for_text_to_equal("#out", ""), "invalid value should return none"
time.sleep(0.5)
dash_dcc.percy_snapshot("inni003 - number out of range")
assert dash_dcc.get_logs() == []
def test_inni010_valid_numbers(dash_dcc, ninput_app):
dash_dcc.start_server(ninput_app)
for num, op in (
("1.0", lambda x: int(float(x))), # limitation of js/json
("10e10", lambda x: int(float(x))),
("-1.0001", float),
(str(sys.float_info.max), float),
(str(sys.float_info.min), float),
):
elem = dash_dcc.find_element("#input_false")
elem.send_keys(num)
assert dash_dcc.wait_for_text_to_equal(
"#div_false", str(op(num))
), "the valid number should be converted to expected form in callback"
dash_dcc.clear_input(elem)
assert dash_dcc.get_logs() == []
| mit | 979b138eb5e298087261a21f86b5a7ad | 32.081395 | 88 | 0.613005 | 3.046039 | false | false | false | false |
plotly/dash | tests/unit/library/test_utils.py | 1 | 1364 | import pytest
import dash._utils as utils
def test_ddut001_attribute_dict():
a = utils.AttributeDict()
assert str(a) == "{}"
with pytest.raises(AttributeError):
a.k
with pytest.raises(KeyError):
a["k"]
assert a.first("no", "k", "nope") is None
a.k = 1
assert a.k == 1
assert a["k"] == 1
assert a.first("no", "k", "nope") == 1
a["k"] = 2
assert a.k == 2
assert a["k"] == 2
a.set_read_only(["k"], "boo")
with pytest.raises(AttributeError) as err:
a.k = 3
assert err.value.args == ("boo", "k")
assert a.k == 2
assert a._read_only == {"k": "boo"}
with pytest.raises(AttributeError) as err:
a["k"] = 3
assert err.value.args == ("boo", "k")
assert a.k == 2
a.set_read_only(["q"])
with pytest.raises(AttributeError) as err:
a.q = 3
assert err.value.args == ("Attribute is read-only", "q")
assert "q" not in a
assert a._read_only == {"k": "boo", "q": "Attribute is read-only"}
a.finalize("nope")
with pytest.raises(AttributeError) as err:
a.x = 4
assert err.value.args == ("nope", "x")
assert "x" not in a
a.finalize()
with pytest.raises(AttributeError) as err:
a.x = 4
assert err.value.args == ("Object is final: No new keys may be added.", "x")
assert "x" not in a
| mit | 754c9d2d020e483e3dcd354aec48fef1 | 21.733333 | 80 | 0.547654 | 3.201878 | false | true | false | false |
plotly/dash | dash/testing/dash_page.py | 1 | 2926 | from bs4 import BeautifulSoup
class DashPageMixin:
def _get_dash_dom_by_attribute(self, attr):
return BeautifulSoup(
self.find_element(self.dash_entry_locator).get_attribute(attr), "lxml"
)
@property
def devtools_error_count_locator(self):
return ".test-devtools-error-count"
@property
def dash_entry_locator(self):
return "#react-entry-point"
@property
def dash_outerhtml_dom(self):
return self._get_dash_dom_by_attribute("outerHTML")
@property
def dash_innerhtml_dom(self):
return self._get_dash_dom_by_attribute("innerHTML")
@property
def redux_state_paths(self):
return self.driver.execute_script(
"""
var p = window.store.getState().paths;
return {strs: p.strs, objs: p.objs}
"""
)
@property
def redux_state_rqs(self):
return self.driver.execute_script(
"""
// Check for legacy `pendingCallbacks` store prop (compatibility for Dash matrix testing)
var pendingCallbacks = window.store.getState().pendingCallbacks;
if (pendingCallbacks) {
return pendingCallbacks.map(function(cb) {
var out = {};
for (var key in cb) {
if (typeof cb[key] !== 'function') { out[key] = cb[key]; }
}
return out;
});
}
// Otherwise, use the new `callbacks` store prop
var callbacksState = Object.assign({}, window.store.getState().callbacks);
delete callbacksState.stored;
delete callbacksState.completed;
return Array.prototype.concat.apply([], Object.values(callbacksState));
"""
)
@property
def redux_state_is_loading(self):
return self.driver.execute_script(
"""
return window.store.getState().isLoading;
"""
)
@property
def window_store(self):
return self.driver.execute_script("return window.store")
def _wait_for_callbacks(self):
return (not self.window_store) or self.redux_state_rqs == []
def get_local_storage(self, store_id="local"):
return self.driver.execute_script(
f"return JSON.parse(window.localStorage.getItem('{store_id}'));"
)
def get_session_storage(self, session_id="session"):
return self.driver.execute_script(
f"return JSON.parse(window.sessionStorage.getItem('{session_id}'));"
)
def clear_local_storage(self):
self.driver.execute_script("window.localStorage.clear()")
def clear_session_storage(self):
self.driver.execute_script("window.sessionStorage.clear()")
def clear_storage(self):
self.clear_local_storage()
self.clear_session_storage()
| mit | 16bafc42692b32e6c4c8ede6367c89b6 | 30.12766 | 101 | 0.580314 | 4.138614 | false | false | false | false |
plotly/dash | dash/long_callback/managers/diskcache_manager.py | 1 | 7013 | import traceback
from contextvars import copy_context
from . import BaseLongCallbackManager
from ..._callback_context import context_value
from ..._utils import AttributeDict
from ...exceptions import PreventUpdate
_pending_value = "__$pending__"
class DiskcacheManager(BaseLongCallbackManager):
"""Manage the background execution of callbacks with subprocesses and a diskcache result backend."""
def __init__(self, cache=None, cache_by=None, expire=None):
"""
Long callback manager that runs callback logic in a subprocess and stores
results on disk using diskcache
:param cache:
A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache
documentation for information on configuration options. If not provided,
a diskcache.Cache instance will be created with default values.
:param cache_by:
A list of zero-argument functions. When provided, caching is enabled and
the return values of these functions are combined with the callback
function's input arguments and source code to generate cache keys.
:param expire:
If provided, a cache entry will be removed when it has not been accessed
for ``expire`` seconds. If not provided, the lifetime of cache entries
is determined by the default behavior of the ``cache`` instance.
"""
try:
import diskcache # pylint: disable=import-outside-toplevel
import psutil # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable,import-error
import multiprocess # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable
except ImportError as missing_imports:
raise ImportError(
"""\
DiskcacheLongCallbackManager requires extra dependencies which can be installed with
$ pip install "dash[diskcache]"\n"""
) from missing_imports
if cache is None:
self.handle = diskcache.Cache()
else:
if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)):
raise ValueError(
"First argument must be a diskcache.Cache "
"or diskcache.FanoutCache object"
)
self.handle = cache
self.expire = expire
super().__init__(cache_by)
def terminate_job(self, job):
import psutil # pylint: disable=import-outside-toplevel,import-error
if job is None:
return
job = int(job)
        # Use a diskcache transaction so multiple processes don't try to kill
        # the process at the same time
with self.handle.transact():
if psutil.pid_exists(job):
process = psutil.Process(job)
for proc in process.children(recursive=True):
try:
proc.kill()
except psutil.NoSuchProcess:
pass
try:
process.kill()
except psutil.NoSuchProcess:
pass
try:
process.wait(1)
except (psutil.TimeoutExpired, psutil.NoSuchProcess):
pass
def terminate_unhealthy_job(self, job):
import psutil # pylint: disable=import-outside-toplevel,import-error
job = int(job)
if job and psutil.pid_exists(job):
if not self.job_running(job):
self.terminate_job(job)
return True
return False
def job_running(self, job):
import psutil # pylint: disable=import-outside-toplevel,import-error
job = int(job)
if job and psutil.pid_exists(job):
proc = psutil.Process(job)
return proc.status() != psutil.STATUS_ZOMBIE
return False
def make_job_fn(self, fn, progress):
return _make_job_fn(fn, self.handle, progress)
def clear_cache_entry(self, key):
self.handle.delete(key)
# noinspection PyUnresolvedReferences
def call_job_fn(self, key, job_fn, args, context):
# pylint: disable-next=import-outside-toplevel,no-name-in-module,import-error
from multiprocess import Process
# pylint: disable-next=not-callable
proc = Process(
target=job_fn, args=(key, self._make_progress_key(key), args, context)
)
proc.start()
return proc.pid
def get_progress(self, key):
progress_key = self._make_progress_key(key)
return self.handle.get(progress_key)
def result_ready(self, key):
return self.handle.get(key) is not None
def get_result(self, key, job):
# Get result value
result = self.handle.get(key, self.UNDEFINED)
if result is self.UNDEFINED:
return self.UNDEFINED
# Clear result if not caching
if self.cache_by is None:
self.clear_cache_entry(key)
else:
if self.expire:
self.handle.touch(key, expire=self.expire)
self.clear_cache_entry(self._make_progress_key(key))
if job:
self.terminate_job(job)
return result
def _make_job_fn(fn, cache, progress):
def job_fn(result_key, progress_key, user_callback_args, context):
def _set_progress(progress_value):
if not isinstance(progress_value, (list, tuple)):
progress_value = [progress_value]
cache.set(progress_key, progress_value)
maybe_progress = [_set_progress] if progress else []
ctx = copy_context()
def run():
c = AttributeDict(**context)
c.ignore_register_page = False
context_value.set(c)
try:
if isinstance(user_callback_args, dict):
user_callback_output = fn(*maybe_progress, **user_callback_args)
elif isinstance(user_callback_args, (list, tuple)):
user_callback_output = fn(*maybe_progress, *user_callback_args)
else:
user_callback_output = fn(*maybe_progress, user_callback_args)
except PreventUpdate:
cache.set(result_key, {"_dash_no_update": "_dash_no_update"})
except Exception as err: # pylint: disable=broad-except
cache.set(
result_key,
{
"long_callback_error": {
"msg": str(err),
"tb": traceback.format_exc(),
}
},
)
else:
cache.set(result_key, user_callback_output)
ctx.run(run)
return job_fn
class DiskcacheLongCallbackManager(DiskcacheManager):
"""Deprecated: use `from dash import DiskcacheManager` instead."""
| mit | 30cff5e508576356faf53c78756ac45a | 34.241206 | 127 | 0.584058 | 4.527437 | false | false | false | false |
plotly/dash | components/dash-core-components/tests/integration/calendar/test_calendar_props.py | 1 | 3358 | import itertools
import pytest
from dash import Dash, Input, Output, html, dcc
import dash.testing.wait as wait
@pytest.mark.DCC594
def test_cdpr001_date_clearable_true_works(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.DatePickerRange(id="dpr", clearable=True),
dcc.DatePickerSingle(id="dps", clearable=True),
]
)
dash_dcc.start_server(app)
# DPR
start_date, end_date = dash_dcc.select_date_range("dpr", (1, 28))
close_btn = dash_dcc.wait_for_element('button[aria-label="Clear Dates"]')
assert (
"1" in start_date and "28" in end_date
), "both start date and end date should match the selected day"
close_btn.click()
start_date, end_date = dash_dcc.get_date_range("dpr")
assert not start_date and not end_date, "both start and end dates should be cleared"
# DPS
selected = dash_dcc.select_date_single("dps", day="1")
assert selected, "single date should get a value"
close_btn = dash_dcc.wait_for_element("#dps button")
close_btn.click()
(single_date,) = dash_dcc.get_date_range("dps")
assert not single_date, "date should be cleared"
assert dash_dcc.get_logs() == []
def test_cdpr002_updatemodes(dash_dcc):
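    # With updatemode="bothdates", the start_date/end_date props only update
    # (and so only fire the callback) once both ends of the range are picked.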
app = Dash(__name__)
app.layout = html.Div(
[
dcc.DatePickerRange(
id="date-picker-range",
start_date_id="startDate",
end_date_id="endDate",
start_date_placeholder_text="Select a start date!",
end_date_placeholder_text="Select an end date!",
updatemode="bothdates",
),
html.Div(id="date-picker-range-output"),
]
)
@app.callback(
Output("date-picker-range-output", "children"),
[
Input("date-picker-range", "start_date"),
Input("date-picker-range", "end_date"),
],
)
def update_output(start_date, end_date):
return f"{start_date} - {end_date}"
dash_dcc.start_server(app=app)
start_date = dash_dcc.find_element("#startDate")
start_date.click()
end_date = dash_dcc.find_element("#endDate")
end_date.click()
assert (
dash_dcc.find_element("#date-picker-range-output").text == "None - None"
), "the output should not update when both clicked but no selection happen"
start_date.click()
dash_dcc.find_elements(dash_dcc.date_picker_day_locator)[4].click()
assert (
dash_dcc.find_element("#date-picker-range-output").text == "None - None"
), "the output should not update when only one is selected"
eday = dash_dcc.find_elements(dash_dcc.date_picker_day_locator)[-4]
wait.until(lambda: eday.is_displayed() and eday.is_enabled(), timeout=2)
eday.click()
date_tokens = set(start_date.get_attribute("value").split("/"))
date_tokens.update(end_date.get_attribute("value").split("/"))
assert (
set(
itertools.chain(
*[
_.split("-")
for _ in dash_dcc.find_element(
"#date-picker-range-output"
).text.split(" - ")
]
)
)
== date_tokens
), "date should match the callback output"
assert dash_dcc.get_logs() == []
| mit | 1887fc54c3138529f0e90ec364e916e1 | 28.982143 | 88 | 0.580405 | 3.447639 | false | false | false | false |
plotly/dash | tests/integration/long_callback/utils.py | 1 | 1264 | import os
def get_long_callback_manager():
"""
    Get the long callback manager configured by environment variables.
"""
if os.environ.get("LONG_CALLBACK_MANAGER", None) == "celery":
from dash.long_callback import CeleryLongCallbackManager
from celery import Celery
import redis
celery_app = Celery(
__name__,
broker=os.environ.get("CELERY_BROKER"),
backend=os.environ.get("CELERY_BACKEND"),
)
long_callback_manager = CeleryLongCallbackManager(celery_app)
redis_conn = redis.Redis(host="localhost", port=6379, db=1)
long_callback_manager.test_lock = redis_conn.lock("test-lock")
elif os.environ.get("LONG_CALLBACK_MANAGER", None) == "diskcache":
from dash.long_callback import DiskcacheLongCallbackManager
import diskcache
cache = diskcache.Cache(os.environ.get("DISKCACHE_DIR"))
long_callback_manager = DiskcacheLongCallbackManager(cache)
long_callback_manager.test_lock = diskcache.Lock(cache, "test-lock")
else:
raise ValueError(
"Invalid long callback manager specified as LONG_CALLBACK_MANAGER "
"environment variable"
)
return long_callback_manager
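# Typical use in a test app (a sketch; assumes the Dash constructor's
# long_callback_manager keyword, which these managers target):
#
#     import dash
#     manager = get_long_callback_manager()
#     app = dash.Dash(__name__, long_callback_manager=manager)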
| mit | 4d945be45d29dce0cc4b89abace1b887 | 36.176471 | 79 | 0.653481 | 4.157895 | false | true | false | false |
plotly/dash | components/dash-core-components/tests/integration/calendar/test_date_picker_single.py | 1 | 6972 | from datetime import datetime, timedelta
import pandas as pd
import time
import pytest
import werkzeug
from dash import Dash, Input, Output, html, dcc, no_update
@pytest.mark.DCC652
def test_dtps001_simple_click(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
html.Label("Operating Date"),
dcc.DatePickerSingle(
id="dps",
min_date_allowed=datetime(2010, 1, 1),
max_date_allowed=datetime(2099, 12, 31),
initial_visible_month=datetime.today().date() - timedelta(days=1),
day_size=47,
),
],
style={
"width": "10%",
"display": "inline-block",
"marginLeft": 10,
"marginRight": 10,
"marginBottom": 10,
},
)
dash_dcc.start_server(app)
date = dash_dcc.find_element("#dps input")
assert not date.get_attribute("value")
assert dash_dcc.select_date_single(
"dps", index=3
), "Component should be clickable to choose a valid date"
assert dash_dcc.get_logs() == []
def test_dtps010_local_and_session_persistence(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.DatePickerSingle(id="dps-local", persistence=True, day_size=47),
dcc.DatePickerSingle(
id="dps-session",
persistence=True,
persistence_type="session",
day_size=47,
),
]
)
dash_dcc.start_server(app)
assert not dash_dcc.find_element("#dps-local input").get_attribute(
"value"
) and not dash_dcc.find_element("#dps-session input").get_attribute(
"value"
), "component should contain no initial date"
for idx in range(3):
local = dash_dcc.select_date_single("dps-local", index=idx)
session = dash_dcc.select_date_single("dps-session", index=idx)
dash_dcc.wait_for_page()
assert (
dash_dcc.find_element("#dps-local input").get_attribute("value") == local
and dash_dcc.find_element("#dps-session input").get_attribute("value")
== session
), "the date value should be consistent after refresh"
assert dash_dcc.get_logs() == []
@pytest.mark.xfail(
condition=werkzeug.__version__ in ("2.1.0", "2.1.1"),
reason="Bug with 204 and Transfer-Encoding",
strict=False,
)
def test_dtps011_memory_persistence(dash_dcc):
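    # The switch button remounts the datepickers: a value persisted with
    # persistence_type="memory" must survive the remount, while the
    # unpersisted picker must come back empty.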
app = Dash(__name__)
app.layout = html.Div(
[html.Button(id="switch", children="Switch"), html.Div(id="out")]
)
@app.callback(Output("out", "children"), [Input("switch", "n_clicks")])
def cb(clicks):
if clicks is None:
return no_update
if clicks % 2 == 1:
return [
dcc.DatePickerSingle(
id="dps-memory",
min_date_allowed=datetime(2010, 1, 1),
max_date_allowed=datetime(2099, 12, 31),
initial_visible_month=datetime.today().date() - timedelta(days=1),
persistence=True,
persistence_type="memory",
day_size=47,
),
dcc.DatePickerSingle(
id="dps-none",
min_date_allowed=datetime(2010, 1, 1),
max_date_allowed=datetime(2099, 12, 31),
initial_visible_month=datetime.today().date() - timedelta(days=1),
day_size=47,
),
]
else:
return "switched"
dash_dcc.start_server(app)
switch = dash_dcc.find_element("#switch")
switch.click()
memorized = dash_dcc.select_date_single("dps-memory", day="4")
amnesiaed = dash_dcc.select_date_single("dps-none", day="11")
switch.click()
assert dash_dcc.wait_for_text_to_equal("#out", "switched")
switch.click()
assert (
dash_dcc.find_element("#dps-memory input").get_attribute("value") == memorized
)
switched = dash_dcc.find_element("#dps-none input").get_attribute("value")
assert switched != amnesiaed and switched == ""
assert dash_dcc.get_logs() == []
def test_dtps012_initial_month(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.DatePickerSingle(
id="dps-initial-month",
min_date_allowed=datetime(2010, 1, 1),
max_date_allowed=datetime(2099, 12, 31),
)
]
)
dash_dcc.start_server(app)
date_picker = dash_dcc.find_element("#dps-initial-month")
date_picker.click()
dash_dcc.wait_for_text_to_equal(
"#dps-initial-month .CalendarMonth.CalendarMonth_1[data-visible=true] strong",
"January 2010",
)
assert dash_dcc.get_logs() == []
def test_dtps013_disabled_days_arent_clickable(dash_dcc):
app = Dash(__name__)
app.layout = html.Div(
[
html.Label("Operating Date"),
dcc.DatePickerSingle(
id="dps",
min_date_allowed=datetime(2021, 1, 1),
max_date_allowed=datetime(2021, 1, 31),
initial_visible_month=datetime(2021, 1, 1),
disabled_days=[datetime(2021, 1, 10)],
),
],
style={
"width": "10%",
"display": "inline-block",
"marginLeft": 10,
"marginRight": 10,
"marginBottom": 10,
},
)
dash_dcc.start_server(app)
date = dash_dcc.find_element("#dps input")
assert not date.get_attribute("value")
assert not dash_dcc.select_date_single(
"dps", day=10
), "Disabled days should not be clickable"
assert dash_dcc.select_date_single("dps", day=1), "Other days should be clickable"
# open datepicker to take snapshot
date.click()
dash_dcc.percy_snapshot("dtps013 - disabled days")
def test_dtps014_disabled_days_timeout(dash_dcc):
app = Dash(__name__)
min_date = pd.to_datetime("2010-01-01")
max_date = pd.to_datetime("2099-01-01")
disabled_days = [
x for x in pd.date_range(min_date, max_date, freq="D") if x.day != 1
]
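    # Tens of thousands of disabled days - enough to expose pathologically
    # slow per-day handling in the component.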
app.layout = html.Div(
[
html.Label("Operating Date"),
dcc.DatePickerSingle(
id="dps",
min_date_allowed=min_date,
max_date_allowed=max_date,
disabled_days=disabled_days,
),
]
)
dash_dcc.start_server(app)
date = dash_dcc.wait_for_element("#dps", timeout=5)
"""
WebDriver click() function hangs at the time of the react code
execution, so it necessary to check execution time.
"""
start_time = time.time()
date.click()
assert time.time() - start_time < 5
dash_dcc.wait_for_element(".SingleDatePicker_picker", timeout=5)
assert dash_dcc.get_logs() == []
| mit | 7b6dc6f9264481c9f2ee1d3f67186842 | 29.986667 | 86 | 0.551348 | 3.604964 | false | false | false | false |
plotly/dash | tests/integration/callbacks/test_global_dash_callback.py | 1 | 1761 | import dash
from dash import Input, Output, dcc, html
def test_dash_callback_001(dash_duo):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input"),
html.Div(id="div-1"),
html.Div(id="div-2"),
html.Div(id="div-3"),
html.Div(id="div-4"),
html.Div(id="div-5"),
]
)
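    # dash.callback and dash.clientside_callback register globally and should
    # behave exactly like the app-bound app.callback / app.clientside_callback
    # used alongside them below.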
@dash.callback(Output("div-1", "children"), Input("input", "value"))
def update_1(value): # pylint: disable=unused-variable
return f"Input 1 - {value}"
@dash.callback(Output("div-2", "children"), Input("input", "value"))
def update_2(value): # pylint: disable=unused-variable
return f"Input 2 - {value}"
@app.callback(Output("div-3", "children"), Input("input", "value"))
def update_3(value): # pylint: disable=unused-variable
return f"Input 3 - {value}"
app.clientside_callback(
"""
function (args) {return ('Input 4 - ' + args);}
""",
Output("div-4", "children"),
Input("input", "value"),
)
dash.clientside_callback(
"""
function (args) {return ('Input 5 - ' + args);}
""",
Output("div-5", "children"),
Input("input", "value"),
)
dash_duo.start_server(app)
input_element = dash_duo.find_element("#input")
input_element.send_keys("dash.callback")
dash_duo.wait_for_text_to_equal("#div-1", "Input 1 - dash.callback")
dash_duo.wait_for_text_to_equal("#div-2", "Input 2 - dash.callback")
dash_duo.wait_for_text_to_equal("#div-3", "Input 3 - dash.callback")
dash_duo.wait_for_text_to_equal("#div-4", "Input 4 - dash.callback")
dash_duo.wait_for_text_to_equal("#div-5", "Input 5 - dash.callback")
| mit | b8cbd8d11c8541132958153dcda3371e | 31.611111 | 72 | 0.558773 | 3.237132 | false | false | false | false |