| text (stringlengths 12–1.05M) | repo_name (stringlengths 5–86) | path (stringlengths 4–191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12–1.05M) | keyword (listlengths 1–23) | text_hash (stringlengths 64–64) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine

# viz_pymol.py
# Purpose: viz running LIGGGHTS simulation via PyMol
# Syntax:  viz_pymol.py in.liggghts Nfreq Nsteps
#          in.liggghts = LIGGGHTS input script
#          Nfreq = dump and viz snapshot every this many steps
#          Nsteps = run for this many steps

import sys
sys.path.append("./pizza")

# parse command line

argv = sys.argv
if len(argv) != 4:
  print "Syntax: viz_pymol.py in.liggghts Nfreq Nsteps"
  sys.exit()

infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from liggghts import liggghts
lmp = liggghts()

# run infile all at once
# assumed to have no run command in it
# dump a file in native LIGGGHTS dump format for Pizza.py dump tool

lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)

# initial 0-step run to generate dump file and image

lmp.command("run 0 pre yes post no")
ntimestep = 0

# wrapper on PyMol
# just proc 0 handles reading of dump file and viz

if me == 0:
  import pymol
  pymol.finish_launching()

  from dump import dump
  from pdbfile import pdbfile
  from pymol import cmd as pm

  d = dump("tmp.dump",0)
  p = pdbfile(d)
  d.next()
  d.unscale()
  p.single(ntimestep)
  pm.load("tmp.pdb")
  pm.show("spheres","tmp")

# run nfreq steps at a time w/out pre/post, read dump snapshot, display it

while ntimestep < nsteps:
  lmp.command("run %d pre no post no" % nfreq)
  ntimestep += nfreq
  if me == 0:
    d.next()
    d.unscale()
    p.single(ntimestep)
    pm.load("tmp.pdb")
    pm.forward()

lmp.command("run 0 pre no post yes")

# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
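# Example invocation (illustrative sketch; the input script name is hypothetical):
#   python viz_pymol.py in.pour 100 5000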
| CFDEMproject/LIGGGHTS-PUBLIC | python/examples/viz_pymol.py | Python | gpl-2.0 | 1,892 | ["PyMOL"] | 10f765bb8db41a804ed14dd2e5e9a287d42a2ae23d08810c2d76f601bf48f532 |
""" DIRAC JobDB class is a front-end to the main WMS database containing
job definitions and status information. It is used in most of the WMS
components
The following methods are provided for public usage:
getJobAttribute()
getJobAttributes()
getAllJobAttributes()
getDistinctJobAttributes()
getAttributesForJobList()
getJobParameter()
getJobParameters()
getAllJobParameters()
getInputData()
getJobJDL()
selectJobs()
selectJobsWithStatus()
setJobAttribute()
setJobAttributes()
setJobParameter()
setJobParameters()
setJobJDL()
setJobStatus()
setInputData()
insertNewJobIntoDB()
removeJobFromDB()
rescheduleJob()
rescheduleJobs()
getMask()
setMask()
allowSiteInMask()
banSiteInMask()
getCounters()
"""
__RCSID__ = "$Id$"
import sys
import operator
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC import S_OK, S_ERROR, Time
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup, getVOOption, getGroupOption
from DIRAC.Core.Base.DB import DB
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN, getDNForUsername
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getDIRACPlatform
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
DEBUG = False
JOB_STATES = ['Received', 'Checking', 'Staging', 'Waiting', 'Matched',
'Running', 'Stalled', 'Done', 'Completed', 'Failed']
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
JOB_DEPRECATED_ATTRIBUTES = [ 'UserPriority', 'SystemPriority' ]
JOB_STATIC_ATTRIBUTES = [ 'JobID', 'JobType', 'DIRACSetup', 'JobGroup', 'JobSplitType', 'MasterJobID',
'JobName', 'Owner', 'OwnerDN', 'OwnerGroup', 'SubmissionTime', 'VerifiedFlag' ]
JOB_VARIABLE_ATTRIBUTES = [ 'Site', 'RescheduleTime', 'StartExecTime', 'EndExecTime', 'RescheduleCounter',
'DeletedFlag', 'KilledFlag', 'FailedFlag',
'ISandboxReadyFlag', 'OSandboxReadyFlag', 'RetrievedFlag', 'AccountedFlag' ]
JOB_DYNAMIC_ATTRIBUTES = [ 'LastUpdateTime', 'HeartBeatTime',
'Status', 'MinorStatus', 'ApplicationStatus', 'ApplicationNumStatus', 'CPUTime'
]
#############################################################################
class JobDB( DB ):
def __init__( self, maxQueueSize = 10 ):
""" Standard Constructor
"""
DB.__init__( self, 'JobDB', 'WorkloadManagement/JobDB', maxQueueSize, debug = DEBUG )
self.maxRescheduling = gConfig.getValue( self.cs_path + '/MaxRescheduling', 3 )
self.jobAttributeNames = []
self.nJobAttributeNames = 0
result = self.__getAttributeNames()
if not result['OK']:
error = 'Can not retrieve job Attributes'
self.log.fatal( 'JobDB: %s' % error )
sys.exit( error )
return
self.log.info( "MaxReschedule: %s" % self.maxRescheduling )
self.log.info( "==================================================" )
if DEBUG:
result = self.dumpParameters()
def dumpParameters( self ):
""" Dump the JobDB connection parameters to the stdout
"""
print "=================================================="
print "User: ", self.dbUser
print "Host: ", self.dbHost
print "Password ", self.dbPass
print "DBName ", self.dbName
print "MaxQueue ", self.maxQueueSize
print "=================================================="
return S_OK()
def __getAttributeNames( self ):
""" get Name of Job Attributes defined in DB
set self.jobAttributeNames to the list of Names
return S_OK()
return S_ERROR upon error
"""
res = self._query( 'DESCRIBE Jobs' )
if not res['OK']:
return res
self.jobAttributeNames = []
for row in res['Value']:
field = row[0]
self.jobAttributeNames.append( field )
self.nJobAttributeNames = len( self.jobAttributeNames )
return S_OK()
#############################################################################
def getAttributesForJobList( self, jobIDList, attrList = None ):
""" Get attributes for the jobs in the the jobIDList.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[jobID][attribute_name] = attribute_value
"""
if not jobIDList:
return S_OK( {} )
if attrList:
attrNames = ','.join( [ str( x ) for x in attrList ] )
attr_tmp_list = attrList
else:
attrNames = ','.join( [ str( x ) for x in self.jobAttributeNames ] )
attr_tmp_list = self.jobAttributeNames
jobList = ','.join( [str( x ) for x in jobIDList] )
# FIXME: need to check if the attributes are in the list of job Attributes
cmd = 'SELECT JobID,%s FROM Jobs WHERE JobID in ( %s )' % ( attrNames, jobList )
res = self._query( cmd )
if not res['OK']:
return res
try:
retDict = {}
for retValues in res['Value']:
jobID = retValues[0]
jobDict = {}
jobDict[ 'JobID' ] = jobID
attrValues = retValues[1:]
for i in range( len( attr_tmp_list ) ):
try:
jobDict[attr_tmp_list[i]] = attrValues[i].tostring()
except Exception:
jobDict[attr_tmp_list[i]] = str( attrValues[i] )
retDict[int( jobID )] = jobDict
return S_OK( retDict )
except Exception, x:
return S_ERROR( 'JobDB.getAttributesForJobList: Failed\n%s' % str( x ) )
#############################################################################
def getDistinctJobAttributes( self, attribute, condDict = None, older = None,
newer = None, timeStamp = 'LastUpdateTime' ):
""" Get distinct values of the job attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Jobs', attribute, condDict = condDict,
older = older, newer = newer, timeStamp = timeStamp )
#############################################################################
def traceJobParameter( self, site, localID, parameter, date = None, until = None ):
ret = self.traceJobParameters( site, localID, [parameter], None, date, until )
if not ret['OK']:
return ret
returnDict = {}
for jobID in ret['Value']:
returnDict[jobID] = ret['Value'][jobID].get( parameter )
return S_OK( returnDict )
#############################################################################
def traceJobParameters( self, site, localIDs, paramList = None, attributeList = None, date = None, until = None ):
import datetime
exactTime = False
if not attributeList:
attributeList = []
attributeList = list( set( attributeList ) | set( ['StartExecTime', 'SubmissionTime', 'HeartBeatTime',
'EndExecTime', 'JobName', 'OwnerDN', 'OwnerGroup'] ) )
try:
if type( localIDs ) == type( [] ) or type( localIDs ) == type( {} ):
localIDs = [int( localID ) for localID in localIDs]
else:
localIDs = [int( localIDs )]
except:
return S_ERROR( "localIDs must be integers" )
now = datetime.datetime.utcnow()
if until:
if until.lower() == 'now':
until = now
else:
try:
until = datetime.datetime.strptime( until, '%Y-%m-%d' )
except:
return S_ERROR( "Error in format for 'until', expected '%Y-%m-%d'" )
if not date:
until = now
since = until - datetime.timedelta( hours = 24 )
else:
since = None
for dFormat in ( '%Y-%m-%d', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S' ):
try:
since = datetime.datetime.strptime( date, dFormat )
break
except:
exactTime = True
if not since:
return S_ERROR( 'Error in date format' )
if exactTime:
exactTime = since
if not until:
until = now
else:
if not until:
until = since + datetime.timedelta( hours = 24 )
if since > now:
return S_ERROR( 'Cannot find jobs in the future' )
if until > now:
until = now
result = self.selectJobs( {'Site':site}, older = str( until ), newer = str( since ) )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'No jobs found at %s for date %s' % ( site, date ) )
resultDict = {'Successful':{}, 'Failed':{}}
for jobID in result['Value']:
if jobID:
ret = self.getJobParameter( jobID, 'LocalJobID' )
if not ret['OK']:
return ret
localID = ret['Value']
if localID and int( localID ) in localIDs:
attributes = self.getJobAttributes( jobID, attributeList )
if not attributes['OK']:
return attributes
attributes = attributes['Value']
if exactTime:
for att in ( 'StartExecTime', 'SubmissionTime' ):
startTime = attributes.get( att )
if startTime == 'None':
startTime = None
if startTime:
break
startTime = datetime.datetime.strptime( startTime , '%Y-%m-%d %H:%M:%S' ) if startTime else now
for att in ( 'EndExecTime', 'HeartBeatTime' ):
lastTime = attributes.get( att )
if lastTime == 'None':
lastTime = None
if lastTime:
break
lastTime = datetime.datetime.strptime( lastTime, '%Y-%m-%d %H:%M:%S' ) if lastTime else now
okTime = ( exactTime >= startTime and exactTime <= lastTime )
else:
okTime = True
if okTime:
ret = self.getJobParameters( jobID, paramList = paramList )
if not ret['OK']:
return ret
attributes.update( ret['Value'] )
resultDict['Successful'].setdefault( int( localID ), {} )[int( jobID )] = attributes
for localID in localIDs:
if localID not in resultDict['Successful']:
resultDict['Failed'][localID] = 'localID not found'
return S_OK( resultDict )
#############################################################################
def getJobParameters( self, jobID, paramList = None ):
""" Get Job Parameters defined for jobID.
Returns a dictionary with the Job Parameters.
If paramList is empty, all the parameters are returned.
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
self.log.debug( 'JobDB.getParameters: Getting Parameters for job %s' % jobID )
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( ret['Value'] )
paramNames = ','.join( paramNameList )
cmd = "SELECT Name, Value from JobParameters WHERE JobID=%s and Name in (%s)" % ( e_jobID, paramNames )
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getJobParameters: failed to retrieve parameters' )
else:
result = self.getFields( 'JobParameters', ['Name', 'Value'], {'JobID': jobID} )
if not result['OK']:
return result
else:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
#############################################################################
def getAtticJobParameters( self, jobID, paramList = None, rescheduleCounter = -1 ):
""" Get Attic Job Parameters defined for a job with jobID.
Returns a dictionary with the Attic Job Parameters for each rescheduling cycle.
If paramList is empty, all the parameters are returned.
If rescheduleCounter = -1, all cycles are returned.
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
self.log.debug( 'JobDB.getAtticJobParameters: Getting Attic Parameters for job %s' % jobID )
resultDict = {}
paramCondition = ''
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( x )
paramNames = ','.join( paramNameList )
paramCondition = " AND Name in (%s)" % paramNames
rCounter = ''
if rescheduleCounter != -1:
rCounter = ' AND RescheduleCycle=%d' % int( rescheduleCounter )
cmd = "SELECT Name, Value, RescheduleCycle from AtticJobParameters"
cmd += " WHERE JobID=%s %s %s" % ( jobID, paramCondition, rCounter )
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value, counter in result['Value']:
if not resultDict.has_key( counter ):
resultDict[counter] = {}
try:
resultDict[counter][name] = value.tostring()
except Exception:
resultDict[counter][name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getAtticJobParameters: failed to retrieve parameters' )
#############################################################################
def getJobAttributes( self, jobID, attrList = None ):
""" Get all Job Attributes for a given jobID.
Return a dictionary with all Job Attributes;
return an empty dictionary if no matching job is found.
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if attrList:
attrNameList = []
for x in attrList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
x = "`" + ret['Value'][1:-1] + "`"
attrNameList.append( x )
attrNames = ','.join( attrNameList )
else:
attrNameList = []
for x in self.jobAttributeNames:
ret = self._escapeString( x )
if not ret['OK']:
return ret
x = "`" + ret['Value'][1:-1] + "`"
attrNameList.append( x )
attrNames = ','.join( attrNameList )
self.log.debug( 'JobDB.getAllJobAttributes: Getting Attributes for job = %s.' % jobID )
cmd = 'SELECT %s FROM Jobs WHERE JobID=%s' % ( attrNames, jobID )
res = self._query( cmd )
if not res['OK']:
return res
if len( res['Value'] ) == 0:
return S_OK ( {} )
values = res['Value'][0]
attributes = {}
if attrList:
for i in range( len( attrList ) ):
attributes[attrList[i]] = str( values[i] )
else:
for i in range( len( self.jobAttributeNames ) ):
attributes[self.jobAttributeNames[i]] = str( values[i] )
return S_OK( attributes )
#############################################################################
def getJobAttribute( self, jobID, attribute ):
""" Get the given attribute of a job specified by its jobID
"""
result = self.getJobAttributes( jobID, [attribute] )
if result['OK']:
value = result['Value'][attribute]
return S_OK( value )
else:
return result
#############################################################################
def getJobParameter( self, jobID, parameter ):
""" Get the given parameter of a job specified by its jobID
"""
result = self.getJobParameters( jobID, [parameter] )
if not result['OK']:
return result
return S_OK( result.get( 'Value', {} ).get( parameter ) )
#############################################################################
def getJobOptParameter( self, jobID, parameter ):
""" Get optimizer parameters for the given job.
"""
result = self.getFields( 'OptimizerParameters', ['Value'], {'JobID': jobID, 'Name': parameter} )
if result['OK']:
if result['Value']:
return S_OK( result['Value'][0][0] )
else:
return S_ERROR( 'Parameter not found' )
else:
return S_ERROR( 'Failed to access database' )
#############################################################################
def getJobOptParameters( self, jobID, paramList = None ):
""" Get optimizer parameters for the given job. If the list of parameter names is
empty, get all the parameters then
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( ret['Value'] )
paramNames = ','.join( paramNameList )
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s and Name in (%s)" % ( jobID, paramNames )
else:
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s" % jobID
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getJobOptParameters: failed to retrieve parameters' )
#############################################################################
def getInputData( self, jobID ):
"""Get input data for the given job
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
cmd = 'SELECT LFN FROM InputData WHERE JobID=%s' % jobID
res = self._query( cmd )
if not res['OK']:
return res
return S_OK( [ i[0] for i in res['Value'] if i[0].strip() ] )
#############################################################################
def setInputData( self, jobID, inputData ):
"""Inserts input data for the given job
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
cmd = 'DELETE FROM InputData WHERE JobID=%s' % ( jobID )
result = self._update( cmd )
if not result['OK']:
result = S_ERROR( 'JobDB.setInputData: operation failed.' )
for lfn in inputData:
# some jobs are setting empty string as InputData
if not lfn:
continue
ret = self._escapeString( lfn.strip() )
if not ret['OK']:
return ret
lfn = ret['Value']
cmd = 'INSERT INTO InputData (JobID,LFN) VALUES (%s, %s )' % ( jobID, lfn )
res = self._update( cmd )
if not res['OK']:
return res
return S_OK( 'Files added' )
#############################################################################
def setOptimizerChain( self, jobID, optimizerList ):
""" Set the optimizer chain for the given job. The 'TaskQueue'
optimizer should be the last one in the chain, it is added
if not present in the optimizerList
"""
optString = ','.join( optimizerList )
result = self.setJobOptParameter( jobID, 'OptimizerChain', optString )
return result
#############################################################################
def setNextOptimizer( self, jobID, currentOptimizer ):
""" Set the job status to be processed by the next optimizer in the
chain
"""
result = self.getJobOptParameter( jobID, 'OptimizerChain' )
if not result['OK']:
return result
optListString = result['Value']
optList = optListString.split( ',' )
try:
sindex = None
for i in xrange( len( optList ) ):
if optList[i] == currentOptimizer:
sindex = i
if sindex >= 0:
if sindex < len( optList ) - 1:
nextOptimizer = optList[sindex + 1]
else:
return S_ERROR( 'Unexpected end of the Optimizer Chain' )
else:
return S_ERROR( 'Could not find ' + currentOptimizer + ' in chain' )
except ValueError:
return S_ERROR( 'The ' + currentOptimizer + ' not found in the chain' )
result = self.setJobStatus( jobID, status = "Checking", minor = nextOptimizer )
if not result[ 'OK' ]:
return result
return S_OK( nextOptimizer )
############################################################################
def selectJobs( self, condDict, older = None, newer = None, timeStamp = 'LastUpdateTime',
orderAttribute = None, limit = None ):
""" Select jobs matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by the requested attribute and limited to a given
number of jobs if requested.
"""
self.log.debug( 'JobDB.selectJobs: retrieving jobs.' )
res = self.getFields( 'Jobs', ['JobID'], condDict = condDict, limit = limit,
older = older, newer = newer, timeStamp = timeStamp, orderAttribute = orderAttribute )
if not res['OK']:
return res
if not len( res['Value'] ):
return S_OK( [] )
return S_OK( [ self._to_value( i ) for i in res['Value'] ] )
#############################################################################
def setJobAttribute( self, jobID, attrName, attrValue, update = False, myDate = None ):
""" Set an attribute value for job specified by jobID.
The LastUpdate time stamp is refreshed if explicitly requested
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( attrValue )
if not ret['OK']:
return ret
value = ret['Value']
# FIXME: need to check the validity of attrName
if update:
cmd = "UPDATE Jobs SET %s=%s,LastUpdateTime=UTC_TIMESTAMP() WHERE JobID=%s" % ( attrName, value, jobID )
else:
cmd = "UPDATE Jobs SET %s=%s WHERE JobID=%s" % ( attrName, value, jobID )
if myDate:
cmd += ' AND LastUpdateTime < %s' % myDate
res = self._update( cmd )
if res['OK']:
return res
else:
return S_ERROR( 'JobDB.setAttribute: failed to set attribute' )
#############################################################################
def setJobAttributes( self, jobID, attrNames, attrValues, update = False, myDate = None ):
""" Set an attribute value for job specified by jobID.
The LastUpdate time stamp is refreshed if explicitely requested
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if len( attrNames ) != len( attrValues ):
return S_ERROR( 'JobDB.setAttributes: incompatible Argument length' )
# FIXME: Need to check the validity of attrNames
attr = []
for i in range( len( attrNames ) ):
ret = self._escapeString( attrValues[i] )
if not ret['OK']:
return ret
value = ret['Value']
attr.append( "%s=%s" % ( attrNames[i], value ) )
if update:
attr.append( "LastUpdateTime=UTC_TIMESTAMP()" )
if len( attr ) == 0:
return S_ERROR( 'JobDB.setAttributes: Nothing to do' )
cmd = 'UPDATE Jobs SET %s WHERE JobID=%s' % ( ', '.join( attr ), jobID )
if myDate:
cmd += ' AND LastUpdateTime < %s' % myDate
res = self._update( cmd )
if res['OK']:
return res
else:
return S_ERROR( 'JobDB.setAttributes: failed to set attribute' )
#############################################################################
def setJobStatus( self, jobID, status = '', minor = '', application = '', appCounter = None ):
""" Set status of the job specified by its jobID
"""
# Do not update the LastUpdate time stamp if setting the Stalled status
update_flag = True
if status == "Stalled":
update_flag = False
attrNames = []
attrValues = []
if status:
attrNames.append( 'Status' )
attrValues.append( status )
if minor:
attrNames.append( 'MinorStatus' )
attrValues.append( minor )
if application:
attrNames.append( 'ApplicationStatus' )
attrValues.append( application )
if appCounter:
attrNames.append( 'ApplicationNumStatus' )
attrValues.append( appCounter )
result = self.setJobAttributes( jobID, attrNames, attrValues, update = update_flag )
if not result['OK']:
return result
return S_OK()
#############################################################################
def setEndExecTime( self, jobID, endDate = None ):
""" Set EndExecTime time stamp
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if endDate:
ret = self._escapeString( endDate )
if not ret['OK']:
return ret
endDate = ret['Value']
req = "UPDATE Jobs SET EndExecTime=%s WHERE JobID=%s AND EndExecTime IS NULL" % ( endDate, jobID )
else:
req = "UPDATE Jobs SET EndExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND EndExecTime IS NULL" % jobID
result = self._update( req )
return result
#############################################################################
def setStartExecTime( self, jobID, startDate = None ):
""" Set StartExecTime time stamp
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if startDate:
ret = self._escapeString( startDate )
if not ret['OK']:
return ret
startDate = ret['Value']
req = "UPDATE Jobs SET StartExecTime=%s WHERE JobID=%s AND StartExecTime IS NULL" % ( startDate, jobID )
else:
req = "UPDATE Jobs SET StartExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND StartExecTime IS NULL" % jobID
result = self._update( req )
return result
#############################################################################
def setJobParameter( self, jobID, key, value ):
""" Set a parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString( key )
if not ret['OK']:
return ret
e_key = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
e_value = ret['Value']
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES (%d,%s,%s)' % ( int( jobID ), e_key, e_value )
result = self._update( cmd )
if not result['OK']:
result = S_ERROR( 'JobDB.setJobParameter: operation failed.' )
return result
#############################################################################
def setJobParameters( self, jobID, parameters ):
""" Set parameters specified by a list of name/value pairs for the job JobID
"""
if not parameters:
return S_OK()
insertValueList = []
for name, value in parameters:
ret = self._escapeString( name )
if not ret['OK']:
return ret
e_name = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
e_value = ret['Value']
insertValueList.append( '(%s,%s,%s)' % ( jobID, e_name, e_value ) )
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES %s' % ', '.join( insertValueList )
result = self._update( cmd )
if not result['OK']:
return S_ERROR( 'JobDB.setJobParameters: operation failed.' )
return result
#############################################################################
def setJobOptParameter( self, jobID, name, value ):
""" Set an optimzer parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
ret = self._escapeString( name )
if not ret['OK']:
return ret
e_name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % ( e_jobID, e_name )
if not self._update( cmd )['OK']:
return S_ERROR( 'JobDB.setJobOptParameter: operation failed.' )
result = self.insertFields( 'OptimizerParameters', ['JobID', 'Name', 'Value'], [jobID, name, value] )
if not result['OK']:
return S_ERROR( 'JobDB.setJobOptParameter: operation failed.' )
return S_OK()
#############################################################################
def removeJobOptParameter( self, jobID, name ):
""" Remove the specified optimizer parameter for jobID
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( name )
if not ret['OK']:
return ret
name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % ( jobID, name )
if not self._update( cmd )['OK']:
return S_ERROR( 'JobDB.removeJobOptParameter: operation failed.' )
else:
return S_OK()
#############################################################################
def setAtticJobParameter( self, jobID, key, value, rescheduleCounter ):
""" Set attic parameter for job specified by its jobID when job rescheduling
for later debugging
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( key )
if not ret['OK']:
return ret
key = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
value = ret['Value']
ret = self._escapeString( rescheduleCounter )
if not ret['OK']:
return ret
rescheduleCounter = ret['Value']
cmd = 'INSERT INTO AtticJobParameters (JobID,RescheduleCycle,Name,Value) VALUES(%s,%s,%s,%s)' % \
( jobID, rescheduleCounter, key, value )
result = self._update( cmd )
if not result['OK']:
result = S_ERROR( 'JobDB.setAtticJobParameter: operation failed.' )
return result
#############################################################################
def __setInitialJobParameters( self, classadJob, jobID ):
""" Set initial job parameters as was defined in the Classad
"""
# Extract initial job parameters
parameters = {}
if classadJob.lookupAttribute( "Parameters" ):
parameters = classadJob.getDictionaryFromSubJDL( "Parameters" )
res = self.setJobParameters( jobID, parameters.items() )
if not res['OK']:
return res
return S_OK()
#############################################################################
def setJobJDL( self, jobID, jdl = None, originalJDL = None ):
""" Insert JDL's for job specified by jobID
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( jdl )
if not ret['OK']:
return ret
e_JDL = ret['Value']
ret = self._escapeString( originalJDL )
if not ret['OK']:
return ret
e_originalJDL = ret['Value']
req = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
result = self._query( req )
updateFlag = False
if result['OK']:
if len( result['Value'] ) > 0:
updateFlag = True
if jdl:
if updateFlag:
cmd = "UPDATE JobJDLs Set JDL=%s WHERE JobID=%s" % ( e_JDL, jobID )
else:
cmd = "INSERT INTO JobJDLs (JobID,JDL) VALUES (%s,%s)" % ( jobID, e_JDL )
result = self._update( cmd )
if not result['OK']:
return result
if originalJDL:
if updateFlag:
cmd = "UPDATE JobJDLs Set OriginalJDL=%s WHERE JobID=%s" % ( e_originalJDL, jobID )
else:
cmd = "INSERT INTO JobJDLs (JobID,OriginalJDL) VALUES (%s,%s)" % ( jobID, e_originalJDL )
result = self._update( cmd )
return result
#############################################################################
def __insertNewJDL( self, jdl ):
"""Insert a new JDL in the system, this produces a new JobID
"""
err = 'JobDB.__insertNewJDL: Failed to retrieve a new Id.'
result = self.insertFields( 'JobJDLs' , ['OriginalJDL'], [jdl] )
if not result['OK']:
self.log.error( 'Can not insert New JDL', result['Message'] )
return result
if not 'lastRowId' in result:
return S_ERROR( '%s' % err )
jobID = int( result['lastRowId'] )
self.log.info( 'JobDB: New JobID served "%s"' % jobID )
return S_OK( jobID )
#############################################################################
def getJobJDL( self, jobID, original = False, status = '' ):
""" Get JDL for job specified by its jobID. By default the current job JDL
is returned. If 'original' argument is True, original JDL is returned
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( status )
if not ret['OK']:
return ret
e_status = ret['Value']
if original:
cmd = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
else:
cmd = "SELECT JDL FROM JobJDLs WHERE JobID=%s" % jobID
if status:
cmd = cmd + " AND Status=%s" % e_status
result = self._query( cmd )
if result['OK']:
jdl = result['Value']
if not jdl:
return S_OK( jdl )
else:
return S_OK( result['Value'][0][0] )
else:
return result
#############################################################################
def insertNewJobIntoDB( self, jdl, owner, ownerDN, ownerGroup, diracSetup ):
""" Insert the initial JDL into the Job database,
Do initial JDL crosscheck,
Set Initial job Attributes and Status
"""
jobManifest = JobManifest()
result = jobManifest.load( jdl )
if not result['OK']:
return result
jobManifest.setOptionsFromDict( { 'OwnerName' : owner,
'OwnerDN' : ownerDN,
'OwnerGroup' : ownerGroup,
'DIRACSetup' : diracSetup } )
result = jobManifest.check()
if not result['OK']:
return result
jobAttrNames = []
jobAttrValues = []
# 1.- insert original JDL on DB and get new JobID
# Fix the possible lack of the brackets in the JDL
if jdl.strip()[0].find( '[' ) != 0 :
jdl = '[' + jdl + ']'
result = self.__insertNewJDL( jdl )
if not result[ 'OK' ]:
return S_ERROR( 'Can not insert JDL in to DB' )
jobID = result[ 'Value' ]
jobManifest.setOption( 'JobID', jobID )
jobAttrNames.append( 'JobID' )
jobAttrValues.append( jobID )
jobAttrNames.append( 'LastUpdateTime' )
jobAttrValues.append( Time.toString() )
jobAttrNames.append( 'SubmissionTime' )
jobAttrValues.append( Time.toString() )
jobAttrNames.append( 'Owner' )
jobAttrValues.append( owner )
jobAttrNames.append( 'OwnerDN' )
jobAttrValues.append( ownerDN )
jobAttrNames.append( 'OwnerGroup' )
jobAttrValues.append( ownerGroup )
jobAttrNames.append( 'DIRACSetup' )
jobAttrValues.append( diracSetup )
# 2.- Check JDL and Prepare DIRAC JDL
classAdJob = ClassAd( jobManifest.dumpAsJDL() )
classAdReq = ClassAd( '[]' )
retVal = S_OK( jobID )
retVal['JobID'] = jobID
if not classAdJob.isOK():
jobAttrNames.append( 'Status' )
jobAttrValues.append( 'Failed' )
jobAttrNames.append( 'MinorStatus' )
jobAttrValues.append( 'Error in JDL syntax' )
result = self.insertFields( 'Jobs', jobAttrNames, jobAttrValues )
if not result['OK']:
return result
retVal['Status'] = 'Failed'
retVal['MinorStatus'] = 'Error in JDL syntax'
return retVal
classAdJob.insertAttributeInt( 'JobID', jobID )
result = self.__checkAndPrepareJob( jobID, classAdJob, classAdReq,
owner, ownerDN,
ownerGroup, diracSetup,
jobAttrNames, jobAttrValues )
if not result['OK']:
return result
priority = classAdJob.getAttributeInt( 'Priority' )
jobAttrNames.append( 'UserPriority' )
jobAttrValues.append( priority )
for jdlName in 'JobName', 'JobType', 'JobGroup':
# Defaults are set by the DB.
jdlValue = classAdJob.getAttributeString( jdlName )
if jdlValue:
jobAttrNames.append( jdlName )
jobAttrValues.append( jdlValue )
jdlValue = classAdJob.getAttributeString( 'Site' )
if jdlValue:
jobAttrNames.append( 'Site' )
if jdlValue.find( ',' ) != -1:
jobAttrValues.append( 'Multiple' )
else:
jobAttrValues.append( jdlValue )
jobAttrNames.append( 'VerifiedFlag' )
jobAttrValues.append( 'True' )
jobAttrNames.append( 'Status' )
jobAttrValues.append( 'Received' )
jobAttrNames.append( 'MinorStatus' )
jobAttrValues.append( 'Job accepted' )
reqJDL = classAdReq.asJDL()
classAdJob.insertAttributeInt( 'JobRequirements', reqJDL )
jobJDL = classAdJob.asJDL()
# Replace the JobID placeholder if any
if jobJDL.find( '%j' ) != -1:
jobJDL = jobJDL.replace( '%j', str( jobID ) )
result = self.setJobJDL( jobID, jobJDL )
if not result['OK']:
return result
# Adding the job in the Jobs table
result = self.insertFields( 'Jobs', jobAttrNames, jobAttrValues )
if not result['OK']:
return result
# Setting the Job parameters
result = self.__setInitialJobParameters( classAdJob, jobID )
if not result['OK']:
return result
# Looking for the Input Data
inputData = []
if classAdJob.lookupAttribute( 'InputData' ):
inputData = classAdJob.getListFromExpression( 'InputData' )
values = []
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
for lfn in inputData:
# some jobs are setting empty string as InputData
if not lfn:
continue
ret = self._escapeString( lfn.strip() )
if not ret['OK']:
return ret
lfn = ret['Value']
values.append( '(%s, %s )' % ( e_jobID, lfn ) )
if values:
cmd = 'INSERT INTO InputData (JobID,LFN) VALUES %s' % ', '.join( values )
result = self._update( cmd )
if not result['OK']:
return result
retVal['Status'] = 'Received'
retVal['MinorStatus'] = 'Job accepted'
return retVal
def __checkAndPrepareJob( self, jobID, classAdJob, classAdReq, owner, ownerDN,
ownerGroup, diracSetup, jobAttrNames, jobAttrValues ):
"""
Check Consistency of Submitted JDL and set some defaults
Prepare subJDL with Job Requirements
"""
error = ''
vo = getVOForGroup( ownerGroup )
jdlDiracSetup = classAdJob.getAttributeString( 'DIRACSetup' )
jdlOwner = classAdJob.getAttributeString( 'Owner' )
jdlOwnerDN = classAdJob.getAttributeString( 'OwnerDN' )
jdlOwnerGroup = classAdJob.getAttributeString( 'OwnerGroup' )
jdlVO = classAdJob.getAttributeString( 'VirtualOrganization' )
# The below is commented out since this is always overwritten by the submitter IDs,
# but the check allows detection of inconsistent client environments
if jdlDiracSetup and jdlDiracSetup != diracSetup:
error = 'Wrong DIRAC Setup in JDL'
if jdlOwner and jdlOwner != owner:
error = 'Wrong Owner in JDL'
elif jdlOwnerDN and jdlOwnerDN != ownerDN:
error = 'Wrong Owner DN in JDL'
elif jdlOwnerGroup and jdlOwnerGroup != ownerGroup:
error = 'Wrong Owner Group in JDL'
elif jdlVO and jdlVO != vo:
error = 'Wrong Virtual Organization in JDL'
classAdJob.insertAttributeString( 'Owner', owner )
classAdJob.insertAttributeString( 'OwnerDN', ownerDN )
classAdJob.insertAttributeString( 'OwnerGroup', ownerGroup )
submitPools = getGroupOption( ownerGroup, "SubmitPools" )
if not submitPools and vo:
submitPools = getVOOption( vo, 'SubmitPools' )
if submitPools and not classAdJob.lookupAttribute( 'SubmitPools' ):
classAdJob.insertAttributeString( 'SubmitPools', submitPools )
if vo:
classAdJob.insertAttributeString( 'VirtualOrganization', vo )
classAdReq.insertAttributeString( 'Setup', diracSetup )
classAdReq.insertAttributeString( 'OwnerDN', ownerDN )
classAdReq.insertAttributeString( 'OwnerGroup', ownerGroup )
if vo:
classAdReq.insertAttributeString( 'VirtualOrganization', vo )
setup = gConfig.getValue( '/DIRAC/Setup', '' )
voPolicyDict = gConfig.getOptionsDict( '/DIRAC/VOPolicy/%s/%s' % ( vo, setup ) )
# voPolicyDict = gConfig.getOptionsDict('/DIRAC/VOPolicy')
if voPolicyDict['OK']:
voPolicy = voPolicyDict['Value']
for param, val in voPolicy.items():
if not classAdJob.lookupAttribute( param ):
classAdJob.insertAttributeString( param, val )
priority = classAdJob.getAttributeInt( 'Priority' )
platform = classAdJob.getAttributeString( 'Platform' )
# Legacy check to suite the LHCb logic
if not platform:
platform = classAdJob.getAttributeString( 'SystemConfig' )
cpuTime = classAdJob.getAttributeInt( 'CPUTime' )
if cpuTime == 0:
# Just in case check for MaxCPUTime for backward compatibility
cpuTime = classAdJob.getAttributeInt( 'MaxCPUTime' )
if cpuTime > 0:
classAdJob.insertAttributeInt( 'CPUTime', cpuTime )
classAdReq.insertAttributeInt( 'UserPriority', priority )
classAdReq.insertAttributeInt( 'CPUTime', cpuTime )
if platform and platform.lower() != 'any':
result = getDIRACPlatform( platform )
if result['OK'] and result['Value']:
classAdReq.insertAttributeVectorString( 'Platforms', result['Value'] )
else:
error = "OS compatibility info not found"
if error:
retVal = S_ERROR( error )
retVal['JobId'] = jobID
retVal['Status'] = 'Failed'
retVal['MinorStatus'] = error
jobAttrNames.append( 'Status' )
jobAttrValues.append( 'Failed' )
jobAttrNames.append( 'MinorStatus' )
jobAttrValues.append( error )
resultInsert = self.setJobAttributes( jobID, jobAttrNames, jobAttrValues )
if not resultInsert['OK']:
retVal['MinorStatus'] += '; %s' % resultInsert['Message']
return retVal
return S_OK()
#############################################################################
def removeJobFromDB( self, jobIDs ):
"""Remove job from DB
Remove job from the Job DB and clean up all the job related data
in various tables
"""
# ret = self._escapeString(jobID)
# if not ret['OK']:
# return ret
# e_jobID = ret['Value']
if type( jobIDs ) != type( [] ):
jobIDList = [jobIDs]
else:
jobIDList = jobIDs
failedTablesList = []
jobIDString = ','.join( [str( j ) for j in jobIDList] )
for table in ['InputData',
'JobParameters',
'AtticJobParameters',
'HeartBeatLoggingInfo',
'OptimizerParameters',
'JobCommands',
'Jobs',
'JobJDLs']:
cmd = 'DELETE FROM %s WHERE JobID in (%s)' % ( table, jobIDString )
result = self._update( cmd )
if not result['OK']:
failedTablesList.append( table )
result = S_OK()
if failedTablesList:
result = S_ERROR( 'Errors while job removal' )
result['FailedTables'] = failedTablesList
return result
#################################################################
def rescheduleJobs( self, jobIDs ):
""" Reschedule all the jobs in the given list
"""
result = S_OK()
failedJobs = []
for jobID in jobIDs:
result = self.rescheduleJob( jobID )
if not result['OK']:
failedJobs.append( jobID )
if failedJobs:
result = S_ERROR( 'JobDB.rescheduleJobs: Not all the jobs were rescheduled' )
result['FailedJobs'] = failedJobs
return result
#############################################################################
def rescheduleJob ( self, jobID ):
""" Reschedule the given job to run again from scratch. Retain the already
defined parameters in the parameter Attic
"""
# Check Verified Flag
result = self.getJobAttributes( jobID, ['Status', 'MinorStatus', 'VerifiedFlag', 'RescheduleCounter',
'Owner', 'OwnerDN', 'OwnerGroup', 'DIRACSetup'] )
if result['OK']:
resultDict = result['Value']
else:
return S_ERROR( 'JobDB.getJobAttributes: can not retrieve job attributes' )
if not 'VerifiedFlag' in resultDict:
return S_ERROR( 'Job ' + str( jobID ) + ' not found in the system' )
if not resultDict['VerifiedFlag']:
return S_ERROR( 'Job %s not Verified: Status = %s, MinorStatus = %s' % (
jobID,
resultDict['Status'],
resultDict['MinorStatus'] ) )
# Check the Reschedule counter first
rescheduleCounter = int( resultDict['RescheduleCounter'] ) + 1
self.maxRescheduling = gConfig.getValue( self.cs_path + '/MaxRescheduling', self.maxRescheduling )
# Exit if the limit of the reschedulings is reached
if rescheduleCounter > self.maxRescheduling:
self.log.warn( 'Maximum number of reschedulings is reached', 'Job %s' % jobID )
self.setJobStatus( jobID, status = 'Failed', minor = 'Maximum of reschedulings reached' )
return S_ERROR( 'Maximum number of reschedulings is reached: %s' % self.maxRescheduling )
jobAttrNames = []
jobAttrValues = []
jobAttrNames.append( 'RescheduleCounter' )
jobAttrValues.append( rescheduleCounter )
# Save the job parameters for later debugging
result = self.getJobParameters( jobID )
if result['OK']:
parDict = result['Value']
for key, value in parDict.items():
result = self.setAtticJobParameter( jobID, key, value, rescheduleCounter - 1 )
if not result['OK']:
break
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
cmd = 'DELETE FROM JobParameters WHERE JobID=%s' % e_jobID
res = self._update( cmd )
if not res['OK']:
return res
# Delete optimizer parameters
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s' % ( e_jobID )
if not self._update( cmd )['OK']:
return S_ERROR( 'JobDB.removeJobOptParameter: operation failed.' )
# the Jobreceiver needs to know if there is InputData ??? to decide which optimizer to call
# proposal: - use the getInputData method
res = self.getJobJDL( jobID, original = True )
if not res['OK']:
return res
jdl = res['Value']
# Fix the possible lack of the brackets in the JDL
if jdl.strip()[0].find( '[' ) != 0 :
jdl = '[' + jdl + ']'
classAdJob = ClassAd( jdl )
classAdReq = ClassAd( '[]' )
retVal = S_OK( jobID )
retVal['JobID'] = jobID
classAdJob.insertAttributeInt( 'JobID', jobID )
result = self.__checkAndPrepareJob( jobID, classAdJob, classAdReq, resultDict['Owner'],
resultDict['OwnerDN'], resultDict['OwnerGroup'],
resultDict['DIRACSetup'],
jobAttrNames, jobAttrValues )
if not result['OK']:
return result
priority = classAdJob.getAttributeInt( 'Priority' )
jobAttrNames.append( 'UserPriority' )
jobAttrValues.append( priority )
siteList = classAdJob.getListFromExpression( 'Site' )
if not siteList:
site = 'ANY'
elif len( siteList ) > 1:
site = "Multiple"
else:
site = siteList[0]
jobAttrNames.append( 'Site' )
jobAttrValues.append( site )
jobAttrNames.append( 'Status' )
jobAttrValues.append( 'Received' )
jobAttrNames.append( 'MinorStatus' )
jobAttrValues.append( 'Job Rescheduled' )
jobAttrNames.append( 'ApplicationStatus' )
jobAttrValues.append( 'Unknown' )
jobAttrNames.append( 'ApplicationNumStatus' )
jobAttrValues.append( 0 )
jobAttrNames.append( 'LastUpdateTime' )
jobAttrValues.append( Time.toString() )
jobAttrNames.append( 'RescheduleTime' )
jobAttrValues.append( Time.toString() )
reqJDL = classAdReq.asJDL()
classAdJob.insertAttributeInt( 'JobRequirements', reqJDL )
jobJDL = classAdJob.asJDL()
# Replace the JobID placeholder if any
if jobJDL.find( '%j' ) != -1:
jobJDL = jobJDL.replace( '%j', str( jobID ) )
result = self.setJobJDL( jobID, jobJDL )
if not result['OK']:
return result
result = self.__setInitialJobParameters( classAdJob, jobID )
if not result['OK']:
return result
result = self.setJobAttributes( jobID, jobAttrNames, jobAttrValues )
if not result['OK']:
return result
retVal['InputData'] = classAdJob.lookupAttribute( "InputData" )
retVal['RescheduleCounter'] = rescheduleCounter
retVal['Status'] = 'Received'
retVal['MinorStatus'] = 'Job Rescheduled'
return retVal
#############################################################################
def getSiteMask( self, siteState = 'Active' ):
""" Get the currently active site list
"""
ret = self._escapeString( siteState )
if not ret['OK']:
return ret
siteState = ret['Value']
if siteState == "All":
cmd = "SELECT Site FROM SiteMask"
else:
cmd = "SELECT Site FROM SiteMask WHERE Status=%s" % siteState
result = self._query( cmd )
siteList = []
if result['OK']:
siteList = [ x[0] for x in result['Value']]
return S_OK( siteList )
#############################################################################
def getSiteMaskStatus( self ):
""" Get the currently site mask status
"""
cmd = "SELECT Site,Status FROM SiteMask"
result = self._query( cmd )
siteDict = {}
if result['OK']:
for site, status in result['Value']:
siteDict[site] = status
return S_OK( siteDict )
#############################################################################
def setSiteMask( self, siteMaskList, authorDN = 'Unknown', comment = 'No comment' ):
""" Set the Site Mask to the given mask in a form of a list of tuples (site,status)
"""
for site, status in siteMaskList:
result = self.__setSiteStatusInMask( site, status, authorDN, comment )
if not result['OK']:
return result
return S_OK()
#############################################################################
def __setSiteStatusInMask( self, site, status, authorDN = 'Unknown', comment = 'No comment' ):
""" Set the given site status to 'status' or add a new active site
"""
result = self._escapeString( site )
if not result['OK']:
return result
site = result['Value']
result = self._escapeString( status )
if not result['OK']:
return result
status = result['Value']
result = self._escapeString( authorDN )
if not result['OK']:
return result
authorDN = result['Value']
result = self._escapeString( comment )
if not result['OK']:
return result
comment = result['Value']
req = "SELECT Status FROM SiteMask WHERE Site=%s" % site
result = self._query( req )
if result['OK']:
if len( result['Value'] ) > 0:
current_status = result['Value'][0][0]
if current_status == status:
return S_OK()
else:
req = "UPDATE SiteMask SET Status=%s,LastUpdateTime=UTC_TIMESTAMP()," \
"Author=%s, Comment=%s WHERE Site=%s"
req = req % ( status, authorDN, comment, site )
else:
req = "INSERT INTO SiteMask VALUES (%s,%s,UTC_TIMESTAMP(),%s,%s)" % ( site, status, authorDN, comment )
result = self._update( req )
if not result['OK']:
return S_ERROR( 'Failed to update the Site Mask' )
# update the site mask logging record
req = "INSERT INTO SiteMaskLogging VALUES (%s,%s,UTC_TIMESTAMP(),%s,%s)" % ( site, status, authorDN, comment )
result = self._update( req )
if not result['OK']:
self.log.warn( 'Failed to update site mask logging for %s' % site )
else:
return S_ERROR( 'Failed to get the Site Status from the Mask' )
return S_OK()
#############################################################################
def banSiteInMask( self, site, authorDN = 'Unknown', comment = 'No comment' ):
""" Forbid the given site in the Site Mask
"""
result = self.__setSiteStatusInMask( site, 'Banned', authorDN, comment )
return result
#############################################################################
def allowSiteInMask( self, site, authorDN = 'Unknown', comment = 'No comment' ):
""" Forbid the given site in the Site Mask
"""
result = self.__setSiteStatusInMask( site, 'Active', authorDN, comment )
return result
#############################################################################
def removeSiteFromMask( self, site = None ):
""" Remove the given site from the mask
"""
if not site:
req = "DELETE FROM SiteMask"
else:
ret = self._escapeString( site )
if not ret['OK']:
return ret
site = ret['Value']
req = "DELETE FROM SiteMask WHERE Site=%s" % site
return self._update( req )
#############################################################################
def getSiteMaskLogging( self, siteList ):
""" Get the site mask logging history for the list if site names
"""
if siteList:
siteString = ','.join( [ "'" + x + "'" for x in siteList ] )
req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging WHERE Site in (%s)" % siteString
else:
req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging"
req += " ORDER BY UpdateTime ASC"
result = self._query( req )
if not result['OK']:
return result
availableSiteList = []
for row in result['Value']:
site, status, utime, author, comment = row
availableSiteList.append( site )
resultDict = {}
for site in siteList:
if not result['Value'] or site not in availableSiteList:
ret = self._escapeString( site )
if not ret['OK']:
continue
e_site = ret['Value']
req = "SELECT Status Site,Status,LastUpdateTime,Author,Comment FROM SiteMask WHERE Site=%s" % e_site
resSite = self._query( req )
if resSite['OK']:
if resSite['Value']:
site, status, lastUpdate, author, comment = resSite['Value'][0]
resultDict[site] = [( status, str( lastUpdate ), author, comment )]
else:
resultDict[site] = [( 'Unknown', '', '', 'Site not present in logging table' )]
for row in result['Value']:
site, status, utime, author, comment = row
if not resultDict.has_key( site ):
resultDict[site] = []
resultDict[site].append( ( status, str( utime ), author, comment ) )
return S_OK( resultDict )
#############################################################################
def getSiteSummary( self ):
""" Get the summary of jobs in a given status on all the sites
"""
waitingList = ['"Submitted"', '"Assigned"', '"Waiting"', '"Matched"']
waitingString = ','.join( waitingList )
result = self.getDistinctJobAttributes( 'Site' )
if not result['OK']:
return result
siteList = result['Value']
siteDict = {}
totalDict = {'Waiting':0, 'Running':0, 'Stalled':0, 'Done':0, 'Failed':0}
for site in siteList:
if site == "ANY":
continue
# Waiting
siteDict[site] = {}
ret = self._escapeString( site )
if not ret['OK']:
return ret
e_site = ret['Value']
req = "SELECT COUNT(JobID) FROM Jobs WHERE Status IN (%s) AND Site=%s" % ( waitingString, e_site )
result = self._query( req )
if result['OK']:
count = result['Value'][0][0]
else:
return S_ERROR( 'Failed to get Site data from the JobDB' )
siteDict[site]['Waiting'] = count
totalDict['Waiting'] += count
# Running,Stalled,Done,Failed
for status in ['"Running"', '"Stalled"', '"Done"', '"Failed"']:
req = "SELECT COUNT(JobID) FROM Jobs WHERE Status=%s AND Site=%s" % ( status, e_site )
result = self._query( req )
if result['OK']:
count = result['Value'][0][0]
else:
return S_ERROR( 'Failed to get Site data from the JobDB' )
siteDict[site][status.replace( '"', '' )] = count
totalDict[status.replace( '"', '' )] += count
siteDict['Total'] = totalDict
return S_OK( siteDict )
#################################################################################
def getSiteSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
""" Get the summary of jobs in a given status on all the sites in the standard Web form
"""
paramNames = ['Site', 'GridType', 'Country', 'Tier', 'MaskStatus']
paramNames += JOB_STATES
paramNames += ['Efficiency', 'Status']
#FIXME: hack!!!
siteT1List = ['CERN', 'IN2P3', 'NIKHEF', 'SARA', 'PIC', 'CNAF', 'RAL', 'GRIDKA', 'RRCKI']
# Sort out records as requested
sortItem = -1
sortOrder = "ASC"
if sortList:
item = sortList[0][0] # only one item for the moment
sortItem = paramNames.index( item )
sortOrder = sortList[0][1]
last_update = None
if selectDict.has_key( 'LastUpdateTime' ):
last_update = selectDict['LastUpdateTime']
del selectDict['LastUpdateTime']
result = self.getCounters( 'Jobs', ['Site', 'Status'],
{}, newer = last_update,
timeStamp = 'LastUpdateTime' )
last_day = Time.dateTime() - Time.day
resultDay = self.getCounters( 'Jobs', ['Site', 'Status'],
{}, newer = last_day,
timeStamp = 'EndExecTime' )
# Get the site mask status
siteMask = {}
resultMask = self.getSiteMask( 'All' )
if resultMask['OK']:
for site in resultMask['Value']:
siteMask[site] = 'NoMask'
resultMask = self.getSiteMask( 'Active' )
if resultMask['OK']:
for site in resultMask['Value']:
siteMask[site] = 'Active'
resultMask = self.getSiteMask( 'Banned' )
if resultMask['OK']:
for site in resultMask['Value']:
siteMask[site] = 'Banned'
# Sort out different counters
resultDict = {}
if result['OK']:
for attDict, count in result['Value']:
siteFullName = attDict['Site']
status = attDict['Status']
if not resultDict.has_key( siteFullName ):
resultDict[siteFullName] = {}
for state in JOB_STATES:
resultDict[siteFullName][state] = 0
if status not in JOB_FINAL_STATES:
resultDict[siteFullName][status] = count
if resultDay['OK']:
for attDict, count in resultDay['Value']:
siteFullName = attDict['Site']
if not resultDict.has_key( siteFullName ):
resultDict[siteFullName] = {}
for state in JOB_STATES:
resultDict[siteFullName][state] = 0
status = attDict['Status']
if status in JOB_FINAL_STATES:
resultDict[siteFullName][status] = count
# Collect records now
records = []
countryCounts = {}
for siteFullName in resultDict:
siteDict = resultDict[siteFullName]
if siteFullName.find( '.' ) != -1:
grid, site, country = siteFullName.split( '.' )
else:
grid, site, country = 'Unknown', 'Unknown', 'Unknown'
tier = 'Tier-2'
if site in siteT1List:
tier = 'Tier-1'
if not countryCounts.has_key( country ):
countryCounts[country] = {}
for state in JOB_STATES:
countryCounts[country][state] = 0
rList = [siteFullName, grid, country, tier]
if siteMask.has_key( siteFullName ):
rList.append( siteMask[siteFullName] )
else:
rList.append( 'NoMask' )
for status in JOB_STATES:
rList.append( siteDict[status] )
countryCounts[country][status] += siteDict[status]
efficiency = 0
total_finished = 0
for state in JOB_FINAL_STATES:
total_finished += resultDict[siteFullName][state]
if total_finished > 0:
efficiency = float( siteDict['Done'] + siteDict['Completed'] ) / float( total_finished )
rList.append( '%.1f' % ( efficiency * 100. ) )
# Estimate the site verbose status
if efficiency > 0.95:
rList.append( 'Good' )
elif efficiency > 0.80:
rList.append( 'Fair' )
elif efficiency > 0.60:
rList.append( 'Poor' )
elif total_finished == 0:
rList.append( 'Idle' )
else:
rList.append( 'Bad' )
records.append( rList )
# Select records as requested
if selectDict:
for item in selectDict:
selectItem = paramNames.index( item )
values = selectDict[item]
if type( values ) != type( [] ):
values = [values]
indices = range( len( records ) )
indices.reverse()
for ind in indices:
if records[ind][selectItem] not in values:
del records[ind]
# Sort records as requested
if sortItem != -1 :
if sortOrder.lower() == "asc":
records.sort( key = operator.itemgetter( sortItem ) )
else:
records.sort( key = operator.itemgetter( sortItem ), reverse = True )
# Collect the final result
finalDict = {}
finalDict['ParameterNames'] = paramNames
# Return all the records if maxItems == 0 or the specified number otherwise
if maxItems:
if startItem + maxItems > len( records ):
finalDict['Records'] = records[startItem:]
else:
finalDict['Records'] = records[startItem:startItem + maxItems]
else:
finalDict['Records'] = records
finalDict['TotalRecords'] = len( records )
finalDict['Extras'] = countryCounts
return S_OK( finalDict )
#################################################################################
def getUserSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
""" Get the summary of user jobs in a standard form for the Web portal.
Pagination and global sorting are supported.
"""
paramNames = ['Owner', 'OwnerDN', 'OwnerGroup']
paramNames += JOB_STATES
paramNames += ['TotalJobs']
# Sort out records as requested
sortItem = -1
sortOrder = "ASC"
if sortList:
item = sortList[0][0] # only one item for the moment
sortItem = paramNames.index( item )
sortOrder = sortList[0][1]
last_update = None
if selectDict.has_key( 'LastUpdateTime' ):
last_update = selectDict['LastUpdateTime']
del selectDict['LastUpdateTime']
if selectDict.has_key( 'Owner' ):
username = selectDict['Owner']
del selectDict['Owner']
result = getDNForUsername( username )
if result['OK']:
selectDict['OwnerDN'] = result['Value']
else:
return S_ERROR( 'Unknown user %s' % username )
result = self.getCounters( 'Jobs', ['OwnerDN', 'OwnerGroup', 'Status'],
selectDict, newer = last_update,
timeStamp = 'LastUpdateTime' )
last_day = Time.dateTime() - Time.day
resultDay = self.getCounters( 'Jobs', ['OwnerDN', 'OwnerGroup', 'Status'],
selectDict, newer = last_day,
timeStamp = 'EndExecTime' )
# Sort out different counters
resultDict = {}
for attDict, count in result['Value']:
owner = attDict['OwnerDN']
group = attDict['OwnerGroup']
status = attDict['Status']
if not resultDict.has_key( owner ):
resultDict[owner] = {}
if not resultDict[owner].has_key( group ):
resultDict[owner][group] = {}
for state in JOB_STATES:
resultDict[owner][group][state] = 0
resultDict[owner][group][status] = count
for attDict, count in resultDay['Value']:
owner = attDict['OwnerDN']
group = attDict['OwnerGroup']
status = attDict['Status']
if status in JOB_FINAL_STATES:
resultDict[owner][group][status] = count
# Collect records now
records = []
totalUser = {}
for owner in resultDict:
totalUser[owner] = 0
for group in resultDict[owner]:
result = getUsernameForDN( owner )
if result['OK']:
username = result['Value']
else:
username = 'Unknown'
rList = [username, owner, group]
count = 0
for state in JOB_STATES:
s_count = resultDict[owner][group][state]
rList.append( s_count )
count += s_count
rList.append( count )
records.append( rList )
totalUser[owner] += count
# Sort out records
if sortItem != -1 :
if sortOrder.lower() == "asc":
records.sort( key = operator.itemgetter( sortItem ) )
else:
records.sort( key = operator.itemgetter( sortItem ), reverse = True )
# Collect the final result
finalDict = {}
finalDict['ParameterNames'] = paramNames
# Return all the records if maxItems == 0 or the specified number otherwise
if maxItems:
if startItem + maxItems > len( records ):
finalDict['Records'] = records[startItem:]
else:
finalDict['Records'] = records[startItem:startItem + maxItems]
else:
finalDict['Records'] = records
finalDict['TotalRecords'] = len( records )
return S_OK( finalDict )
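# Illustrative call (sketch only; the "jobDB" instance name and the selection
# values are assumptions, the argument layout follows the signature above):
#   result = jobDB.getUserSummaryWeb( {'Owner': 'jdoe'}, [( 'TotalJobs', 'DESC' )], 0, 50 )
#   if result['OK']:
#     names = result['Value']['ParameterNames']
#     rows = result['Value']['Records']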
#####################################################################################
def setHeartBeatData( self, jobID, staticDataDict, dynamicDataDict ):
""" Add the job's heart beat data to the database
"""
# Set the time stamp first
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
req = "UPDATE Jobs SET HeartBeatTime=UTC_TIMESTAMP(), Status='Running' WHERE JobID=%s" % e_jobID
result = self._update( req )
if not result['OK']:
return S_ERROR( 'Failed to set the heart beat time: ' + result['Message'] )
ok = True
# FIXME: storing the heartbeat info as job parameters is not optimal; a proper solution is still needed
# Add static data items as job parameters
result = self.setJobParameters( jobID, staticDataDict.items() )
if not result['OK']:
ok = False
self.log.warn( result['Message'] )
# Add dynamic data to the job heart beat log
# start = time.time()
valueList = []
for key, value in dynamicDataDict.items():
result = self._escapeString( key )
if not result['OK']:
self.log.warn( 'Failed to escape string ' + key )
continue
e_key = result['Value']
result = self._escapeString( value )
if not result['OK']:
self.log.warn( 'Failed to escape string ' + value )
continue
e_value = result['Value']
valueList.append( "( %s, %s,%s,UTC_TIMESTAMP())" % ( e_jobID, e_key, e_value ) )
if valueList:
valueString = ','.join( valueList )
req = "INSERT INTO HeartBeatLoggingInfo (JobID,Name,Value,HeartBeatTime) VALUES "
req += valueString
result = self._update( req )
if not result['OK']:
ok = False
self.log.warn( result['Message'] )
if ok:
return S_OK()
else:
return S_ERROR( 'Failed to store some or all the parameters' )
#####################################################################################
def getHeartBeatData( self, jobID ):
""" Retrieve the job's heart beat data
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
cmd = 'SELECT Name,Value,HeartBeatTime from HeartBeatLoggingInfo WHERE JobID=%s' % jobID
res = self._query( cmd )
if not res['OK']:
return res
if len( res['Value'] ) == 0:
return S_OK ( [] )
result = []
values = res['Value']
for row in values:
result.append( ( str( row[0] ), '%.01f' % ( float( row[1].replace( '"', '' ) ) ), str( row[2] ) ) )
return S_OK( result )
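# Sketch of a heart beat round trip (illustrative only; the "jobDB" instance name
# and the sample payloads are assumptions):
#   jobDB.setHeartBeatData( 1234,
#                           { 'HostName': 'wn001.example.org' },            # stored as job parameters
#                           { 'CPUConsumed': '120.5', 'Memory': '512.0' } ) # logged in HeartBeatLoggingInfo
#   result = jobDB.getHeartBeatData( 1234 )
#   # result['Value'] is a list of ( name, value, timeStamp ) tuples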
#####################################################################################
def setJobCommand( self, jobID, command, arguments = None ):
""" Store a command to be passed to the job together with the
next heart beat
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( command )
if not ret['OK']:
return ret
command = ret['Value']
if arguments:
ret = self._escapeString( arguments )
if not ret['OK']:
return ret
arguments = ret['Value']
else:
arguments = "''"
req = "INSERT INTO JobCommands (JobID,Command,Arguments,ReceptionTime) "
req += "VALUES (%s,%s,%s,UTC_TIMESTAMP())" % ( jobID, command, arguments )
result = self._update( req )
return result
#####################################################################################
def getJobCommand( self, jobID, status = 'Received' ):
""" Get a command to be passed to the job together with the
next heart beat
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( status )
if not ret['OK']:
return ret
status = ret['Value']
req = "SELECT Command, Arguments FROM JobCommands WHERE JobID=%s AND Status=%s" % ( jobID, status )
result = self._query( req )
if not result['OK']:
return result
resultDict = {}
if result['Value']:
for row in result['Value']:
resultDict[row[0]] = row[1]
return S_OK( resultDict )
#####################################################################################
def setJobCommandStatus( self, jobID, command, status ):
""" Set the command status
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( command )
if not ret['OK']:
return ret
command = ret['Value']
ret = self._escapeString( status )
if not ret['OK']:
return ret
status = ret['Value']
req = "UPDATE JobCommands SET Status=%s WHERE JobID=%s AND Command=%s" % ( status, jobID, command )
result = self._update( req )
return result
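# Typical command life cycle (sketch; "jobDB", the 'Kill' command and the 'Sent'
# status are assumptions, and the JobCommands table is assumed to default Status
# to 'Received', which is what getJobCommand selects by default):
#   jobDB.setJobCommand( 1234, 'Kill' )
#   commands = jobDB.getJobCommand( 1234 )['Value']     # e.g. {'Kill': ''}
#   jobDB.setJobCommandStatus( 1234, 'Kill', 'Sent' )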
#####################################################################################
def getSummarySnapshot( self, requestedFields = False ):
""" Get the summary snapshot for a given combination
"""
if not requestedFields:
requestedFields = [ 'Status', 'MinorStatus',
'Site', 'Owner', 'OwnerGroup', 'JobGroup', 'JobSplitType' ]
defFields = [ 'DIRACSetup' ] + requestedFields
valueFields = [ 'COUNT(JobID)', 'SUM(RescheduleCounter)' ]
defString = ", ".join( defFields )
valueString = ", ".join( valueFields )
sqlCmd = "SELECT %s, %s From Jobs GROUP BY %s" % ( defString, valueString, defString )
result = self._query( sqlCmd )
if not result[ 'OK' ]:
return result
return S_OK( ( ( defFields + valueFields ), result[ 'Value' ] ) )
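# Illustrative use (sketch; "jobDB" is an assumed instance name):
#   result = jobDB.getSummarySnapshot()
#   fields, rows = result['Value']
#   # fields == [ 'DIRACSetup', 'Status', 'MinorStatus', 'Site', 'Owner', 'OwnerGroup',
#   #             'JobGroup', 'JobSplitType', 'COUNT(JobID)', 'SUM(RescheduleCounter)' ]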
| calancha/DIRAC | WorkloadManagementSystem/DB/JobDB.py | Python | gpl-3.0 | 72,060 | ["DIRAC"] | d15f1687d6b18be01b06fd1c42a8ef479d31435d684af05a797d63eb1170374d |
"""Basic UI test that checks if the stack analysis is visible for the newly created project."""
from splinter import Browser
import time
import os
from urllib.parse import urljoin
SLEEP_BETWEEN_PAGES = 15
SLEEP_BEFORE_CLICK = 15
class Context:
"""Class that holds context for the UI tests."""
def __init__(self, server, username, password):
"""Initialize the attributes holding server URL, user name, and password."""
self.browser = None
self.space_name = None
self.server = server
self.username = username
self.password = password
def check_env_variable(env_var_name):
"""Check if the given environment variable is present."""
assert os.environ.get(env_var_name), \
'The environment variable {v} should be set properly'.format(
v=env_var_name)
def check_setup():
"""Check if all required environment variables are present."""
check_env_variable('TARGET_SERVER')
check_env_variable('OPENSHIFT_USERNAME')
check_env_variable('OPENSHIFT_PASSWORD')
def front_page(context):
"""Go to the Openshift.io front page and click the Login button."""
print("Front page")
url = context.server
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
login_button = context.browser.find_by_css('button#login').first
assert login_button.visible
assert login_button.value == 'LOG IN'
time.sleep(SLEEP_BEFORE_CLICK)
login_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def login_page(context):
"""Login into the Openshift.io using the provided username and password."""
print("Login page")
username_input = context.browser.find_by_id('username').first
password_input = context.browser.find_by_id('password').first
assert username_input.visible
assert password_input.visible
context.browser.fill('username', context.username)
context.browser.fill('password', context.password)
login_button = context.browser.find_by_id('kc-login').first
assert login_button.visible
time.sleep(SLEEP_BEFORE_CLICK)
login_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def get_all_existing_space_names(browser):
"""Return list of names of Spaces."""
spaces = browser.find_by_xpath("//div[@class='space-item']/h2/a")
assert spaces is not None
names = [space.value for space in spaces]
print("Already created Spaces")
print(" ".join(names))
return names
def generate_space_prefix():
"""Generate prefix for the new Space.
The prefix is based on the current local time, so it will very probably be
unique for a given user (if not, the index will be incremented).
"""
localtime = time.localtime()
return time.strftime("test%Y-%m-%d-", localtime)
def space_name(prefix, index):
"""Construct name of space from the prefix and its index."""
return "{p}{i}".format(p=prefix, i=index)
def is_space_name_unique(prefix, index, space_names):
"""Check if the name of the Space is unique."""
name = space_name(prefix, index)
return name not in space_names
def generate_unique_space_name(space_names):
"""Generate a name for a Space.
The name is based on current date and is unique (by adding a small index to the date).
"""
prefix = generate_space_prefix()
index = 1
while not is_space_name_unique(prefix, index, space_names):
index += 1
return space_name(prefix, index)
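# Worked example (illustrative dates): with prefix "test2018-01-15-" and existing
# Spaces ["test2018-01-15-1", "test2018-01-15-2"], the loop above returns
# "test2018-01-15-3".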
def create_new_space_step_1(context):
"""Perform the first step to create new Space."""
print('Create new Space: step 1')
new_space_button = context.browser.find_by_text('New').first
assert new_space_button is not None
time.sleep(SLEEP_BEFORE_CLICK)
new_space_button.click()
name_input = context.browser.find_by_id('name').first
assert name_input.visible
context.browser.fill('name', context.space_name)
create_space_button = context.browser.find_by_id('createSpaceButton').first
assert create_space_button.visible
time.sleep(SLEEP_BEFORE_CLICK)
create_space_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_2(context):
"""Perform the second step to create new Space."""
print('Create new Space: step 2')
time.sleep(15)
quick_start_button = context.browser.find_by_text('Quickstart').first
assert quick_start_button is not None
time.sleep(SLEEP_BEFORE_CLICK)
quick_start_button.mouse_over()
time.sleep(SLEEP_BEFORE_CLICK)
quick_start_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_3(context):
"""Perform third step to create new Space."""
print('Create new Space: step 3')
time.sleep(15)
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_4(context):
"""Perform fourth step to create new Space."""
print('Create new Space: step 4')
release_radio = context.browser.find_by_value('Release').first
assert release_radio is not None
time.sleep(SLEEP_BEFORE_CLICK)
release_radio.click()
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_5(context):
"""Perform fifth step to create new Space."""
print('Create new Space: step 5')
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_6(context):
"""Perform sixth step to create new Space."""
print('Create new Space: step 6')
finish_button = context.browser.find_by_id('forge-finish-button').first
assert finish_button is not None
print(finish_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
finish_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_7(context):
"""Perform the last step to create new Space.
Click the OK button on the last page of the forge wizard.
This step is needed so the repo will be shown on the Space page!
"""
print('Create new Space: step 7')
finish_button = context.browser.find_by_id('forge-ok-button').first
assert finish_button is not None
print(finish_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
finish_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def spaces_page(context):
"""Go to the Spaces page with list of available Spaces."""
print("Spaces page")
url = urljoin(context.server, context.username + "/_spaces")
context.browser.visit(url)
space_names = get_all_existing_space_names(context.browser)
new_space_name = generate_unique_space_name(space_names)
context.space_name = new_space_name
print("Unique name for new Space\n " + new_space_name)
create_new_space_step_1(context)
create_new_space_step_2(context)
create_new_space_step_3(context)
create_new_space_step_4(context)
create_new_space_step_5(context)
create_new_space_step_6(context)
create_new_space_step_7(context)
def check_text_presence(context, text):
"""Check if the given text is present on the current web page."""
tag = context.browser.find_by_text(text).first
assert tag is not None
print(" The text '{t}' is found on the page".format(t=text))
def stack_recommendation_on_space_page(context):
"""Check the presence of stack recommendation on the Space page."""
url = urljoin(context.server, context.username + "/" + context.space_name)
print("Going to the Space {s}".format(s=context.space_name))
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
recommendation1 = 'Recommendation: Change io.vertx:vertx-web : 3.4.1'
check_text_presence(context, recommendation1)
recommendation2 = 'Recommendation: Change io.vertx:vertx-core : 3.4.1'
check_text_presence(context, recommendation2)
time.sleep(SLEEP_BETWEEN_PAGES)
def stack_recommendation_on_pipelines_page(context):
"""Check the presence of stack recommendation on the Pipelines page."""
url = urljoin(context.server, context.username + "/" + context.space_name +
"/create/pipelines")
print("Going to the pipeline page for the Space {s}".format(
s=context.space_name))
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
check_text_presence(context, "Stack Reports")
link = context.browser.find_by_text("Stack Reports")
link.click()
time.sleep(SLEEP_BETWEEN_PAGES)
# TODO - ask why the text is different: Recommendation/Recommended
recommendation1 = 'Recommended - Change io.vertx:vertx-web : 3.4.1'
check_text_presence(context, recommendation1)
recommendation2 = 'Recommended - Change io.vertx:vertx-core : 3.4.1'
check_text_presence(context, recommendation2)
time.sleep(SLEEP_BETWEEN_PAGES)
def stack_recommendation(context):
"""Check the presence of stack recommendation on all relevant pages on OpenShift.io."""
stack_recommendation_on_space_page(context)
    stack_recommendation_on_pipelines_page(context)
def run_tests(engine, server, username, password):
"""Start all UI tests."""
context = Context(server, username, password)
with Browser(engine) as browser:
context.browser = browser
front_page(context)
login_page(context)
spaces_page(context)
# it is really needed to wait for > 10 minutes here
time.sleep(60 * 10)
stack_recommendation(context)
def main():
"""Start all UI tests by using the provided environment variables."""
check_setup()
server = os.environ.get('TARGET_SERVER')
username = os.environ.get('OPENSHIFT_USERNAME')
password = os.environ.get('OPENSHIFT_PASSWORD')
engine = os.environ.get('BROWSER_ENGINE', 'chrome')
print("Using the following browser engine {e}".format(e=engine))
run_tests(engine, server, username, password)
if __name__ == "__main__":
main()
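# Example invocation (illustrative; the server URL and credentials are assumptions,
# the environment variable names are the ones read in main() above):
#   TARGET_SERVER=https://openshift.io \
#   OPENSHIFT_USERNAME=user OPENSHIFT_PASSWORD=secret \
#   BROWSER_ENGINE=chrome python3 test.py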
| tisnik/fabric8-analytics-common | ui-tests/test.py | Python | apache-2.0 | 10,299 | ["VisIt"] | a502d799af214617ced9873ebca3415a5483de57206887f366f9e730c19b0486 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import collections
import decimal
import gtk
from kiwi.datatypes import ValidationError
from kiwi.python import Settable
from kiwi.ui.forms import TextField
from stoqlib.api import api
from stoqlib.domain.sale import Sale
from stoqlib.domain.event import Event
from stoqlib.domain.loan import Loan
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.dialogs.credentialsdialog import CredentialsDialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.translation import stoqlib_gettext as _
class DiscountEditor(BaseEditor):
"""An editor for applying discounts
It has a simple entry that understands discount values and discount
percentages, for instance, '10.5' to give a $10.5 discount on the
sale, and '10.5%' to give a 10.5% discount on the sale.
"""
title = _('Select discount to apply')
model_type = object
confirm_widgets = ['discount']
@cached_property()
def fields(self):
return collections.OrderedDict(
discount=TextField(_('Discount to apply'), mandatory=True)
)
def __init__(self, store, model, user=None, visual_mode=False):
if not isinstance(model, (Sale, Loan)):
raise TypeError("Expected Sale or Loan, found: %r" % type(model))
self._user = user
BaseEditor.__init__(self, store, model=model, visual_mode=visual_mode)
#
# BaseEditor
#
def setup_proxies(self):
self.discount.set_tooltip_text(_("Use absolute or percentage (%) value"))
# We need to put discount on a proxy or else it won't be validated
# on its validate callback
self.add_proxy(Settable(discount=u''), ['discount'])
def on_confirm(self):
price = self.model.get_sale_base_subtotal()
discount = self._get_discount_percentage()
new_price = price - (price * discount / 100)
# If user that authorized the discount is not the current user
if discount > 0 and self._user is not api.get_current_user(self.store):
Event.log_sale_discount(store=self.store,
sale_number=self.model.identifier,
user_name=self._user.username,
discount_value=discount,
original_price=price,
new_price=new_price)
self.model.set_items_discount(self._get_discount_percentage())
#
# Private
#
def _get_discount_percentage(self):
discount = self.discount.get_text().strip()
discount = discount.replace(',', '.')
if discount.endswith('%'):
percentage = True
discount = discount[:-1]
else:
percentage = False
if not discount:
return None
# Don't allow operators or anything else. The rest of the string
# will be validated by decimal below
if not discount[0].isdigit():
return None
try:
discount = decimal.Decimal(discount)
except decimal.InvalidOperation:
return None
if not percentage:
discount = (discount / self.model.get_sale_base_subtotal()) * 100
return discount
#
# Callbacks
#
def on_discount__icon_press(self, entry, icon_pos, event):
if icon_pos != gtk.ENTRY_ICON_SECONDARY:
return
# Ask for the credentials of a different user that can possibly allow
# a bigger discount
self._user = run_dialog(CredentialsDialog, self, self.store)
if self._user:
self.discount.validate(force=True)
def on_discount__validate(self, widget, value):
if not value:
return
discount = self._get_discount_percentage()
if discount is None:
return ValidationError(_("The discount syntax is not valid"))
self._user = self._user or api.get_current_user(self.store)
max_discount = self._user.profile.max_discount
if discount > max_discount:
return ValidationError(
_("You are only allowed to give a discount of %d%%") % (
max_discount, ))
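# Worked example for _get_discount_percentage (illustrative; assumes a sale whose
# base subtotal is 200):
#   "10%"  -> Decimal("10")              # already a percentage
#   "15"   -> (15 / 200) * 100 = 7.5     # absolute value converted to a percentage
#   "x10"  -> None                       # rejected: first character is not a digit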
| tiagocardosos/stoq | stoqlib/gui/editors/discounteditor.py | Python | gpl-2.0 | 5,161 | ["VisIt"] | 0301e49af6007b19818588cb9587b07f9672548f48aba24085ae7850ad4d2106 |
import os
import sys
from utils import *
import operator
from time import localtime, strftime
import argparse
import re
from Bio import SeqIO
from abifpy import Trace
import pylab
from Bio.Blast import NCBIXML
from Bio.Blast import NCBIWWW
from Bio import Entrez
# usage: python /home/williamslab/scripts/python/blast_search_and_parse.py -i f.fasta
def main(args):
# read sanger seq files
# output to format
# blast
parser = argparse.ArgumentParser(description='Analyze Sanger seq abi files')
parser.add_argument('-i', '--infile') # file containing blast results to be processed
parser.add_argument('-o', '--outfile', default="default_output_file") # output filename
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
parser.add_argument('-e', '--extension', default='fasta') # output file format (extension) options allowed: fasta
#parser.add_argument('-l', '--minseqlength', default=250, type=int) # minimum sequence length before trimming
#parser.add_argument('-q', '--phredqthreshold', default=25, type=int) # phred quality threshold, 1 error in approx. 316 bases
# http://www.sourcecodebrowser.com/python-biopython/1.59/namespace_bio_1_1_seq_i_o_1_1_abi_i_o.html#a7fb9b10aa5e88c3f1da16b212f364a3a
#parser.add_argument('-r', '--revprimer', default='1492R') # name of reverse primer
#parser.add_argument('-s', '--fillerstring', default='SWG__') # string to make the header name >10 chars. long to make libshuff compatible
args = parser.parse_args()
if len(sys.argv)==1 :
parser.print_help()
sys.exit('\nat least one argument required\n')
infile = args.infile
outfilename = args.outfile
delim = args.delimiter
extens = args.extension
#segment = args.minseqlength
#query_blast(infile, extens, outfilename, delim)
query_blast_individ(infile, extens, outfilename, delim)
#http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
# blast all queries at once as a single string
def query_blast(infile, extens, outfilename, delim):
xml_files = list() # list of xml filenames of all queries # since only one query of multiple subqueries is sent, this will be only one file
fasta_str = ''.join([rec.format(extens) for rec in SeqIO.parse(infile, extens)])
#fasta_str='tacttgttgatattggatcgaacaaactggagaaccaacatgctcacgtcacttttagtcccttacatattcctc'
results_handle = NCBIWWW.qblast("blastn", "nt", fasta_str ,\
hitlist_size=10, nucl_penalty=-2, nucl_reward=1, megablast="TRUE")
xml_file = outfilename + "_megablast_results.xml"
xml_files.append(xml_file) # add file name to list of xmls
save_file = open(xml_file, "w")
save_file.write(results_handle.read())
save_file.close()
results_handle.close()
parse_blast_xmls(outfilename, xml_files, delim)
# blast each query by itself (individually)
def query_blast_individ(infile, extens, outfilename, delim):
xml_files = list() # list of xml filenames of all queries
for rec in SeqIO.parse(infile, extens):
fasta_str = rec.format(extens)
results_handle = NCBIWWW.qblast("blastn", "nt", fasta_str ,\
hitlist_size=10, nucl_penalty=-2, nucl_reward=1, megablast="TRUE")
xml_file = outfilename + "_query_" + rec.id + "_individual_megablast_results.xml"
xml_files.append(xml_file) # add file name to list of xmls
save_file = open(xml_file, "w")
save_file.write(results_handle.read())
save_file.close()
results_handle.close()
parse_blast_xmls(outfilename, xml_files, delim)
'''
qblast options:
http://www.ncbi.nlm.nih.gov/BLAST/Doc/urlapi.html
why to use megablast:
http://www.ncbi.nlm.nih.gov/blast/Why.shtml
http://www.ncbi.nlm.nih.gov/staff/tao/URLAPI/new/node81.html
http://www.ncbi.nlm.nih.gov/books/NBK25497/#chapter2.T._entrez_unique_identifiers_ui
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch
Other non-python resources if needed:
https://www.biostars.org/p/13452/
http://www.polarmicrobes.org/?p=759
http://search.cpan.org/~cjfields/BioPerl/Bio/DB/Taxonomy.pm
http://search.cpan.org/~motif/Bio-LITE-Taxonomy-NCBI-0.09/lib/Bio/LITE/Taxonomy/NCBI.pm
'''
#Usage, opens an outfile and then parses any number of .xml files (from the xml_files list) into that outfile, printing all hits
def parse_blast_xmls(outfilename, xml_files, delim):
outfile = outfilename+"_parsed_megablast_results.txt"
OUT = open(outfile, 'w')
########## Double check, THESE RESULTS MAY NOT SORTED FROM TOP TO WORST HIT #################
OUT.write("Query Name\tQuery Length\tSubject Name\tSubject Length\tAlignment Length\tQuery Sequence\tHsp Score\tHsp Expect\tHsp Identities\tPercent Match\tNumber_of_gaps\n")
for xml_file in xml_files:
result_handle = open(xml_file)
blast_records = NCBIXML.parse(result_handle)
for rec in blast_records:
for alignment in rec.alignments:
for hsp in alignment.hsps:
OUT.write(str(rec.query) + '\t' + str(rec.query_length) + '\t' + str(alignment.title) + '\t' + str(alignment.length) + '\t' + str(hsp.align_length) + '\t' + str(hsp.query) + '\t' + str(hsp.score) + '\t' + str(hsp.expect) + '\t' + str(hsp.identities) + '\t' + str(float(hsp.identities)/int(hsp.align_length)) + '\t' + str(hsp.gaps) + '\n')
result_handle.close()
OUT.close()
parse_gis_megab_result(outfilename, outfile, delim)
# http://www.ncbi.nlm.nih.gov/books/NBK25497/table/chapter2.T._entrez_unique_identifiers_ui/?report=objectonly
# https://www.biostars.org/p/99855/
def parse_gis_megab_result(outfilename, parsed_blast_infile, delim):
# for each query seq. in the file, use the gi for top three (assuming the provided results were already desc. sorted as per top hits)
lines = read_file(parsed_blast_infile)
queries_gis = dict()
gid_col = 0 # 0 based index
perc_match_col = 0 # 0 based index
for l in lines:
l = l.strip()
if '#' in l or not l:
continue
contents = l.split(delim)
if 'Subject Name' in contents or 'Percent Match' in contents:
gid_col = contents.index('Subject Name')
perc_match_col = contents.index('Percent Match')
continue
query = contents[0]
hit_id = contents[gid_col].split('|')[1] # gi|703126872|ref|XM_010105390.1| Morus notabilis hypothetical protein partial mRNA
perc_match = contents[perc_match_col]
if query not in queries_gis:
queries_gis[query] = [[hit_id, perc_match]] # list of lists
#queries_gis[query] = [hit_id]
elif len(queries_gis[query]) < 3:
#queries_gis[query].append(hit_id)
queries_gis[query].append([hit_id, perc_match])
#print queries_gis
#download_parse_gis_per_query(outfilename, queries_gis)
download_parse_indiv_gis(outfilename, queries_gis)
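# Shape of the queries_gis mapping built above (illustrative values; the gi numbers
# are the ones mentioned in comments elsewhere in this script): at most three
# [gi, percent_match] pairs are kept per query, e.g.
#   { 'query_1': [ ['703126872', '0.98'], ['568824607', '0.95'] ] }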
def download_parse_indiv_gis(outfilename, queries_gis):
results_organism_taxa_list = list()
header_Str = '#query\tgi\tperc_match\tGBSeq_primary-accession\tGBSeq_organism\tGBSeq_taxonomy'
for q in queries_gis:
#for gid in queries_gis[q]:
for gid_pm in queries_gis[q]:
gid = gid_pm[0]
pm = gid_pm[1]
filename = "gb_" + gid +".xml"
Entrez.email = "richrr@vt.edu"
# avoid redownloading same files
if not os.path.isfile(filename):
handle = Entrez.efetch(db="nucleotide", id=gid, rettype="gb", retmode="xml")
#save entrez result to xml file
out_handle = open(filename, "w")
out_handle.write(handle.read())
out_handle.close()
handle.close()
# parse the entrez xml file
gb_records = Entrez.parse(open(filename), validate=False)
for record in gb_records:
#str = q + '\t' + gid + '\t' + record['GBSeq_primary-accession'] + '\t' + record['GBSeq_organism'] + '\t' + record['GBSeq_taxonomy']
str = q + '\t' + gid + '\t' + pm + '\t' + record['GBSeq_primary-accession'] + '\t' + record['GBSeq_organism'] + '\t' + record['GBSeq_taxonomy']
results_organism_taxa_list.append(str)
#print "%s" % (str)
results_taxa_file = outfilename+"_parsed_wtaxa_results.txt"
writeLIST_to_file(results_organism_taxa_list, results_taxa_file)
def download_parse_gis_per_query(outfilename, queries_gis):
# this would reduce the number of calls to NCBI by simultaneously downloading file for multiple gis
# need to check how it behaves if the same gid is given multiple times in a list for a query
# may not be able to print the percent match associated with each gi for a query
# a hack would be to search the gi name from the record, and use the corressp perc match from the list
# in case the gi is repeated in the list, the perc match really doesn't matter
results_organism_taxa_list = list()
for q in queries_gis:
#gid_list = queries_gis[q]
gid_list = [gid_pm[0] for gid_pm in queries_gis[q]]
filename = "q_" + q +".xml"
Entrez.email = "richrr@vt.edu"
handle = Entrez.efetch(db="nucleotide", id=gid_list, rettype="gb", retmode="xml")
#save entrez result to xml file
out_handle = open(filename, "w")
out_handle.write(handle.read())
out_handle.close()
handle.close()
# parse the entrez xml file
gb_records = Entrez.parse(open(filename), validate=False)
for record in gb_records:
str = q + '\t' + record['GBSeq_other-seqids'][1] + '\t' + record['GBSeq_primary-accession'] + '\t' + record['GBSeq_organism'] + '\t' + record['GBSeq_taxonomy']
results_organism_taxa_list.append(str)
#print "%s" % (str)
results_taxa_file = outfilename+"_parsed_gis_per_query_wtaxa_results.txt"
writeLIST_to_file(results_organism_taxa_list, results_taxa_file)
'''
gid="186972394"
gid="568824607"
filename = "gb_" + gid+".xml"
Entrez.email = "richrr@vt.edu"
handle = Entrez.efetch(db="nucleotide", id=gid, rettype="gb", retmode="xml")
#save to xml file
out_handle = open(filename, "w")
out_handle.write(handle.read())
out_handle.close()
handle.close()
# parse the entrez xml file
gb_records = Entrez.parse(open(filename), validate=False)
for record in gb_records:
print "%s\t%s\t%s" % (record['GBSeq_primary-accession'], record['GBSeq_organism'] , record['GBSeq_taxonomy'])
'''
'''
http://biopython.org/DIST/docs/tutorial/Tutorial.html#sec:entrez-webenv
use the taxonomic strategy if the above gi identifiers fail
>>> from Bio import Entrez
>>> Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
>>> handle = Entrez.esearch(db="Taxonomy", term="Cypripedioideae")
>>> record = Entrez.read(handle)
>>> record["IdList"][0]
'158330'
Now, we use efetch to download this entry in the Taxonomy database, and then parse it:
>>> handle = Entrez.efetch(db="Taxonomy", id="158330", retmode="xml")
>>> records = Entrez.read(handle)
Again, this record stores lots of information:
>>> records[0].keys()
[u'Lineage', u'Division', u'ParentTaxId', u'PubDate', u'LineageEx',
u'CreateDate', u'TaxId', u'Rank', u'GeneticCode', u'ScientificName',
u'MitoGeneticCode', u'UpdateDate']
We can get the lineage directly from this record:
>>> records[0]["Lineage"]
'cellular organisms; Eukaryota; Viridiplantae; Streptophyta; Streptophytina;
Embryophyta; Tracheophyta; Euphyllophyta; Spermatophyta; Magnoliophyta;
Liliopsida; Asparagales; Orchidaceae'
'''
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
| richrr/scripts | python/blast_search_and_parse_indivi_queries.py | Python | gpl-3.0 | 11,742 | ["BLAST", "BioPerl", "Biopython"] | 94077ce5a6a66d0a6ebe89c9eaabb4eba035aff81220c85686a9f4ee5c1103eb |
#!/usr/bin/env python3
import optparse
import re
def parse_options():
"""
Parse the options given to the script
"""
parser = optparse.OptionParser(description='Get unmatched blast queries')
parser.add_option('-f', '--fasta', dest='fasta_file',
help='Query fasta file used during blast')
parser.add_option('-b', '--blast', dest='blast_file',
help='Blast tabular output (queries in 1st column)')
parser.add_option('-o', '--output', dest='output_file',
help='Output file name')
(options, args) = parser.parse_args()
if len(args) > 0:
parser.error('Wrong number of arguments')
return options
def get_matched(blast_file):
"""
Get a dictionary of all the queries that got a match
"""
matched = dict()
with open(blast_file, 'r') as infile:
for line in infile:
fields = line.split("\t")
query_id = fields[0]
matched[query_id] = 1
return matched
def get_unmatched(output_file, fasta_file, matched):
"""
Compare matched queries to the query fasta file and print unmatched ones to the output
"""
output_file_handle = open(output_file, 'w')
unmatched = False
end = re.compile(r".+\W$")
with open(fasta_file, 'r') as infile:
for line in infile:
if line.startswith('>'):
subline = line[1:].rstrip() # qid are 100chars long in blast
if end.match(subline) is not None:
subline = subline[:-1]
if subline not in matched:
output_file_handle.write(line)
unmatched = True
else:
unmatched = False
elif unmatched:
output_file_handle.write(line)
output_file_handle.close()
def __main__():
opts = parse_options()
matched = get_matched(opts.blast_file)
get_unmatched(opts.output_file, opts.fasta_file, matched)
if __name__ == "__main__":
    __main__()
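# Example invocation (illustrative file names):
#   python3 blast_unmatched.py -f queries.fasta -b blast_hits.tsv -o unmatched.fasta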
| ARTbio/tools-artbio | tools/blast_unmatched/blast_unmatched.py | Python | mit | 2,034 | ["BLAST"] | 2e5bc8b14ceb907d1aa7d09912be3c01a6c92bb2c845d0d21f3bf2452af85324 |
import numpy as np
from .control import model_setup
from .cp_confocal import threed
from .cp_triplet import trip
from .cp_mix import double_pnum
# 3D + 3D + TT Gauß
# Model 6043
def CF_Gxyz_gauss_3D3DTT(parms, tau):
u""" Two-component three-dimensional free diffusion
with a Gaussian laser profile, including two triplet components.
The triplet factor takes into account a blinking term.
Set *T* or *τ_trip* to 0, if no triplet component is wanted.
particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
particle2 = α*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
norm = (F₁ + α*(1-F₁))²
G = 1/n*(particle1 + particle2)*triplet1*triplet2/norm + offset
*parms* - a list of parameters.
Parameters (parms[i]):
[0] n Effective number of particles in confocal volume
(n = n₁+n₂)
[1] τ₁ Diffusion time of particle species 1
[2] τ₂ Diffusion time of particle species 2
[3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
0 <= F₁ <= 1
[4] SP SP=z₀/r₀, Structural parameter,
describes elongation of the confocal volume
[5] α Relative molecular brightness of particle
2 compared to particle 1 (α = q₂/q₁)
[6] τ_trip₁ Characteristic residence time in triplet state
[7] T₁ Fraction of particles in triplet (non-fluorescent) state
0 <= T < 1
[8] τ_trip₂ Characteristic residence time in triplet state
[9] T₂ Fraction of particles in triplet (non-fluorescent) state
0 <= T < 1
[10] offset
*tau* - lag time
"""
n = parms[0]
taud1 = parms[1]
taud2 = parms[2]
F = parms[3]
SP = parms[4]
alpha = parms[5]
tautrip1 = parms[6]
T1 = parms[7]
tautrip2 = parms[8]
T2 = parms[9]
off = parms[10]
g = double_pnum(n=n,
F1=F,
alpha=alpha,
comp1=threed,
comp2=threed,
kwargs1={"tau": tau,
"taudiff": taud1,
"SP": SP},
kwargs2={"tau": tau,
"taudiff": taud2,
"SP": SP},
)
tr1 = trip(tau=tau, T=T1, tautrip=tautrip1)
tr2 = trip(tau=tau, T=T2, tautrip=tautrip2)
G = off + g * tr1 * tr2
return G
def supplements(parms, countrate=None):
u"""Supplementary parameters:
[11] n₁ = n*F₁ Particle number of species 1
[12] n₂ = n*(1-F₁) Particle number of species 2
"""
# We can only give you the effective particle number
n = parms[0]
F1 = parms[3]
Info = list()
# The enumeration of these parameters is very important for
# plotting the normalized curve. Countrate must come out last!
Info.append([u"n\u2081", n*F1])
Info.append([u"n\u2082", n*(1.-F1)])
if countrate is not None:
# CPP
cpp = countrate/n
Info.append(["cpp [kHz]", cpp])
return Info
parms = [
25, # n
5, # taud1
1000, # taud2
0.5, # F
5, # SP
1.0, # alpha
0.001, # tautrip1
0.01, # T1
0.002, # tautrip2
0.01, # T2
0.0 # offset
]
# Boundaries
# strictly positive
boundaries = [[0, np.inf]]*len(parms)
# F
boundaries[3] = [0, .9999999999999]
# T
boundaries[7] = [0, .9999999999999]
boundaries[9] = [0, .9999999999999]
boundaries[-1] = [-np.inf, np.inf]
model_setup(
modelid=6043,
name="Separate 3D diffusion with double triplet (confocal)",
comp="T+T+3D+3D",
mtype="Confocal (Gaussian) with double triplet",
fctn=CF_Gxyz_gauss_3D3DTT,
par_labels=[
u"n",
u"τ"+u"\u2081"+" [ms]",
u"τ"+u"\u2082"+" [ms]",
u"F"+u"\u2081",
u"SP",
u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
u"τ_trip₁ [ms]",
u"T₁",
u"τ_trip₂ [ms]",
u"T₂",
u"offset"
],
par_values=parms,
par_vary=[True, True, True, True, False,
False, False, False, False, False, False],
par_boundaries=boundaries,
par_constraints=[[2, ">", 1], [6, "<", 1], [8, ">", 6]],
par_hr_labels=[
u"n",
u"τ₁ [ms]",
u"τ₂ [ms]",
u"F₁",
u"SP",
u"\u03b1"+u" (q₂/q₁)",
u"τ_trip₁ [µs]",
u"T₁",
u"τ_trip₂ [µs]",
u"T₂",
u"offset"
],
par_hr_factors=[
1., # n
1., # taud1
1., # taud2
1., # F
1., # SP
1., # alpha
1000., # tautrip1 [µs]
1., # T1
1000., # tautrip2 [µs]
1., # T2
1. # offset
],
supplementary_method=supplements
)
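# Minimal usage sketch (illustrative): evaluate the model on a lag-time grid using
# the default parameter list defined above; np is numpy as imported at the top of
# this module.
#   tau = np.logspace(-6, 2, 200)            # lag times in ms
#   g = CF_Gxyz_gauss_3D3DTT(parms, tau)     # correlation curve including the offset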
| paulmueller/PyCorrFit | pycorrfit/models/model_confocal_tt_3d_3d.py | Python | gpl-2.0 | 5,161 | ["Gaussian"] | 99f554b9e299c3c19230765ea26576bce2ad4e76b6e18e28ada2b045fdc9b449 |
#!/usr/bin/python
import numpy as np
import pylab as pl
from auryntools import *
# This code snippet assumes that you have run the example simulation
# sim_coba_binmon with mpirun and default parameters.
# This generates spk output files under /tmp/
num_mpi_ranks = 4
seconds = 0.1
filenames = [ "/tmp/coba.%i.e.spk"%i for i in range(num_mpi_ranks) ]
sf = AurynBinarySpikeView(filenames)
spikes = np.array(sf.get_last(seconds))
pl.scatter(spikes[:,0], spikes[:,1])
pl.xlabel("Time [s]")
pl.ylabel("Neuron ID")
pl.show()
| idiot-z/auryn | tools/python/merged_spike_raster.py | Python | gpl-3.0 | 533 | ["NEURON"] | fa33cf03c86f953ff63d3350445dd22a70a643e0362c9c41557cb22bae601410 |
#!/usr/bin/env python
"""
Adds Manually created builds and chrom info to Galaxy's info tables
Usage:
python add_manual_builds.py input_file builds.txt chrom_length_dir
"""
import sys,os
def add_manual_builds(input_file, build_file, chr_dir):
#determine existing builds, so as to not overwrite
existing_builds = []
for line in open(build_file):
try:
if line.startswith("#"): continue
existing_builds.append(line.replace("\n","").replace("\r","").split("\t")[0])
except:
continue
build_file_out = open(build_file,'a')
for line in open(input_file):
try:
fields = line.replace("\n","").replace("\r","").split("\t")
build = fields.pop(0)
if build in existing_builds: continue # if build exists, leave alone
name = fields.pop(0)
try: # get chrom lens if included in file, otherwise still add build
chrs = fields.pop(0).split(",")
except:
chrs = []
print>>build_file_out, build+"\t"+name+" ("+build+")"
if chrs: # create len file if provided chrom lens
chr_len_out=open( os.path.join(chr_dir,build+".len"),'w')
for chr in chrs:
print>>chr_len_out, chr.replace("=","\t")
chr_len_out.close()
except:
continue
build_file_out.close()
if __name__ == "__main__":
if len(sys.argv) < 4:
print "USAGE: python add_manual_builds.py input_file builds.txt chrom_length_dir"
sys.exit(1)
input_file = sys.argv[1]
build_file = sys.argv[2]
chr_dir = sys.argv[3]
add_manual_builds(input_file,build_file,chr_dir)
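# Example input line (illustrative values), tab-separated, with optional chromosome
# lengths given as name=length pairs separated by commas:
#   myBuild1<TAB>My Custom Build<TAB>chr1=249250621,chr2=243199373
# This appends "myBuild1<TAB>My Custom Build (myBuild1)" to builds.txt and writes
# chrom_length_dir/myBuild1.len with one "chrom<TAB>length" line per chromosome.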
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/cron/add_manual_builds.py | Python | gpl-3.0 | 1,728 | ["Galaxy"] | a53bf2c24e969754d711896a1ef12dca1cf221e13432ed2477a834005e10498d |
# -*- coding: utf-8 -*-
# Copyright 2013 by Petra Kubincova
import sys
try:
import Bio.bgzf as bgzf
except ImportError as e:
print "Missing Biopython module"
sys.exit(0)
import seq_structures as seq
def write_bgzf(writer, data):
if len(data) > seq.MAX_LEN:
raise BaseException()
else:
writer.write(data)
def get_writer(filename):
return bgzf.BgzfWriter(filename)
def close_writer(writer):
writer.close()
def write_bgzf_block(writer, block):
data = block.to_bytes()
bgzf_starts = []
for reference in data:
bgzf_starts.append(writer.tell())
write_bgzf(writer, reference.tostring())
writer.flush()
return bgzf_starts
def write_bgzf_blocks(filename, blocks):
writer = bgzf.BgzfWriter(filename)
for block in blocks:
data = block.to_bytes()
for reference in data:
block.bgzf_starts.append(writer.tell())
write_bgzf(writer, reference.tostring())
writer.flush()
writer.close()
class ReadBgzf:
def __init__(self, filename):
self._filename = filename
self._reader = bgzf.BgzfReader(filename)
self._references = {}
def _read_to_number(self, size):
data = self._reader.read(seq.round_up8(size)/8)
number = 0
for value in data:
if len(value) == 1:
value = ord(value)
else:
value = int(repr(value)[3:-1])
number <<= 8
number += int(value)
return number
def _read_sequence(self, size):
numbers = []
size_in_bytes = seq.round_up8(size)/8
for i in range(size_in_bytes):
number = self._read_to_number(1)
binary = bin(number)[2:].zfill(8)
if size - i*8 >= 8:
numbers.append(binary)
else:
numbers.append(binary[8 - (size - i*8):])
return ''.join(numbers)
def read_reference(self, virtual_offset):
if virtual_offset in self._references:
return self._references[virtual_offset]
self._reader.seek(virtual_offset)
length = self._read_to_number(seq.LEN_SIZE)
sequence = self._read_sequence(length)
inf_num = self._read_to_number(seq.INF_NUM_SIZE)
reference = seq.Reference(sequence, -2, -2, True)
# Read informants
infs = []
for j in range(inf_num):
inf_id = self._read_to_number(seq.INF_ID_SIZE)
inf_block_num = self._read_to_number(seq.BLOCKS_NUM_SIZE)
infs.append((inf_id, inf_block_num))
for j in range(len(infs)):
for k in range(infs[j][1]):
chr_id = self._read_to_number(seq.CHR_ID_SIZE)
strand = self._read_to_number(seq.STRAND_SIZE)
plus_strand = True
if not strand:
plus_strand = False
chr_pos = self._read_to_number(seq.POS_SIZE)
seq_pos = self._read_to_number(seq.POS_SIZE)
seq_pos -= seq.READ_SP_MINUS
seq_len = self._read_to_number(seq.LEN_SIZE)
bases_count = self._read_to_number(seq.POS_SIZE)
sequence = self._read_sequence(seq_len)
informant = seq.Informant(sequence, chr_id, chr_pos,
plus_strand, bases_count, seq_pos)
reference.add_informant(infs[j][0], informant)
self._references[virtual_offset] = reference
return reference
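# Minimal usage sketch (illustrative; "blocks" is assumed to be a list of
# seq_structures block objects providing to_bytes() and bgzf_starts, as used above):
#   write_bgzf_blocks("alignment.bgzf", blocks)            # fills block.bgzf_starts
#   reader = ReadBgzf("alignment.bgzf")
#   reference = reader.read_reference(blocks[0].bgzf_starts[0])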
| kpetra/maptool-python | bgzf_tool.py | Python | mit | 3,548 | ["Biopython"] | 42f79c1999648383b1be5588f58c32f63ce427b46db1ecabfe5a007611bebd0c |
from sympy import factorial, sqrt, exp, S, assoc_laguerre, Float
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
n, l
quantum numbers 'n' and 'l'
r
radial coordinate
Z
atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy import var
>>> var("r Z")
(r, Z)
>>> R_nl(1, 0, r, Z)
2*sqrt(Z**3)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
sqrt(2)*(-Z*r + 2)*sqrt(Z**3)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
sqrt(6)*Z*r*sqrt(Z**3)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1:
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
sqrt(2)*(-r + 2)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*sqrt(3)*(2*r**2/9 - 2*r + 3)*exp(-r/3)/27
For Silver atom, you would use Z=47:
>>> R_nl(1, 0, r, Z=47)
94*sqrt(47)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*sqrt(94)*(-47*r + 2)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*sqrt(141)*(4418*r**2/9 - 94*r + 3)*exp(-47*r/3)/27
The normalization of the radial wavefunction is:
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = S(n), S(l), S(r), S(Z)
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n+l)))
# This is an equivalent normalization coefficient, that can be found in
# some books. Both coefficients seem to be equally fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * assoc_laguerre(n_r, 2*l+1, r0).expand() * exp(-r0/2)
def E_nl(n, Z=1):
"""
Returns the energy of the state (n, l) in Hartree atomic units.
The energy doesn't depend on "l".
Examples
========
>>> from sympy import var
>>> from sympy.physics.hydrogen import E_nl
>>> var("n Z")
(n, Z)
>>> E_nl(n, Z)
-Z**2/(2*n**2)
>>> E_nl(1)
-1/2
>>> E_nl(2)
-1/8
>>> E_nl(3)
-1/18
>>> E_nl(3, 47)
-2209/18
"""
n, Z = S(n), S(Z)
if n.is_integer and (n < 1):
raise ValueError("'n' must be positive integer")
return -Z**2/(2*n**2)
def E_nl_dirac(n, l, spin_up=True, Z=1, c=Float("137.035999037")):
"""
Returns the relativistic energy of the state (n, l, spin) in Hartree atomic
units.
The energy is calculated from the Dirac equation. The rest mass energy is
*not* included.
n, l
quantum numbers 'n' and 'l'
spin_up
True if the electron spin is up (default), otherwise down
Z
atomic number (1 for Hydrogen, 2 for Helium, ...)
c
speed of light in atomic units. Default value is 137.035999037,
taken from: http://arxiv.org/abs/1012.3627
Examples
========
>>> from sympy.physics.hydrogen import E_nl_dirac
>>> E_nl_dirac(1, 0)
-0.500006656595360
>>> E_nl_dirac(2, 0)
-0.125002080189006
>>> E_nl_dirac(2, 1)
-0.125000416028342
>>> E_nl_dirac(2, 1, False)
-0.125002080189006
>>> E_nl_dirac(3, 0)
-0.0555562951740285
>>> E_nl_dirac(3, 1)
-0.0555558020932949
>>> E_nl_dirac(3, 1, False)
-0.0555562951740285
>>> E_nl_dirac(3, 2)
-0.0555556377366884
>>> E_nl_dirac(3, 2, False)
-0.0555558020932949
"""
if not (l >= 0):
raise ValueError("'l' must be positive or zero")
if not (n > l):
raise ValueError("'n' must be greater than 'l'")
if (l==0 and spin_up is False):
raise ValueError("Spin must be up for l==0.")
# skappa is sign*kappa, where sign contains the correct sign
if spin_up:
skappa = -l - 1
else:
skappa = -l
c = S(c)
beta = sqrt(skappa**2 - Z**2/c**2)
return c**2/sqrt(1+Z**2/(n + skappa + beta)**2/c**2) - c**2
| flacjacket/sympy | sympy/physics/hydrogen.py | Python | bsd-3-clause | 4,437 | ["DIRAC"] | 3c1468889557906490a2b73aa38ddb71a9ee18ded907fab92888bdf7687ad619 |
"""
BigchainDB: A Scalable Blockchain Database
For full docs visit https://docs.bigchaindb.com
"""
from setuptools import setup, find_packages
# get the version
version = {}
with open('bigchaindb/version.py') as fp:
exec(fp.read(), version)
# check if setuptools is up to date
def check_setuptools_features():
import pkg_resources
try:
list(pkg_resources.parse_requirements('foo~=1.0'))
except ValueError:
exit('Your Python distribution comes with an incompatible version '
'of `setuptools`. Please run:\n'
' $ pip3 install --upgrade setuptools\n'
'and then run this command again')
check_setuptools_features()
dev_require = [
'ipdb',
'ipython',
'watchdog',
'logging_tree',
]
docs_require = [
'Sphinx>=1.4.8',
'recommonmark>=0.4.0',
'sphinx-rtd-theme>=0.1.9',
'sphinxcontrib-httpdomain>=1.5.0',
'sphinxcontrib-napoleon>=0.4.4',
]
tests_require = [
'coverage',
'pep8',
'flake8',
'flake8-quotes==0.8.1',
'hypothesis',
'hypothesis-regex',
'pylint',
'pytest>=3.0.0',
'pytest-catchlog>=1.2.2',
'pytest-cov>=2.2.1',
'pytest-mock',
'pytest-xdist',
'pytest-flask',
'pytest-aiohttp',
'tox',
] + docs_require
benchmarks_require = [
'line-profiler==1.0',
]
install_requires = [
# TODO Consider not installing the db drivers, or putting them in extras.
'rethinkdb~=2.3', # i.e. a version between 2.3 and 3.0
'pymongo~=3.4',
'pysha3~=1.0.2',
'cryptoconditions~=0.6.0.dev',
'python-rapidjson==0.0.11',
'logstats~=0.2.1',
'flask>=0.10.1',
'flask-cors~=3.0.0',
'flask-restful~=0.3.0',
'requests~=2.9',
'gunicorn~=19.0',
'multipipes~=0.1.0',
'jsonschema~=2.5.1',
'pyyaml~=3.12',
'aiohttp~=2.0',
'python-rapidjson-schema==0.1.1',
'statsd==3.2.1',
]
setup(
name='BigchainDB',
version=version['__version__'],
description='BigchainDB: A Scalable Blockchain Database',
long_description=(
"BigchainDB allows developers and enterprises to deploy blockchain "
"proof-of-concepts, platforms and applications with a scalable blockchain "
"database. BigchainDB supports a wide range of industries and use cases "
"from identity and intellectual property to supply chains, energy, IoT "
"and financial ecosystems. With high throughput, sub-second latency and "
"powerful functionality to automate business processes, BigchainDB looks, "
"acts and feels like a database but has the core blockchain "
"characteristics that enterprises want."
),
url='https://github.com/BigchainDB/bigchaindb/',
author='BigchainDB Contributors',
author_email='dev@bigchaindb.com',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(exclude=['tests*']),
entry_points={
'console_scripts': [
'bigchaindb=bigchaindb.commands.bigchaindb:main'
],
},
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_require,
extras_require={
'test': tests_require,
'dev': dev_require + tests_require + docs_require + benchmarks_require,
'docs': docs_require,
},
package_data={'bigchaindb.common.schema': ['*.yaml']},
)
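# Illustrative install commands for the extras defined above (assumption: run from
# the repository root):
#   pip3 install -e .            # core dependencies only
#   pip3 install -e '.[test]'    # plus the test (and docs) requirements
#   pip3 install -e '.[dev]'     # plus dev, test, docs and benchmark requirements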
| stanta/darfchain | darfchain_docker_vagrant/setup.py | Python | gpl-3.0 | 3,948 | ["VisIt"] | 8b7bd7c48e08bf8d270780353992fabec9ecb6231b7caa8210ecac79c2996922 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# -------------------------------------------------------------------------
#
# Gramps modules
#
# -------------------------------------------------------------------------
from .. import Rule
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
# -------------------------------------------------------------------------
#
# HasAddressText
#
# -------------------------------------------------------------------------
class HasAddressText(Rule):
"""Rule that checks for text in personal addresses"""
labels = [_('Text:')]
name = _('People with an address containing <text>')
description = _("Matches people with a personal address containing "
"the given text")
category = _('General filters')
allow_regex = True
def apply(self, db, person):
for address in person.get_address_list():
for string in address.get_text_data_list():
if self.match_substring(0, string):
return True
return False
| SNoiraud/gramps | gramps/gen/filters/rules/person/_hasaddresstext.py | Python | gpl-2.0 | 1,909 | ["Brian"] | 138e86b3806efd0bf5f12ed4767e34ba31d0e97143c2e9849e34644ed5231c6d |
"""
This file provides kernels for par loops.
These are contained in this file as functions so that they can be tested separately.
"""
import numpy as np
from firedrake import dx
from firedrake.parloops import par_loop, READ, INC, WRITE
from pyop2 import ON_TOP, ON_BOTTOM
class GaussianElimination(object):
"""
A kernel for performing Gaussian elimination locally in each element
for the BoundaryRecoverer procedure.
The apply method takes a scalar field in DG1, the coordinates of the DoFs
and the effective" coordinates of the DoFs. These "effective" coordinates
correspond to the locations of the DoFs from the original space (i.e.
the one being recovered from).
This kernel expands the field in the cell as a local Taylor series,
keeping only the linear terms. By assuming that the field would be
correct for the effective coordinates, we can extrapolate to find
what the field would be at the actual coordinates, which involves
inverting a local matrix -- which is done by this kernel using
Gaussian elimination.
:arg DG1: A 1st order discontinuous Galerkin FunctionSpace.
"""
def __init__(self, DG1):
shapes = {"nDOFs": DG1.finat_element.space_dimension(),
"dim": DG1.mesh().topological_dimension()}
# EFF_COORDS are the effective coordinates
# ACT_COORDS are the actual coordinates
# DG1_OLD is the original field
# DG1 is the target field
# NUM_EXT is the field containing number of exterior nodes
# In 1D EFF_COORDS and ACT_COORDS have only a single index
# We can't generalise the expression without causing an error
# So here we write special bits of code for the 1D case vs multi-dimensional
if shapes['dim'] == 1:
eff_coord_expr = (
"""
A[i,0] = 1.0
A[i,1] = EFF_COORDS[i]
""")
act_coord_expr = (
"""
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii]
""")
else:
eff_coord_expr = (
"""
A[i,0] = 1.0
A[i,1] = EFF_COORDS[i,0]
if {nDOFs} == 3
A[i,2] = EFF_COORDS[i,1]
elif {nDOFs} == 4
A[i,2] = EFF_COORDS[i,1]
A[i,3] = EFF_COORDS[i,0]*EFF_COORDS[i,1]
elif {nDOFs} == 6
"""
# N.B we use {nDOFs} - 1 to access the z component in 3D cases
# Otherwise loopy tries to search for this component in 2D cases, raising an error
"""
A[i,2] = EFF_COORDS[i,1]
A[i,3] = EFF_COORDS[i,{dim}-1]
A[i,4] = EFF_COORDS[i,0]*EFF_COORDS[i,{dim}-1]
A[i,5] = EFF_COORDS[i,1]*EFF_COORDS[i,{dim}-1]
elif {nDOFs} == 8
A[i,2] = EFF_COORDS[i,1]
A[i,3] = EFF_COORDS[i,0]*EFF_COORDS[i,1]
A[i,4] = EFF_COORDS[i,{dim}-1]
A[i,5] = EFF_COORDS[i,0]*EFF_COORDS[i,{dim}-1]
A[i,6] = EFF_COORDS[i,1]*EFF_COORDS[i,{dim}-1]
A[i,7] = EFF_COORDS[i,0]*EFF_COORDS[i,1]*EFF_COORDS[i,{dim}-1]
end
""").format(**shapes)
act_coord_expr = (
"""
if {nDOFs} == 2
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0]
elif {nDOFs} == 3
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1]
elif {nDOFs} == 4
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1] + a[3]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1]
elif {nDOFs} == 6
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1] + a[3]*ACT_COORDS[iiii,{dim}-1] + a[4]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,{dim}-1] + a[5]*ACT_COORDS[iiii,1]*ACT_COORDS[iiii,{dim}-1]
elif {nDOFs} == 8
DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1] + a[3]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1] + a[4]*ACT_COORDS[iiii,{dim}-1] + a[5]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,{dim}-1] + a[6]*ACT_COORDS[iiii,1]*ACT_COORDS[iiii,{dim}-1] + a[7]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1]*ACT_COORDS[iiii,{dim}-1]
end
""").format(**shapes)
shapes['act_coord_expr'] = act_coord_expr
shapes['eff_coord_expr'] = eff_coord_expr
domain = (
"""
{{[i, ii_loop, jj_loop, kk, ll_loop, mm, iii_loop, kkk_loop, iiii]:
0 <= i < {nDOFs} and 0 <= ii_loop < {nDOFs} and
0 <= jj_loop < {nDOFs} and 0 <= kk < {nDOFs} and
0 <= ll_loop < {nDOFs} and 0 <= mm < {nDOFs} and
0 <= iii_loop < {nDOFs} and 0 <= kkk_loop < {nDOFs} and
0 <= iiii < {nDOFs}}}
""").format(**shapes)
instrs = (
"""
<int> ii = 0
<int> jj = 0
<int> ll = 0
<int> iii = 0
<int> jjj = 0
<int> i_max = 0
<float64> A_max = 0.0
<float64> temp_f = 0.0
<float64> temp_A = 0.0
<float64> c = 0.0
<float64> f[{nDOFs}] = 0.0
<float64> a[{nDOFs}] = 0.0
<float64> A[{nDOFs},{nDOFs}] = 0.0
"""
# We are aiming to find the vector a that solves A*a = f, for matrix A and vector f.
# This is done by performing row operations (swapping and scaling) to obtain A in upper diagonal form.
# N.B. several for loops must be executed in numerical order (loopy does not necessarily do this).
# For these loops we must manually iterate the index.
"""
if NUM_EXT[0] > 0.0
"""
# only do Gaussian elimination for elements with effective coordinates
"""
for i
"""
# fill f with the original field values and A with the effective coordinate values
"""
f[i] = DG1_OLD[i]
a[i] = 0.0
{eff_coord_expr}
end
"""
# now loop through rows/columns of A
"""
for ii_loop
A_max = abs(A[ii,ii])
i_max = ii
"""
# loop to find the largest value in the ii-th column
# set i_max as the index of the row with this largest value.
"""
jj = ii + 1
for jj_loop
if jj < {nDOFs}
if abs(A[jj,ii]) > A_max
i_max = jj
end
A_max = fmax(A_max, abs(A[jj,ii]))
end
jj = jj + 1
end
"""
# if the max value in the ith column isn't in the ii-th row, we must swap the rows
"""
if i_max != ii
"""
# swap the elements of f
"""
temp_f = f[ii] {{id=set_temp_f}}
f[ii] = f[i_max] {{id=set_f_imax, dep=set_temp_f}}
f[i_max] = temp_f {{id=set_f_ii, dep=set_f_imax}}
"""
# swap the elements of A
# N.B. kk runs from ii to (nDOFs-1) as elements below diagonal should be 0
"""
for kk
if kk > ii - 1
temp_A = A[ii,kk] {{id=set_temp_A}}
A[ii, kk] = A[i_max, kk] {{id=set_A_ii, dep=set_temp_A}}
A[i_max, kk] = temp_A {{id=set_A_imax, dep=set_A_ii}}
end
end
end
"""
# scale the rows below the ith row
"""
ll = ii + 1
for ll_loop
if ll > ii
if ll < {nDOFs}
"""
# find scaling factor
"""
c = - A[ll,ii] / A[ii,ii]
for mm
A[ll, mm] = A[ll, mm] + c * A[ii,mm]
end
f[ll] = f[ll] + c * f[ii]
end
end
ll = ll + 1
end
ii = ii + 1
end
"""
# do back substitution of upper diagonal A to obtain a
"""
iii = 0
for iii_loop
"""
# jjj starts at the bottom row and works upwards
"""
jjj = {nDOFs} - iii - 1 {{id=assign_jjj}}
a[jjj] = f[jjj] {{id=set_a, dep=assign_jjj}}
for kkk_loop
if kkk_loop > {nDOFs} - iii_loop - 1
a[jjj] = a[jjj] - A[jjj,kkk_loop] * a[kkk_loop]
end
end
a[jjj] = a[jjj] / A[jjj,jjj]
iii = iii + 1
end
end
"""
# Do final loop to assign output values
"""
for iiii
"""
# Having found a, this gives us the coefficients for the Taylor expansion with the actual coordinates.
"""
if NUM_EXT[0] > 0.0
{act_coord_expr}
"""
# if element is not external, just use old field values.
"""
else
DG1[iiii] = DG1_OLD[iiii]
end
end
""").format(**shapes)
self._kernel = (domain, instrs)
def apply(self, v_DG1_old, v_DG1, act_coords, eff_coords, num_ext):
"""
Performs the par loop for the Gaussian elimination kernel.
:arg v_DG1_old: the originally recovered field in DG1.
:arg v_DG1: the new target field in DG1.
:arg act_coords: the actual coordinates in vec DG1.
:arg eff_coords: the effective coordinates of the recovery in vec DG1.
:arg num_ext: the number of exterior DOFs in the cell, in DG0.
"""
par_loop(self._kernel, dx,
{"DG1_OLD": (v_DG1_old, READ),
"DG1": (v_DG1, WRITE),
"ACT_COORDS": (act_coords, READ),
"EFF_COORDS": (eff_coords, READ),
"NUM_EXT": (num_ext, READ)},
is_loopy_kernel=True)
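# Reference sketch (illustrative only, not used by the par_loop above): the
# loopy kernel in this class performs ordinary Gaussian elimination with
# partial pivoting followed by back substitution. The same algorithm in plain
# NumPy reads roughly as follows.
def _gaussian_elimination_reference(A, f):
    """Solve A a = f by partial-pivoting elimination (clarity sketch only)."""
    import numpy as np
    A = np.array(A, dtype=float)
    a = np.array(f, dtype=float)
    n = len(a)
    for i in range(n):
        # pivot: bring the row with the largest entry in column i up to row i
        p = i + int(np.argmax(np.abs(A[i:, i])))
        A[[i, p]], a[[i, p]] = A[[p, i]], a[[p, i]]
        # eliminate the entries below the diagonal
        for r in range(i + 1, n):
            c = A[r, i] / A[i, i]
            A[r, i:] -= c * A[i, i:]
            a[r] -= c * a[i]
    # back substitution on the upper-triangular system
    for i in reversed(range(n)):
        a[i] = (a[i] - np.dot(A[i, i + 1:], a[i + 1:])) / A[i, i]
    return a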
class Average(object):
"""
A kernel for the Averager object.
For vertices shared between cells, it computes the average
value from the neighbouring cells.
:arg V: The FunctionSpace of the target field for the Averager.
"""
def __init__(self, V):
shapes = {"nDOFs": V.finat_element.space_dimension(),
"dim": np.prod(V.shape, dtype=int)}
domain = "{{[i, j]: 0 <= i < {nDOFs} and 0 <= j < {dim}}}".format(**shapes)
# Loop over node extent and dof extent
# vo is v_out, v is the function in, w is the weight
# NOTE: Any bcs on the function v should just work.
instrs = (
"""
for i
for j
vo[i,j] = vo[i,j] + v[i,j] / w[i,j]
end
end
""")
self._kernel = (domain, instrs)
def apply(self, v_out, weighting, v_in):
"""
Perform the averaging par loop.
:arg v_out: the continuous target field.
:arg weighting: the weights to be used for the averaging.
:arg v_in: the input field.
"""
par_loop(self._kernel, dx,
{"vo": (v_out, INC),
"w": (weighting, READ),
"v": (v_in, READ)},
is_loopy_kernel=True)
class AverageWeightings(object):
"""
A kernel for finding the weights for the Averager object.
:arg V: The FunctionSpace of the target field for the Averager.
"""
def __init__(self, V):
shapes = {"nDOFs": V.finat_element.space_dimension(),
"dim": np.prod(V.shape, dtype=int)}
domain = "{{[i, j]: 0 <= i < {nDOFs} and 0 <= j < {dim}}}".format(**shapes)
# w is the weights
instrs = (
"""
for i
for j
w[i,j] = w[i,j] + 1.0
end
end
""")
self._kernel = (domain, instrs)
def apply(self, w):
"""
Perform the par loop for calculating the weightings for the Averager.
:arg w: the field to store the weights in.
"""
par_loop(self._kernel, dx,
{"w": (w, INC)},
is_loopy_kernel=True)
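# How the two kernels above are typically paired (a sketch, assuming Firedrake
# Function objects on the same space V; the names here are illustrative and not
# part of this module):
#
#     w = Function(V)
#     AverageWeightings(V).apply(w)      # w[i,j] counts how many cells touch each DOF
#     v_avg = Function(V)
#     Average(V).apply(v_avg, w, v_dg)   # v_avg accumulates v_dg / w over the cells
#
# so that DOFs shared between cells end up holding the mean of the surrounding
# cell values.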
class PhysicsRecoveryTop():
"""
A kernel for fixing the physics recovery method at the top boundary.
This takes a variable from the lowest order density space to the lowest
order temperature space.
"""
def __init__(self):
domain = ("{[i]: 0 <= i < 1}")
# CG1 is the uncorrected field that has been originally recovered
# DG1 is the corrected output field
instrs = (
"""
DG1[0] = CG1[0]
DG1[1] = -CG1[0] + 2 * CG1[1]
""")
self._kernel = (domain, instrs)
def apply(self, v_DG1, v_CG1):
"""
Performs the par loop.
:arg v_DG1: the target field to correct.
:arg v_CG1: the initially recovered uncorrected field.
"""
par_loop(self._kernel, dx,
args={"DG1": (v_DG1, WRITE),
"CG1": (v_CG1, READ)},
is_loopy_kernel=True,
iteration_region=ON_TOP)
class PhysicsRecoveryBottom():
"""
A kernel for fixing the physics recovery method at the bottom boundary.
This takes a variable from the lowest order density space to the lowest
order temperature space.
"""
def __init__(self):
domain = ("{[i]: 0 <= i < 1}")
# CG1 is the uncorrected field that has been originally recovered
# DG1 is the corrected output field
instrs = (
"""
DG1[0] = 2 * CG1[0] - CG1[1]
DG1[1] = CG1[1]
""")
self._kernel = (domain, instrs)
def apply(self, v_DG1, v_CG1):
"""
Performs the par loop.
:arg v_DG1: the target field to correct.
:arg v_CG1: the initially recovered uncorrected field.
"""
par_loop(self._kernel, dx,
args={"DG1": (v_DG1, WRITE),
"CG1": (v_CG1, READ)},
is_loopy_kernel=True,
iteration_region=ON_BOTTOM)
|
firedrakeproject/gusto
|
gusto/kernels.py
|
Python
|
mit
| 15,143
|
[
"Gaussian"
] |
cf835bba0335a462f2f9431ce8e5690fd8cd2b1c9cde04283e72eec68fa02e6f
|
'''Color functions for PyMOLProbity plugin.'''
from pymol import cmd
def get_pymol_color(color):
"""Return the PyMOL color corresponding to a Kinemage color name."""
color_list = {
# Color names defined in the KiNG source that aren't included in a
# standard PyMOL installation are listed here with the names of
# (approximately) equivalent PyMOL named colors.
#"king_color": "pymol_color",
"sea": "teal",
"sky": "skyblue",
"peach": "yelloworange",
"lilac": "arsenic",
"pinktint": "lightpink",
"peachtint": "lightorange",
"yellowtint": "paleyellow",
"greentint": "palegreen",
"bluetint": "lightblue",
"lilactint": "lithium",
"deadwhite": "white",
"deadblack": "black",
}
try:
return color_list[color]
    except KeyError:
        # Not a special Kinemage name; assume it is already a valid PyMOL color.
        return color
def get_color_rgb(color):
"""Given a color name, returns the RGB values."""
index = cmd.get_color_index(color)
rgb = cmd.get_color_tuple(index)
return rgb # e.g. (1.0, 1.0, 1.0)
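# Example usage (illustrative; assumes a running PyMOL session so that the
# `from pymol import cmd` import above succeeds):
if __name__ == '__main__':
    print(get_pymol_color('sea'))      # known Kinemage name -> 'teal'
    print(get_pymol_color('magenta'))  # unknown names are passed through unchanged
    print(get_color_rgb('white'))      # -> (1.0, 1.0, 1.0)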
|
jaredsampson/pymolprobity
|
pymolprobity/colors.py
|
Python
|
mit
| 1,139
|
[
"PyMOL"
] |
655a881665f7b1c9d5804261f8ded088805a3ab1494815af4861e17ab95024fe
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailItTemplate.content_html_template'
db.add_column(u'mailit_mailittemplate', 'content_html_template',
self.gf('django.db.models.fields.TextField')(default='Hello {{ person }}: <br />\nYou have a new message: <br />\n<strong>subject:</strong> {{ subject }} <br />\n<strong>content:</strong> {{ content }} <br />\n\n\nIf you want to see all the other messages please visit {{ writeit_url }}.<br />\nSeeya<br />\n--<br /><br />\nYou writeIt and we deliverit.'),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailItTemplate.content_html_template'
db.delete_column(u'mailit_mailittemplate', 'content_html_template')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contactos.contact': {
'Meta': {'object_name': 'Contact'},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bounced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['auth.User']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'contactos.contacttype': {
'Meta': {'object_name': 'ContactType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mailit.bouncedmessagerecord': {
'Meta': {'object_name': 'BouncedMessageRecord'},
'bounce_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'outbound_message': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['nuntium.OutboundMessage']", 'unique': 'True'})
},
u'mailit.mailittemplate': {
'Meta': {'object_name': 'MailItTemplate'},
'content_html_template': ('django.db.models.fields.TextField', [], {'default': "'Hello {{ person }}: <br />\\nYou have a new message: <br />\\n<strong>subject:</strong> {{ subject }} <br />\\n<strong>content:</strong> {{ content }} <br />\\n\\n\\nIf you want to see all the other messages please visit {{ writeit_url }}.<br />\\nSeeya<br />\\n--<br /><br />\\nYou writeIt and we deliverit.'"}),
'content_template': ('django.db.models.fields.TextField', [], {'default': "'Hello {{ person }}:\\nYou have a new message:\\nsubject: {{ subject }} \\ncontent: {{ content }}\\n\\n\\nIf you want to see all the other messages please visit {{ writeit_url }}.\\nSeeya\\n--\\nYou writeIt and we deliverit.'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject_template': ('django.db.models.fields.CharField', [], {'default': "'[WriteIT] Message: %(subject)s'", 'max_length': '255'}),
'writeitinstance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'mailit_template'", 'unique': 'True', 'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.membership': {
'Meta': {'object_name': 'Membership'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.message': {
'Meta': {'object_name': 'Message'},
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'confirmated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.outboundmessage': {
'Meta': {'object_name': 'OutboundMessage'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': "'10'"})
},
u'nuntium.writeitinstance': {
'Meta': {'object_name': 'WriteItInstance'},
'allow_messages_using_form': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autoconfirm_api_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderation_needed_in_all_messages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notify_owner_when_new_answer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writeitinstances'", 'to': u"orm['auth.User']"}),
'persons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'writeit_instances'", 'symmetrical': 'False', 'through': u"orm['nuntium.Membership']", 'to': u"orm['popit.Person']"}),
'rate_limiter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['mailit']
|
TEDICpy/write-it
|
mailit/migrations/0003_auto__add_field_mailittemplate_content_html_template.py
|
Python
|
gpl-3.0
| 11,683
|
[
"VisIt"
] |
17c6476141cc740dd201372577dd5934e6f684cd031786d9db72d3d911e07c4c
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.model import SuiteVisitor
class ReRunMerger(SuiteVisitor):
def __init__(self, result):
self.root = result.suite
self.current = None
def merge(self, merged):
merged.suite.visit(self)
def start_suite(self, suite):
try:
if not self.current:
self.current = self._find_root(suite)
else:
self.current = self._find(self.current.suites, suite.name)
except ValueError:
self._report_ignored(suite)
return False
def _find_root(self, suite):
if self.root.name != suite.name:
raise ValueError
return self.root
def _find(self, items, name):
for item in items:
if item.name == name:
return item
raise ValueError
def _report_ignored(self, item, test=False):
from robot.output import LOGGER
type = 'suite' if not test else 'test'
LOGGER.error("Merged %s '%s' is ignored because it is not found from "
"original result." % (type, item.longname))
def end_suite(self, suite):
self.current = self.current.parent
def visit_test(self, test):
try:
old = self._find(self.current.tests, test.name)
except ValueError:
self._report_ignored(test, test=True)
else:
test.message = self._create_merge_message(test, old)
index = self.current.tests.index(old)
self.current.tests[index] = test
def _create_merge_message(self, new, old):
return '\n'.join(['Test has been re-run and results replaced.',
'- - -',
'New status: %s' % new.status,
'New message: %s' % new.message,
'- - -',
'Old status: %s' % old.status,
'Old message: %s' % old.message])
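# Hedged usage sketch (not part of the original module): ReRunMerger is driven
# internally when results are combined with --merge, but the same merge can be
# done programmatically along these lines, assuming two result files exist:
#
#     from robot.api import ExecutionResult
#     original = ExecutionResult('output.xml')
#     rerun = ExecutionResult('rerun.xml')
#     ReRunMerger(original).merge(rerun)
#     original.save('merged.xml')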
|
userzimmermann/robotframework-python3
|
src/robot/result/rerunmerger.py
|
Python
|
apache-2.0
| 2,580
|
[
"VisIt"
] |
ae8f52591717899a387c70efda8c54b9edb9a8a8d591cc1d4b2f054860a8e671
|
from moviepy.editor import VideoFileClip
import matplotlib.pyplot as plt
import matplotlib.image as mplimg
import numpy as np
import cv2
blur_ksize = 5 # Gaussian blur kernel size
canny_lthreshold = 70 # Canny edge detection low threshold
canny_hthreshold = 150 # Canny edge detection high threshold
# Hough transform parameters
rho = 1
theta = np.pi / 180
threshold = 15
min_line_length = 60
max_line_gap = 30
def roi_mask(img, vertices):
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
mask_color = (255,) * channel_count
else:
mask_color = 255
cv2.fillPoly(mask, vertices, mask_color)
masked_img = cv2.bitwise_and(img, mask)
return masked_img
def draw_roi(img, vertices):
cv2.polylines(img, vertices, True, [255, 0, 0], thickness=2)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# draw_lines(line_img, lines)
draw_lanes(line_img, lines)
return line_img
def draw_lanes(img, lines, color=[255, 0, 0], thickness=8):
left_lines, right_lines = [], []
for line in lines:
for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments to avoid division by zero
            k = (y2 - y1) / (x2 - x1)
if k < 0:
left_lines.append(line)
else:
right_lines.append(line)
if (len(left_lines) <= 0 or len(right_lines) <= 0):
return img
clean_lines(left_lines, 0.1)
clean_lines(right_lines, 0.1)
left_points = [(x1, y1) for line in left_lines for x1,y1,x2,y2 in line]
left_points = left_points + [(x2, y2) for line in left_lines for x1,y1,x2,y2 in line]
right_points = [(x1, y1) for line in right_lines for x1,y1,x2,y2 in line]
right_points = right_points + [(x2, y2) for line in right_lines for x1,y1,x2,y2 in line]
left_vtx = calc_lane_vertices(left_points, 450, img.shape[0])
right_vtx = calc_lane_vertices(right_points, 450, img.shape[0])
cv2.line(img, left_vtx[0], left_vtx[1], color, thickness)
cv2.line(img, right_vtx[0], right_vtx[1], color, thickness)
def clean_lines(lines, threshold):
slope = [(y2 - y1) / (x2 - x1) for line in lines for x1, y1, x2, y2 in line]
while len(lines) > 0:
mean = np.mean(slope)
diff = [abs(s - mean) for s in slope]
idx = np.argmax(diff)
if diff[idx] > threshold:
slope.pop(idx)
lines.pop(idx)
else:
break
def calc_lane_vertices(point_list, ymin, ymax):
x = [p[0] for p in point_list]
y = [p[1] for p in point_list]
fit = np.polyfit(y, x, 1)
fit_fn = np.poly1d(fit)
xmin = int(fit_fn(ymin))
xmax = int(fit_fn(ymax))
return [(xmin, ymin), (xmax, ymax)]
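# Quick sanity check of calc_lane_vertices (illustrative comment only): three
# collinear points with x = 2*y should give endpoints back on that same line,
# e.g. calc_lane_vertices([(0, 0), (2, 1), (4, 2)], 0, 10) -> [(0, 0), (20, 10)].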
def process_an_image(img):
roi_vtx = np.array([[(0+20, img.shape[0]-60), (600, 450), (720, 450), (img.shape[1]-150, img.shape[0]-60)]])
# extract yellow line and white line
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
'''
>>> yellow = np.uint8([[[255, 255, 0]]])
>>> hsv_yellow = cv2.cvtColor(yellow, cv2.COLOR_RGB2HSV)
>>> hsv_yellow
array([[[ 30, 255, 255]]], dtype=uint8)
Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively
'''
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([40, 255, 255])
yellow_mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
# http://stackoverflow.com/questions/22588146/tracking-white-color-using-python-opencv
lower_white = np.array([0, 0, 215])
upper_white = np.array([180, 40, 255])
white_mask = cv2.inRange(hsv, lower_white, upper_white)
color_mask = cv2.bitwise_or(yellow_mask, white_mask)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
darken = (gray / 3).astype(np.uint8)
color_masked = cv2.bitwise_or(darken, color_mask)
blur_gray = cv2.GaussianBlur(color_masked, (blur_ksize, blur_ksize), 0, 0)
edges = cv2.Canny(blur_gray, canny_lthreshold, canny_hthreshold)
roi_edges = roi_mask(edges, roi_vtx)
line_img = hough_lines(roi_edges, rho, theta, threshold, min_line_length, max_line_gap)
res_img = cv2.addWeighted(img, 0.8, line_img, 1, 0)
'''
plt.figure()
plt.imshow(img)
plt.savefig('images/lane_original.png', bbox_inches='tight')
plt.figure()
plt.imshow(gray, cmap='gray')
plt.savefig('images/gray.png', bbox_inches='tight')
plt.figure()
plt.imshow(blur_gray, cmap='gray')
plt.savefig('images/blur_gray.png', bbox_inches='tight')
plt.figure()
plt.imshow(edges, cmap='gray')
plt.savefig('images/edges.png', bbox_inches='tight')
plt.figure()
plt.imshow(roi_edges, cmap='gray')
plt.savefig('images/roi_edges.png', bbox_inches='tight')
plt.figure()
plt.imshow(line_img, cmap='gray')
plt.savefig('images/line_img.png', bbox_inches='tight')
plt.figure()
plt.imshow(res_img)
plt.savefig('images/res_img.png', bbox_inches='tight')
plt.show()
'''
return res_img
# img = mplimg.imread("lane.jpg")
# process_an_image(img)
output = 'video_5_sol.mp4'
# clip = VideoFileClip("/Users/yujifan/Downloads/video_2.mp4")
clip = VideoFileClip("/Users/yujifan/Downloads/data/IMG_0906.MOV")
out_clip = clip.fl_image(process_an_image)
out_clip.write_videofile(output, audio=False)
|
framefreeze/HangDriver
|
lane_detection/lanedetection2.py
|
Python
|
mit
| 5,523
|
[
"Gaussian"
] |
e9617b3b64028d59b67ed85511b361265f17c61727f9a399c6323f4f5f69e6a6
|
from __future__ import print_function
"""Module for calculating phonons of periodic systems."""
import sys
import pickle
from math import sin, pi, sqrt
from os import remove
from os.path import isfile
import numpy as np
import numpy.linalg as la
import numpy.fft as fft
has_spglib = False
try:
from pyspglib import spglib
has_spglib = True
except ImportError:
pass
import ase.units as units
from ase.parallel import rank, barrier
from ase.dft import monkhorst_pack
from ase.io.trajectory import Trajectory
from ase.utils import opencew
class Displacement:
"""Abstract base class for phonon and el-ph supercell calculations.
Both phonons and the electron-phonon interaction in periodic systems can be
calculated with the so-called finite-displacement method where the
derivatives of the total energy and effective potential are obtained from
finite-difference approximations, i.e. by displacing the atoms. This class
provides the required functionality for carrying out the calculations for
the different displacements in its ``run`` member function.
Derived classes must overwrite the ``__call__`` member function which is
called for each atomic displacement.
"""
def __init__(self, atoms, calc=None, supercell=(1, 1, 1), name=None,
delta=0.01, refcell=None):
"""Init with an instance of class ``Atoms`` and a calculator.
Parameters
----------
atoms: Atoms object
The atoms to work on.
calc: Calculator
Calculator for the supercell calculation.
supercell: tuple
Size of supercell given by the number of repetitions (l, m, n) of
the small unit cell in each direction.
name: str
Base name to use for files.
delta: float
Magnitude of displacement in Ang.
refcell: str
Reference cell in which the atoms will be displaced. If ``None``,
corner cell in supercell is used. If ``str``, cell in the center of
the supercell is used.
"""
# Store atoms and calculator
self.atoms = atoms
self.calc = calc
# Displace all atoms in the unit cell by default
self.indices = range(len(atoms))
self.name = name
self.delta = delta
self.N_c = supercell
# Reference cell offset
if refcell is None:
# Corner cell
self.offset = 0
else:
# Center cell
N_c = self.N_c
self.offset = N_c[0] // 2 * (N_c[1] * N_c[2]) + N_c[1] // \
2 * N_c[2] + N_c[2] // 2
def __call__(self, *args, **kwargs):
"""Member function called in the ``run`` function."""
        raise NotImplementedError("Implement in derived classes!")
def set_atoms(self, atoms):
"""Set the atoms to vibrate.
Parameters
----------
atoms: list
Can be either a list of strings, ints or ...
"""
assert isinstance(atoms, list)
assert len(atoms) <= len(self.atoms)
if isinstance(atoms[0], str):
assert np.all([isinstance(atom, str) for atom in atoms])
sym_a = self.atoms.get_chemical_symbols()
# List for atomic indices
indices = []
for type in atoms:
indices.extend([a for a, atom in enumerate(sym_a)
if atom == type])
else:
assert np.all([isinstance(atom, int) for atom in atoms])
indices = atoms
self.indices = indices
def lattice_vectors(self):
"""Return lattice vectors for cells in the supercell."""
        # Lattice vectors relative to the reference cell
R_cN = np.indices(self.N_c).reshape(3, -1)
N_c = np.array(self.N_c)[:, np.newaxis]
if self.offset == 0:
R_cN += N_c // 2
R_cN %= N_c
R_cN -= N_c // 2
return R_cN
def run(self):
"""Run the calculations for the required displacements.
This will do a calculation for 6 displacements per atom, +-x, +-y, and
+-z. Only those calculations that are not already done will be
started. Be aware that an interrupted calculation may produce an empty
file (ending with .pckl), which must be deleted before restarting the
job. Otherwise the calculation for that displacement will not be done.
"""
# Atoms in the supercell -- repeated in the lattice vector directions
# beginning with the last
atoms_N = self.atoms * self.N_c
# Set calculator if provided
assert self.calc is not None, "Provide calculator in __init__ method"
atoms_N.set_calculator(self.calc)
# Do calculation on equilibrium structure
filename = self.name + '.eq.pckl'
fd = opencew(filename)
if fd is not None:
# Call derived class implementation of __call__
output = self.__call__(atoms_N)
# Write output to file
if rank == 0:
pickle.dump(output, fd)
sys.stdout.write('Writing %s\n' % filename)
fd.close()
sys.stdout.flush()
# Positions of atoms to be displaced in the reference cell
natoms = len(self.atoms)
offset = natoms * self.offset
pos = atoms_N.positions[offset: offset + natoms].copy()
# Loop over all displacements
for a in self.indices:
for i in range(3):
for sign in [-1, 1]:
# Filename for atomic displacement
filename = '%s.%d%s%s.pckl' % \
(self.name, a, 'xyz'[i], ' +-'[sign])
# Wait for ranks before checking for file
# barrier()
fd = opencew(filename)
if fd is None:
# Skip if already done
continue
# Update atomic positions
atoms_N.positions[offset + a, i] = \
pos[a, i] + sign * self.delta
# Call derived class implementation of __call__
output = self.__call__(atoms_N)
# Write output to file
if rank == 0:
pickle.dump(output, fd)
sys.stdout.write('Writing %s\n' % filename)
fd.close()
sys.stdout.flush()
# Return to initial positions
atoms_N.positions[offset + a, i] = pos[a, i]
def clean(self):
"""Delete generated pickle files."""
if isfile(self.name + '.eq.pckl'):
remove(self.name + '.eq.pckl')
for a in self.indices:
for i in 'xyz':
for sign in '-+':
name = '%s.%d%s%s.pckl' % (self.name, a, i, sign)
if isfile(name):
remove(name)
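# Minimal illustrative subclass (a sketch, not part of ASE): a derived class
# only needs to implement __call__ for one displaced supercell. Returning the
# forces, as below, is exactly what the Phonons class that follows does.
class _ForcesOnlyDisplacement(Displacement):
    def __call__(self, atoms_N):
        # Called once per displacement by Displacement.run()
        return atoms_N.get_forces()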
class Phonons(Displacement):
"""Class for calculating phonon modes using the finite displacement method.
The matrix of force constants is calculated from the finite difference
approximation to the first-order derivative of the atomic forces as::
2 nbj nbj
nbj d E F- - F+
C = ------------ ~ ------------- ,
mai dR dR 2 * delta
mai nbj
where F+/F- denotes the force in direction j on atom nb when atom ma is
displaced in direction +i/-i. The force constants are related by various
symmetry relations. From the definition of the force constants it must
be symmetric in the three indices mai::
nbj mai bj ai
C = C -> C (R ) = C (-R ) .
mai nbj ai n bj n
As the force constants can only depend on the difference between the m and
n indices, this symmetry is more conveniently expressed as shown on the
right hand-side.
The acoustic sum-rule::
_ _
aj \ bj
C (R ) = - ) C (R )
ai 0 /__ ai m
(m, b)
!=
(0, a)
Ordering of the unit cells illustrated here for a 1-dimensional system (in
case ``refcell=None`` in constructor!):
::
m = 0 m = 1 m = -2 m = -1
-----------------------------------------------------
| | | | |
| * b | * | * | * |
| | | | |
| * a | * | * | * |
| | | | |
-----------------------------------------------------
Example:
>>> from ase.lattice import bulk
>>> from ase.phonons import Phonons
>>> from gpaw import GPAW, FermiDirac
>>> atoms = bulk('Si', 'diamond', a=5.4)
>>> calc = GPAW(kpts=(5, 5, 5),
h=0.2,
occupations=FermiDirac(0.))
>>> ph = Phonons(atoms, calc, supercell=(5, 5, 5))
>>> ph.run()
>>> ph.read(method='frederiksen', acoustic=True)
"""
def __init__(self, *args, **kwargs):
"""Initialize with base class args and kwargs."""
if 'name' not in kwargs.keys():
kwargs['name'] = "phonon"
Displacement.__init__(self, *args, **kwargs)
# Attributes for force constants and dynamical matrix in real space
self.C_N = None # in units of eV / Ang**2
self.D_N = None # in units of eV / Ang**2 / amu
# Attributes for born charges and static dielectric tensor
self.Z_avv = None
self.eps_vv = None
def __call__(self, atoms_N):
"""Calculate forces on atoms in supercell."""
# Calculate forces
forces = atoms_N.get_forces()
return forces
def check_eq_forces(self):
"""Check maximum size of forces in the equilibrium structure."""
fname = '%s.eq.pckl' % self.name
feq_av = pickle.load(open(fname))
        fmin = feq_av.min()
        fmax = feq_av.max()
i_min = np.where(feq_av == fmin)
i_max = np.where(feq_av == fmax)
return fmin, fmax, i_min, i_max
def read_born_charges(self, name=None, neutrality=True):
"""Read Born charges and dieletric tensor from pickle file.
The charge neutrality sum-rule::
_ _
\ a
) Z = 0
/__ ij
a
Parameters
----------
neutrality: bool
Restore charge neutrality condition on calculated Born effective
charges.
"""
# Load file with Born charges and dielectric tensor for atoms in the
# unit cell
if name is None:
filename = '%s.born.pckl' % self.name
else:
filename = name
fd = open(filename)
Z_avv, eps_vv = pickle.load(fd)
fd.close()
# Neutrality sum-rule
if neutrality:
Z_mean = Z_avv.sum(0) / len(Z_avv)
Z_avv -= Z_mean
self.Z_avv = Z_avv[self.indices]
self.eps_vv = eps_vv
def read(self, method='Frederiksen', symmetrize=3, acoustic=True,
cutoff=None, born=False, **kwargs):
"""Read forces from pickle files and calculate force constants.
Extra keyword arguments will be passed to ``read_born_charges``.
Parameters
----------
method: str
Specify method for evaluating the atomic forces.
symmetrize: int
Symmetrize force constants (see doc string at top) when
``symmetrize != 0`` (default: 3). Since restoring the acoustic sum
rule breaks the symmetry, the symmetrization must be repeated a few
times until the changes a insignificant. The integer gives the
number of iterations that will be carried out.
acoustic: bool
Restore the acoustic sum rule on the force constants.
cutoff: None or float
Zero elements in the dynamical matrix between atoms with an
interatomic distance larger than the cutoff.
born: bool
Read in Born effective charge tensor and high-frequency static
            dielectric tensor from file.
"""
method = method.lower()
assert method in ['standard', 'frederiksen']
if cutoff is not None:
cutoff = float(cutoff)
# Read Born effective charges and optical dielectric tensor
if born:
self.read_born_charges(**kwargs)
# Number of atoms
natoms = len(self.indices)
# Number of unit cells
N = np.prod(self.N_c)
# Matrix of force constants as a function of unit cell index in units
# of eV / Ang**2
C_xNav = np.empty((natoms * 3, N, natoms, 3), dtype=float)
# Loop over all atomic displacements and calculate force constants
for i, a in enumerate(self.indices):
for j, v in enumerate('xyz'):
# Atomic forces for a displacement of atom a in direction v
basename = '%s.%d%s' % (self.name, a, v)
fminus_av = pickle.load(open(basename + '-.pckl'))
fplus_av = pickle.load(open(basename + '+.pckl'))
if method == 'frederiksen':
fminus_av[a] -= fminus_av.sum(0)
fplus_av[a] -= fplus_av.sum(0)
# Finite difference derivative
C_av = fminus_av - fplus_av
C_av /= 2 * self.delta
# Slice out included atoms
C_Nav = C_av.reshape((N, len(self.atoms), 3))[:, self.indices]
index = 3*i + j
C_xNav[index] = C_Nav
# Make unitcell index the first and reshape
        C_N = C_xNav.swapaxes(0, 1).reshape((N,) + (3 * natoms, 3 * natoms))
# Cut off before symmetry and acoustic sum rule are imposed
if cutoff is not None:
self.apply_cutoff(C_N, cutoff)
# Symmetrize force constants
if symmetrize:
for i in range(symmetrize):
# Symmetrize
C_N = self.symmetrize(C_N)
# Restore acoustic sum-rule
if acoustic:
self.acoustic(C_N)
else:
break
# Store force constants and dynamical matrix
self.C_N = C_N
self.D_N = C_N.copy()
# Add mass prefactor
m_a = self.atoms.get_masses()
self.m_inv_x = np.repeat(m_a[self.indices]**-0.5, 3)
M_inv = np.outer(self.m_inv_x, self.m_inv_x)
for D in self.D_N:
D *= M_inv
def symmetrize(self, C_N):
"""Symmetrize force constant matrix."""
# Number of atoms
natoms = len(self.indices)
# Number of unit cells
N = np.prod(self.N_c)
# Reshape force constants to (l, m, n) cell indices
C_lmn = C_N.reshape(self.N_c + (3 * natoms, 3 * natoms))
# Shift reference cell to center index
if self.offset == 0:
C_lmn = fft.fftshift(C_lmn, axes=(0, 1, 2)).copy()
# Make force constants symmetric in indices -- in case of an even
# number of unit cells don't include the first
i, j, k = np.asarray(self.N_c) % 2 - 1
C_lmn[i:, j:, k:] *= 0.5
C_lmn[i:, j:, k:] += \
C_lmn[i:, j:, k:][::-1, ::-1, ::-1].transpose(0, 1, 2, 4, 3).copy()
if self.offset == 0:
C_lmn = fft.ifftshift(C_lmn, axes=(0, 1, 2)).copy()
# Change to single unit cell index shape
C_N = C_lmn.reshape((N, 3 * natoms, 3 * natoms))
return C_N
def acoustic(self, C_N):
"""Restore acoustic sumrule on force constants."""
# Number of atoms
natoms = len(self.indices)
# Copy force constants
C_N_temp = C_N.copy()
# Correct atomic diagonals of R_m = (0, 0, 0) matrix
for C in C_N_temp:
for a in range(natoms):
for a_ in range(natoms):
C_N[self.offset, 3*a: 3*a + 3, 3*a: 3*a + 3] -= \
C[3*a: 3*a+3, 3*a_: 3*a_+3]
def apply_cutoff(self, D_N, r_c):
"""Zero elements for interatomic distances larger than the cutoff.
Parameters
----------
D_N: ndarray
Dynamical/force constant matrix.
r_c: float
Cutoff in Angstrom.
"""
# Number of atoms and primitive cells
natoms = len(self.indices)
N = np.prod(self.N_c)
# Lattice vectors
R_cN = self.lattice_vectors()
# Reshape matrix to individual atomic and cartesian dimensions
D_Navav = D_N.reshape((N, natoms, 3, natoms, 3))
# Cell vectors
cell_vc = self.atoms.cell.transpose()
# Atomic positions in reference cell
pos_av = self.atoms.get_positions()
# Zero elements with a distance to atoms in the reference cell
# larger than the cutoff
for n in range(N):
# Lattice vector to cell
R_v = np.dot(cell_vc, R_cN[:, n])
# Atomic positions in cell
posn_av = pos_av + R_v
# Loop over atoms and zero elements
for i, a in enumerate(self.indices):
dist_a = np.sqrt(np.sum((pos_av[a] - posn_av)**2, axis=-1))
                # Atoms where the distance is larger than the cutoff
i_a = dist_a > r_c #np.where(dist_a > r_c)
# Zero elements
D_Navav[n, i, :, i_a, :] = 0.0
# print ""
def get_force_constant(self):
"""Return matrix of force constants."""
assert self.C_N is not None
return self.C_N
def band_structure(self, path_kc, modes=False, born=False, verbose=True):
"""Calculate phonon dispersion along a path in the Brillouin zone.
The dynamical matrix at arbitrary q-vectors is obtained by Fourier
transforming the real-space force constants. In case of negative
eigenvalues (squared frequency), the corresponding negative frequency
is returned.
Eigenvalues and modes are in units of eV and Ang/sqrt(amu),
respectively.
Parameters
----------
path_kc: ndarray
List of k-point coordinates (in units of the reciprocal lattice
vectors) specifying the path in the Brillouin zone for which the
dynamical matrix will be calculated.
modes: bool
Returns both frequencies and modes when True.
born: bool
Include non-analytic part given by the Born effective charges and
the static part of the high-frequency dielectric tensor. This
contribution to the force constant accounts for the splitting
between the LO and TO branches for q -> 0.
verbose: bool
            Print warnings when imaginary frequencies are detected.
"""
assert self.D_N is not None
if born:
assert self.Z_avv is not None
assert self.eps_vv is not None
# Lattice vectors -- ordered as illustrated in class docstring
R_cN = self.lattice_vectors()
# Dynamical matrix in real-space
D_N = self.D_N
# Lists for frequencies and modes along path
omega_kl = []
u_kl = []
# Reciprocal basis vectors for use in non-analytic contribution
reci_vc = 2 * pi * la.inv(self.atoms.cell)
# Unit cell volume in Bohr^3
vol = abs(la.det(self.atoms.cell)) / units.Bohr**3
for q_c in path_kc:
# Add non-analytic part
if born:
# q-vector in cartesian coordinates
q_v = np.dot(reci_vc, q_c)
# Non-analytic contribution to force constants in atomic units
qdotZ_av = np.dot(q_v, self.Z_avv).ravel()
C_na = 4 * pi * np.outer(qdotZ_av, qdotZ_av) / \
np.dot(q_v, np.dot(self.eps_vv, q_v)) / vol
self.C_na = C_na / units.Bohr**2 * units.Hartree
# Add mass prefactor and convert to eV / (Ang^2 * amu)
M_inv = np.outer(self.m_inv_x, self.m_inv_x)
D_na = C_na * M_inv / units.Bohr**2 * units.Hartree
self.D_na = D_na
D_N = self.D_N + D_na / np.prod(self.N_c)
## if np.prod(self.N_c) == 1:
##
## q_av = np.tile(q_v, len(self.indices))
## q_xx = np.vstack([q_av]*len(self.indices)*3)
## D_m += q_xx
# Evaluate fourier sum
phase_N = np.exp(-2.j * pi * np.dot(q_c, R_cN))
D_q = np.sum(phase_N[:, np.newaxis, np.newaxis] * D_N, axis=0)
if modes:
omega2_l, u_xl = la.eigh(D_q, UPLO='U')
# Sort eigenmodes according to eigenvalues (see below) and
# multiply with mass prefactor
u_lx = (self.m_inv_x[:, np.newaxis] *
u_xl[:, omega2_l.argsort()]).T.copy()
u_kl.append(u_lx.reshape((-1, len(self.indices), 3)))
else:
omega2_l = la.eigvalsh(D_q, UPLO='U')
# Sort eigenvalues in increasing order
omega2_l.sort()
# Use dtype=complex to handle negative eigenvalues
omega_l = np.sqrt(omega2_l.astype(complex))
# Take care of imaginary frequencies
if not np.all(omega2_l >= 0.):
indices = np.where(omega2_l < 0)[0]
if verbose:
print('WARNING, %i imaginary frequencies at '
'q = (% 5.2f, % 5.2f, % 5.2f) ; (omega_q =% 5.3e*i)'
% (len(indices), q_c[0], q_c[1], q_c[2],
omega_l[indices][0].imag))
omega_l[indices] = -1 * np.sqrt(np.abs(omega2_l[indices].real))
omega_kl.append(omega_l.real)
# Conversion factor: sqrt(eV / Ang^2 / amu) -> eV
s = units._hbar * 1e10 / sqrt(units._e * units._amu)
omega_kl = s * np.asarray(omega_kl)
if modes:
return omega_kl, np.asarray(u_kl)
return omega_kl
def dos(self, kpts=(10, 10, 10), npts=1000, delta=1e-3, indices=None):
"""Calculate phonon dos as a function of energy.
Parameters
----------
        kpts: tuple
Shape of Monkhorst-Pack grid for sampling the Brillouin zone.
npts: int
Number of energy points.
delta: float
Broadening of Lorentzian line-shape in eV.
indices: list
If indices is not None, the atomic-partial dos for the specified
atoms will be calculated.
"""
# Monkhorst-Pack grid
kpts_kc = monkhorst_pack(kpts)
N = np.prod(kpts)
# Get frequencies
omega_kl = self.band_structure(kpts_kc)
# Energy axis and dos
omega_e = np.linspace(0., np.amax(omega_kl) + 5e-3, num=npts)
dos_e = np.zeros_like(omega_e)
# Sum up contribution from all q-points and branches
for omega_l in omega_kl:
diff_el = (omega_e[:, np.newaxis] - omega_l[np.newaxis, :])**2
dos_el = 1. / (diff_el + (0.5 * delta)**2)
dos_e += dos_el.sum(axis=1)
dos_e *= 1. / (N * pi) * 0.5 * delta
return omega_e, dos_e
def write_modes(self, q_c, branches=0, kT=units.kB*300, born=False,
repeat=(1, 1, 1), nimages=30, center=False):
"""Write modes to trajectory file.
Parameters
----------
q_c: ndarray
q-vector of the modes.
branches: int or list
Branch index of modes.
kT: float
Temperature in units of eV. Determines the amplitude of the atomic
displacements in the modes.
born: bool
Include non-analytic contribution to the force constants at q -> 0.
repeat: tuple
Repeat atoms (l, m, n) times in the directions of the lattice
vectors. Displacements of atoms in repeated cells carry a Bloch
phase factor given by the q-vector and the cell lattice vector R_m.
nimages: int
Number of images in an oscillation.
center: bool
Center atoms in unit cell if True (default: False).
"""
if isinstance(branches, int):
branch_l = [branches]
else:
branch_l = list(branches)
# Calculate modes
omega_l, u_l = self.band_structure([q_c], modes=True, born=born)
# Repeat atoms
atoms = self.atoms * repeat
# Center
if center:
atoms.center()
# Here ``Na`` refers to a composite unit cell/atom dimension
pos_Nav = atoms.get_positions()
# Total number of unit cells
N = np.prod(repeat)
# Corresponding lattice vectors R_m
R_cN = np.indices(repeat).reshape(3, -1)
# Bloch phase
phase_N = np.exp(2.j * pi * np.dot(q_c, R_cN))
phase_Na = phase_N.repeat(len(self.atoms))
for l in branch_l:
omega = omega_l[0, l]
u_av = u_l[0, l]
# Mean displacement of a classical oscillator at temperature T
u_av *= sqrt(kT) / abs(omega)
mode_av = np.zeros((len(self.atoms), 3), dtype=complex)
# Insert slice with atomic displacements for the included atoms
mode_av[self.indices] = u_av
# Repeat and multiply by Bloch phase factor
mode_Nav = np.vstack(N * [mode_av]) * phase_Na[:, np.newaxis]
traj = Trajectory('%s.mode.%d.traj' % (self.name, l), 'w')
for x in np.linspace(0, 2*pi, nimages, endpoint=False):
atoms.set_positions((pos_Nav + np.exp(1.j * x) * mode_Nav).real)
traj.write(atoms)
traj.close()
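# Hedged end-to-end sketch (not part of this module): a small phonon DOS
# calculation for bulk Al with the EMT calculator, mirroring the GPAW example
# in the Phonons docstring above. Guarded so it never runs on import.
if __name__ == '__main__':
    from ase.lattice import bulk
    from ase.calculators.emt import EMT
    al = bulk('Al', 'fcc', a=4.05)
    ph = Phonons(al, EMT(), supercell=(3, 3, 3))
    ph.run()
    ph.read(acoustic=True)
    omega_e, dos_e = ph.dos(kpts=(10, 10, 10), npts=500, delta=5e-4)
    print('DOS computed on %d energy points' % len(omega_e))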
|
suttond/MODOI
|
ase/phonons.py
|
Python
|
lgpl-3.0
| 27,222
|
[
"ASE",
"GPAW"
] |
cd510241fc7b314adf06477d013f9c2ef949ec89cf6651221a2a4678bd9b731e
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
import numpy as np
from unittest import TestCase
from os import sep, remove, rmdir
from tempfile import mkdtemp
import tarfile
from glob import glob
from exatomic.base import resource
from exatomic.va import VA, get_data, gen_delta
from exatomic.gaussian import Fchk, Output as gOutput
from exatomic.nwchem import Output
TMPDIR = mkdtemp()
h2o2_freq = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
methyloxirane_freq = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
tar = tarfile.open(resource('va-vroa-h2o2.tar.bz'), mode='r')
tar.extractall(TMPDIR)
tar.close()
tar = tarfile.open(resource('va-vroa-methyloxirane.tar.bz'), mode='r')
tar.extractall(TMPDIR)
tar.close()
nitro_freq = gOutput(resource('g09-nitromalonamide-6-31++g-freq.out'))
tar = tarfile.open(resource('va-zpvc-nitro_nmr.tar.bz'), mode='r')
tar.extractall(TMPDIR)
tar.close()
class TestGetData(TestCase):
def test_getter_small(self):
path = sep.join([TMPDIR, 'h2o2', '*'])
df = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-',
f_end='.out')
self.assertEqual(df.shape[0], 130)
df = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-',
f_end='.out')
self.assertEqual(df.shape[0], 52)
def test_getter_large(self):
path = sep.join([TMPDIR, 'methyloxirane', '*'])
df = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-',
f_end='.out')
self.assertEqual(df.shape[0], 160)
df = get_data(path=path, attr='gradient', soft=Output, f_start='va-roa-methyloxirane-def2tzvp-488.9-',
f_end='.out')
self.assertEqual(df.shape[0], 160)
class TestVROA(TestCase):
def test_vroa(self):
h2o2_freq.parse_frequency()
h2o2_freq.parse_frequency_ext()
delta = gen_delta(delta_type=2, freq=h2o2_freq.frequency.copy())
va_corr = VA()
path = sep.join([TMPDIR, 'h2o2', '*'])
va_corr.roa = get_data(path=path, attr='roa', soft=Output, f_start='va-roa-h2o2-def2tzvp-514.5-',
f_end='.out')
va_corr.roa['exc_freq'] = np.tile(514.5, len(va_corr.roa))
va_corr.gradient = get_data(path=path, attr='gradient', soft=Output,
f_start='va-roa-h2o2-def2tzvp-514.5-', f_end='.out')
va_corr.gradient['exc_freq'] = np.tile(514.5, len(va_corr.gradient))
va_corr.vroa(uni=h2o2_freq, delta=delta['delta'].values)
scatter_data = np.array([[ 3.47311779e+02, 0.00000000e+00, -3.27390198e+02,
-8.44921542e+01, -4.22102267e-02, -3.41332079e-02,
-3.91676006e-03, 5.14500000e+02],
[ 8.60534577e+02, 1.00000000e+00, 1.75268228e+02,
-8.09603043e+00, -8.26286589e+00, 1.65666769e-02,
-3.01543530e-03, 5.14500000e+02],
[ 1.24319010e+03, 2.00000000e+00, -3.35605422e+02,
-7.26516978e+01, 1.06370293e-06, -3.45429748e-02,
-4.20725882e-03, 5.14500000e+02],
[ 1.37182002e+03, 3.00000000e+00, 2.20210484e+02,
5.78999940e+01, -4.46257676e+00, 2.29930063e-02,
-6.16087427e-04, 5.14500000e+02],
[ 3.59750268e+03, 4.00000000e+00, -3.50819253e+03,
-3.90826518e+02, 5.90325028e-02, -3.49292932e-01,
-4.98353528e-02, 5.14500000e+02],
[ 3.59821746e+03, 5.00000000e+00, 5.05236006e+03,
4.00023286e+02, 6.60389814e+00, 4.97827311e-01,
7.91921951e-02, 5.14500000e+02]])
raman_data = np.array([[3.47311779e+02, 0.00000000e+00, 3.42307581e-05, 6.38955258e-01,
2.04527298e+01, 5.14500000e+02],
[8.60534577e+02, 1.00000000e+00, 1.90190339e-01, 1.07090867e+00,
6.85033386e+01, 5.14500000e+02],
[1.24319010e+03, 2.00000000e+00, 1.27606294e-08, 3.46909710e-01,
1.11011130e+01, 5.14500000e+02],
[1.37182002e+03, 3.00000000e+00, 3.94139588e-02, 1.19286864e+00,
4.52663091e+01, 5.14500000e+02],
[3.59750268e+03, 4.00000000e+00, 1.52822910e-02, 6.21166773e+00,
2.01524180e+02, 5.14500000e+02],
[3.59821746e+03, 5.00000000e+00, 1.60412161e+00, 5.19841596e+00,
4.55091201e+02, 5.14500000e+02]])
scatter_data = scatter_data.T.copy()
raman_data = raman_data.T.copy()
# test all columns of the respective dataframe to get a better sense of what is broken
self.assertTrue(np.allclose(va_corr.scatter['freq'].values, scatter_data[0], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['freqdx'].values, scatter_data[1], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['beta_g*1e6'].values, scatter_data[2], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['beta_A*1e6'].values, scatter_data[3], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['alpha_g*1e6'].values, scatter_data[4], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['backscatter'].values, scatter_data[5], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['forwardscatter'].values, scatter_data[6], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['exc_freq'].values, scatter_data[7], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['freq'].values, raman_data[0], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['freqdx'].values, raman_data[1], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['alpha_squared'].values, raman_data[2], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['beta_alpha'].values, raman_data[3], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['raman_int'].values, raman_data[4], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['exc_freq'].values, raman_data[5], rtol=5e-4))
def test_select_freq(self):
methyloxirane_freq.parse_frequency()
methyloxirane_freq.parse_frequency_ext()
delta = gen_delta(delta_type=2, freq=methyloxirane_freq.frequency.copy())
va_corr = VA()
path = sep.join([TMPDIR, 'methyloxirane', '*'])
va_corr.roa = get_data(path=path, attr='roa', soft=Output,
f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out')
va_corr.roa['exc_freq'] = np.tile(488.9, len(va_corr.roa))
va_corr.gradient = get_data(path=path, attr='gradient', soft=Output,
f_start='va-roa-methyloxirane-def2tzvp-488.9-', f_end='.out')
va_corr.gradient['exc_freq'] = np.tile(488.9, len(va_corr.gradient))
va_corr.vroa(uni=methyloxirane_freq, delta=delta['delta'].values)
scatter_data = np.array([[ 1.12639199e+03, 1.00000000e+01, -6.15736884e+01,
-1.53103521e+01, -1.68892383e-01, -6.40100535e-03,
-8.61815897e-04, 4.88900000e+02],
[ 1.15100631e+03, 1.10000000e+01, -1.06898371e+02,
-2.76794343e+01, 3.53857297e+00, -1.11479855e-02,
1.28026956e-03, 4.88900000e+02],
[ 1.24937064e+03, 1.20000000e+01, 5.23431984e+01,
5.17874012e+00, -8.24615516e+00, 5.19066673e-03,
-5.18260038e-03, 4.88900000e+02],
[ 1.37094149e+03, 1.30000000e+01, 3.49746537e+01,
-9.43653998e+00, -8.72689935e-02, 3.05559747e-03,
6.47745423e-04, 4.88900000e+02],
[ 1.39064221e+03, 1.40000000e+01, -5.31532967e+01,
-5.65151249e+00, -6.11336634e+00, -5.28356488e-03,
-5.16165231e-03, 4.88900000e+02],
[ 1.44754882e+03, 1.50000000e+01, -1.25064010e+02,
-2.49033712e+01, -8.12931706e-02, -1.28030529e-02,
-1.66110131e-03, 4.88900000e+02]])
raman_data = np.array([[1.12639199e+03, 1.00000000e+01, 3.61528920e-04, 5.94192417e-01,
1.90792325e+01, 4.88900000e+02],
[1.15100631e+03, 1.10000000e+01, 3.36862711e-02, 1.62723253e-01,
1.12706729e+01, 4.88900000e+02],
[1.24937064e+03, 1.20000000e+01, 3.10356963e-01, 1.61670230e+00,
1.07598727e+02, 4.88900000e+02],
[1.37094149e+03, 1.30000000e+01, 6.63217766e-03, 2.37302109e-01,
8.78745947e+00, 4.88900000e+02],
[1.39064221e+03, 1.40000000e+01, 6.30361373e-02, 8.04145907e-01,
3.70791737e+01, 4.88900000e+02],
[1.44754882e+03, 1.50000000e+01, 5.36564516e-05, 1.07944901e+00,
3.45520265e+01, 4.88900000e+02]])
scatter_data = scatter_data.T.copy()
raman_data = raman_data.T.copy()
# test all columns of the respective dataframe to get a better sense of what is broken
self.assertTrue(np.allclose(va_corr.scatter['freq'].values, scatter_data[0], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['freqdx'].values, scatter_data[1], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['beta_g*1e6'].values, scatter_data[2], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['beta_A*1e6'].values, scatter_data[3], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['alpha_g*1e6'].values, scatter_data[4], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['backscatter'].values, scatter_data[5], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['forwardscatter'].values, scatter_data[6], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.scatter['exc_freq'].values, scatter_data[7], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['freq'].values, raman_data[0], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['freqdx'].values, raman_data[1], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['alpha_squared'].values, raman_data[2], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['beta_alpha'].values, raman_data[3], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['raman_int'].values, raman_data[4], rtol=5e-4))
self.assertTrue(np.allclose(va_corr.raman['exc_freq'].values, raman_data[5], rtol=5e-4))
#class TestZPVC(TestCase):
def test_zpvc(self):
nitro_freq.parse_frequency()
nitro_freq.parse_frequency_ext()
path = sep.join([TMPDIR, 'nitromalonamide_nmr', '*'])
va_corr = VA()
va_corr.gradient = get_data(path=path, attr='gradient', soft=gOutput, f_start='nitromal_grad_',
f_end='.out')
va_corr.property = get_data(path=path, attr='nmr_shielding', soft=gOutput,
f_start='nitromal_prop', f_end='.out').groupby(
'atom').get_group(0)[['isotropic', 'file']].reset_index(drop=True)
delta = gen_delta(delta_type=2, freq=nitro_freq.frequency.copy())
va_corr.zpvc(uni=nitro_freq, delta=delta['delta'].values, temperature=[0, 200])
zpvc_results = np.array([[ 13.9329 , -1.80706136, 12.12583864, -2.65173195,
0.84467059, 0. ],
[ 13.9329 , -1.48913965, 12.44376035, -2.39264653,
0.90350687, 200. ]])
eff_coord = np.array([[ 1. , 0.43933078, -4.1685104 , 0. ],
[ 1. , -5.89314484, -2.18542914, 0. ],
[ 1. , -4.92050271, 1.02704248, 0. ],
[ 1. , 6.27207499, -0.61188352, 0. ],
[ 1. , 4.51177692, 2.23712277, 0. ],
[ 6. , 2.50712078, -1.03455595, 0. ],
[ 8. , 2.68478934, -3.43995936, 0. ],
[ 7. , 4.63712862, 0.33562593, 0. ],
[ 6. , -0.00921467, 0.10958486, 0. ],
[ 6. , -2.14821065, -1.60648521, 0. ],
[ 8. , -1.72316811, -4.00875859, 0. ],
[ 7. , -0.356593 , 2.76254918, 0. ],
[ 8. , 1.51924782, 4.1859399 , 0. ],
[ 8. , -2.53953864, 3.65511737, 0. ],
[ 7. , -4.55751742, -0.8483932 , 0. ],
[ 1. , 0.42984441, -4.17018127, 0. ],
[ 1. , -5.88480085, -2.18035498, 0. ],
[ 1. , -4.91515264, 1.02280195, 0. ],
[ 1. , 6.26195233, -0.60839022, 0. ],
[ 1. , 4.50575843, 2.22964105, 0. ],
[ 6. , 2.50646903, -1.03456634, 0. ],
[ 8. , 2.68667123, -3.43354071, 0. ],
[ 7. , 4.62935451, 0.33263776, 0. ],
[ 6. , -0.00919927, 0.1084463 , 0. ],
[ 6. , -2.14622625, -1.60477806, 0. ],
[ 8. , -1.72498822, -4.00524579, 0. ],
[ 7. , -0.35655368, 2.76183552, 0. ],
[ 8. , 1.50986822, 4.18058144, 0. ],
[ 8. , -2.52931977, 3.65383296, 0. ],
[ 7. , -4.55110836, -0.84857983, 0. ]])
vib_average = np.array([[ 8.50080000e+01, 0.00000000e+00, -0.00000000e+00,
7.20197387e-03, 7.20197387e-03, 0.00000000e+00],
[ 8.92980000e+01, 1.00000000e+00, -0.00000000e+00,
-1.92131709e-03, -1.92131709e-03, 0.00000000e+00],
[ 1.46093800e+02, 2.00000000e+00, -0.00000000e+00,
2.20218342e-02, 2.20218342e-02, 0.00000000e+00],
[ 2.17704400e+02, 3.00000000e+00, -0.00000000e+00,
9.06126467e-04, 9.06126467e-04, 0.00000000e+00],
[ 3.21218700e+02, 4.00000000e+00, -1.04570441e+00,
7.43587697e-02, -9.71345643e-01, 0.00000000e+00],
[ 3.54578000e+02, 5.00000000e+00, -1.69193968e-01,
1.02299102e-02, -1.58964058e-01, 0.00000000e+00],
[ 4.01846100e+02, 6.00000000e+00, -9.95359531e-04,
3.02869005e-03, 2.03333052e-03, 0.00000000e+00],
[ 4.18516500e+02, 7.00000000e+00, -0.00000000e+00,
-1.59091073e-02, -1.59091073e-02, 0.00000000e+00],
[ 4.25136100e+02, 8.00000000e+00, 9.96865969e-02,
9.60161049e-03, 1.09288207e-01, 0.00000000e+00],
[ 4.33864900e+02, 9.00000000e+00, -0.00000000e+00,
-2.24181573e-02, -2.24181573e-02, 0.00000000e+00],
[ 4.61383400e+02, 1.00000000e+01, 3.02755804e-01,
7.35128407e-02, 3.76268645e-01, 0.00000000e+00],
[ 4.85255900e+02, 1.10000000e+01, 5.73683252e-04,
3.43416159e-03, 4.00784484e-03, 0.00000000e+00],
[ 6.09549200e+02, 1.20000000e+01, 8.95579596e-02,
9.59664081e-03, 9.91546004e-02, 0.00000000e+00],
[ 6.66622400e+02, 1.30000000e+01, -0.00000000e+00,
-2.76336851e-03, -2.76336851e-03, 0.00000000e+00],
[ 6.85145800e+02, 1.40000000e+01, -0.00000000e+00,
-7.69008204e-03, -7.69008204e-03, 0.00000000e+00],
[ 7.03986700e+02, 1.50000000e+01, -3.59227410e-01,
2.76777550e-02, -3.31549655e-01, 0.00000000e+00],
[ 7.14892300e+02, 1.60000000e+01, -0.00000000e+00,
2.59325707e-03, 2.59325707e-03, 0.00000000e+00],
[ 7.25846000e+02, 1.70000000e+01, -0.00000000e+00,
-6.41058056e-03, -6.41058056e-03, 0.00000000e+00],
[ 7.62762300e+02, 1.80000000e+01, -0.00000000e+00,
-5.35324542e-03, -5.35324542e-03, 0.00000000e+00],
[ 8.46200900e+02, 1.90000000e+01, -3.55846141e-03,
-8.75803015e-04, -4.43426443e-03, 0.00000000e+00],
[ 1.07527990e+03, 2.00000000e+01, -1.84207554e-02,
4.72086557e-03, -1.36998899e-02, 0.00000000e+00],
[ 1.09465730e+03, 2.10000000e+01, -3.15295434e-02,
5.46069862e-03, -2.60688448e-02, 0.00000000e+00],
[ 1.10619190e+03, 2.20000000e+01, -0.00000000e+00,
4.49794220e-02, 4.49794220e-02, 0.00000000e+00],
[ 1.16155690e+03, 2.30000000e+01, -1.07394061e-02,
2.19152295e-03, -8.54788314e-03, 0.00000000e+00],
[ 1.17408590e+03, 2.40000000e+01, 2.00299542e-02,
9.48213114e-03, 2.95120853e-02, 0.00000000e+00],
[ 1.26700700e+03, 2.50000000e+01, -5.20609316e-01,
1.81355739e-01, -3.39253577e-01, 0.00000000e+00],
[ 1.31668580e+03, 2.60000000e+01, -3.86074967e-03,
2.99473191e-03, -8.66017757e-04, 0.00000000e+00],
[ 1.39527270e+03, 2.70000000e+01, -1.48603969e-03,
3.70156572e-03, 2.21552603e-03, 0.00000000e+00],
[ 1.45205880e+03, 2.80000000e+01, -2.50446596e-03,
-1.17089006e-03, -3.67535602e-03, 0.00000000e+00],
[ 1.55570980e+03, 2.90000000e+01, -6.91414901e-04,
-9.62715516e-04, -1.65413042e-03, 0.00000000e+00],
[ 1.57585090e+03, 3.00000000e+01, 4.04075730e-03,
-1.72319285e-03, 2.31756445e-03, 0.00000000e+00],
[ 1.59816940e+03, 3.10000000e+01, -3.02221180e-03,
-1.26461431e-02, -1.56683549e-02, 0.00000000e+00],
[ 1.63134120e+03, 3.20000000e+01, -3.85968122e-02,
1.18178497e-02, -2.67789625e-02, 0.00000000e+00],
[ 1.71103720e+03, 3.30000000e+01, -5.30081127e-02,
9.07966487e-03, -4.39284478e-02, 0.00000000e+00],
[ 2.26025580e+03, 3.40000000e+01, -9.00414250e-01,
4.04124232e-01, -4.96290017e-01, 0.00000000e+00],
[ 3.52007010e+03, 3.50000000e+01, -3.79625326e-04,
7.67211701e-04, 3.87586375e-04, 0.00000000e+00],
[ 3.54188550e+03, 3.60000000e+01, -2.40654106e-03,
-3.52958626e-04, -2.75949969e-03, 0.00000000e+00],
[ 3.68688910e+03, 3.70000000e+01, -1.40898432e-03,
3.54638132e-04, -1.05434619e-03, 0.00000000e+00],
[ 3.69638850e+03, 3.80000000e+01, -6.18866036e-04,
-3.25687908e-04, -9.44553944e-04, 0.00000000e+00],
[ 8.50080000e+01, 0.00000000e+00, -0.00000000e+00,
2.42846696e-02, 2.42846696e-02, 2.00000000e+02],
[ 8.92980000e+01, 1.00000000e+00, -0.00000000e+00,
-6.18637794e-03, -6.18637794e-03, 2.00000000e+02],
[ 1.46093800e+02, 2.00000000e+00, -0.00000000e+00,
4.56979114e-02, 4.56979114e-02, 2.00000000e+02],
[ 2.17704400e+02, 3.00000000e+00, -0.00000000e+00,
1.38459170e-03, 1.38459170e-03, 2.00000000e+02],
[ 3.21218700e+02, 4.00000000e+00, -1.01880224e+00,
9.07354499e-02, -9.28066794e-01, 2.00000000e+02],
[ 3.54578000e+02, 5.00000000e+00, -1.47884125e-01,
1.19615757e-02, -1.35922549e-01, 2.00000000e+02],
[ 4.01846100e+02, 6.00000000e+00, -1.06537672e-03,
3.38490386e-03, 2.31952714e-03, 2.00000000e+02],
[ 4.18516500e+02, 7.00000000e+00, -0.00000000e+00,
-1.75578234e-02, -1.75578234e-02, 2.00000000e+02],
[ 4.25136100e+02, 8.00000000e+00, 1.13889092e-01,
1.05481068e-02, 1.24437199e-01, 2.00000000e+02],
[ 4.33864900e+02, 9.00000000e+00, -0.00000000e+00,
-2.44873679e-02, -2.44873679e-02, 2.00000000e+02],
[ 4.61383400e+02, 1.00000000e+01, 3.50247546e-01,
7.90337919e-02, 4.29281338e-01, 2.00000000e+02],
[ 4.85255900e+02, 1.10000000e+01, 1.71633606e-04,
3.65009840e-03, 3.82173201e-03, 2.00000000e+02],
[ 6.09549200e+02, 1.20000000e+01, 7.81791456e-02,
9.83892770e-03, 8.80180732e-02, 2.00000000e+02],
[ 6.66622400e+02, 1.30000000e+01, -0.00000000e+00,
-2.80944797e-03, -2.80944797e-03, 2.00000000e+02],
[ 6.85145800e+02, 1.40000000e+01, -0.00000000e+00,
-7.80220122e-03, -7.80220122e-03, 2.00000000e+02],
[ 7.03986700e+02, 1.50000000e+01, -2.69187645e-01,
2.80298166e-02, -2.41157829e-01, 2.00000000e+02],
[ 7.14892300e+02, 1.60000000e+01, -0.00000000e+00,
2.62373990e-03, 2.62373990e-03, 2.00000000e+02],
[ 7.25846000e+02, 1.70000000e+01, -0.00000000e+00,
-6.48019409e-03, -6.48019409e-03, 2.00000000e+02],
[ 7.62762300e+02, 1.80000000e+01, -0.00000000e+00,
-5.39776338e-03, -5.39776338e-03, 2.00000000e+02],
[ 8.46200900e+02, 1.90000000e+01, 2.39979858e-03,
-8.79791824e-04, 1.52000675e-03, 2.00000000e+02],
[ 1.07527990e+03, 2.00000000e+01, -1.08417544e-02,
4.72499608e-03, -6.11675836e-03, 2.00000000e+02],
[ 1.09465730e+03, 2.10000000e+01, -2.64528256e-03,
5.46485456e-03, 2.81957200e-03, 2.00000000e+02],
[ 1.10619190e+03, 2.20000000e+01, -0.00000000e+00,
4.50109276e-02, 4.50109276e-02, 2.00000000e+02],
[ 1.16155690e+03, 2.30000000e+01, -6.76838012e-03,
2.19255359e-03, -4.57582653e-03, 2.00000000e+02],
[ 1.17408590e+03, 2.40000000e+01, 9.29716984e-03,
9.48620604e-03, 1.87833759e-02, 2.00000000e+02],
[ 1.26700700e+03, 2.50000000e+01, -5.14657884e-01,
1.81395679e-01, -3.33262205e-01, 2.00000000e+02],
[ 1.31668580e+03, 2.60000000e+01, -2.24279961e-02,
2.99519325e-03, -1.94328029e-02, 2.00000000e+02],
[ 1.39527270e+03, 2.70000000e+01, -9.34932877e-04,
3.70188971e-03, 2.76695683e-03, 2.00000000e+02],
[ 1.45205880e+03, 2.80000000e+01, 3.53202807e-03,
-1.17095817e-03, 2.36106990e-03, 2.00000000e+02],
[ 1.55570980e+03, 2.90000000e+01, -2.49842213e-04,
-9.62742087e-04, -1.21258430e-03, 2.00000000e+02],
[ 1.57585090e+03, 3.00000000e+01, 2.39535538e-03,
-1.72323400e-03, 6.72121388e-04, 2.00000000e+02],
[ 1.59816940e+03, 3.10000000e+01, 4.90707509e-03,
-1.26464003e-02, -7.73932517e-03, 2.00000000e+02],
[ 1.63134120e+03, 3.20000000e+01, -3.00698127e-02,
1.18180390e-02, -1.82517738e-02, 2.00000000e+02],
[ 1.71103720e+03, 3.30000000e+01, -3.95916986e-02,
9.07974685e-03, -3.05119518e-02, 2.00000000e+02],
[ 2.26025580e+03, 3.40000000e+01, -8.86090407e-01,
4.04124303e-01, -4.81966104e-01, 2.00000000e+02],
[ 3.52007010e+03, 3.50000000e+01, -5.63472032e-04,
7.67211701e-04, 2.03739669e-04, 2.00000000e+02],
[ 3.54188550e+03, 3.60000000e+01, -3.39925478e-03,
-3.52958626e-04, -3.75221341e-03, 2.00000000e+02],
[ 3.68688910e+03, 3.70000000e+01, -1.73391855e-03,
3.54638132e-04, -1.37928042e-03, 2.00000000e+02],
[ 3.69638850e+03, 3.80000000e+01, -7.51344750e-04,
-3.25687908e-04, -1.07703266e-03, 2.00000000e+02]])
cols = ['property', 'zpvc', 'zpva', 'tot_anharm', 'tot_curva', 'temp']
self.assertTrue(np.allclose(va_corr.zpvc_results[cols].values, zpvc_results, rtol=5e-4))
va_corr.eff_coord['Z'] = va_corr.eff_coord['Z'].astype(int)
self.assertTrue(np.allclose(va_corr.eff_coord[['Z','x','y','z']].values, eff_coord, atol=5e-5))
cols = ['freq', 'freqdx', 'anharm', 'curva', 'sum', 'temp']
self.assertTrue(np.allclose(va_corr.vib_average[cols].values, vib_average, rtol=5e-4))
|
exa-analytics/exatomic
|
exatomic/va/tests/test_va.py
|
Python
|
apache-2.0
| 24,371
|
[
"Gaussian",
"NWChem"
] |
222729fa009b637e68713de88f725e1a0e5e505d4f3e2916252964aa570010ae
|
# -*- coding: utf-8 -*-
## Ranking of records using different parameters and methods on the fly.
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
"""This file defines class used for the management of DRANK ranking tables.
It includes rank normalization, rescaling, filtering and aggregation/merging."""
import sys
from math import exp
from invenio.dbquery import deserialize_via_marshal, serialize_via_marshal, run_sql
from invenio.bibrank_downloads_indexer import uniq
from scipy.stats.mstats import scoreatpercentile
from scipy.stats.kde import gaussian_kde
from numpy import inf
from Numeric import array
class rnkDict:
"""Class to manage ranking tables."""
def __init__(self, rnkdict={}):
self.content = rnkdict
def strip(self):
"""Remove entries that contain nil scores. Inverse of fill."""
_content = {}
keys = filter(lambda x: self.content[x] > 0, self.content.keys())
values = map(lambda x: self.content[x], keys)
content = zip(keys, values)
for item in content:
_content[item[0]] = item[1]
self.content = _content
return
def fill(self):
"""Fill ranking table so that it contains all keys including those with nil scores. Inverse of strip."""
self.sanitize()
return
def sanitize(self, nil=0):
"""Sanitize the ranking table and make sure all keys are present."""
keys = self.content.keys()
for item in filter(lambda x: x not in keys, range(max(keys))):
self.content[item] = nil
return
def put(self, dictionary):
"""Insert content given in the dictionary into the table. Inverse of get."""
self.content = dictionary
return
def getdict(self):
"""Get the content of the table, return a dictionary. Inverse of put."""
return self.content
def read_from_file(self, filename, separator="|"):
"""Read ranking table from a CSV file."""
filehandle = open(filename, "r")
for line in filehandle.readlines():
(key, value) = line.split(separator)
try:
self.content[int(key)] = int(value)
except ValueError as err:
sys.stderr.write("Warning: %s\n" % err)
return
def lookup_in_lut(self,lut,reclist):
"""This is a mapping function which looks into existing look-up table/dictionary,
the corresponding scores for a record list. It also interpulates using
newton's second order to estimate scores not in the look up table/dictionary """
freq_values=sorted(lut.keys())
looked_up_scores={}
for key,score in reclist.items():
if lut.has_key(reclist[key]):
looked_up_scores[key] = lut[reclist[key]]
else:
for k in range(0, len(freq_values)-1):
if (freq_values[k] < score) and (freq_values[k+1] > score):
looked_up_scores[int(key)]= self.get_interpolated_value(score, freq_values[k], freq_values[k+1], lut[freq_values[k]], lut[freq_values[k+1]])
break
# print "record_pscore:{0}".format(record_pscore)
self.content=looked_up_scores
return
def get_interpolated_value(self,x, x1, x2, y1, y2):
"""Function that utilizes a second order newtons interpulant.
Get the interpolated value base on the two point (x1, y1) and (x2, y2) that x1 < x < x2
Input:
- x: new value
- (x1, y1): the first point of interpolation
- (x2, y2): the second point of interpolation
Output:
- interpolated value of y
"""
y = float(y1) + ((float(y2) - float(y1))*(float(x) - float(x1)))/(float(x2)-float(x1))
return y
def loadlut(self, dictname):
"""Load the ranking table into memory. Inverse of savedict."""
# identifier = run_sql("SELECT id from rnkMETHOD where name=\"%s\"" % dictname)
res = run_sql("SELECT drank_lut FROM rnkDRANKLUT WHERE drank_name=\"%s\"" %dictname)
if res:
self.content = deserialize_via_marshal(res[0][0])
else:
self.content = {}
return
def loaddict(self, dictname):
"""Load the ranking table into memory. Inverse of savedict."""
identifier = run_sql("SELECT id from rnkMETHOD where name=\"%s\"" % dictname)
res = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD=\"%s\"" % identifier[0][0])
if res:
self.content = deserialize_via_marshal(res[0][0])
else:
self.content = {}
return
def savedict(self, dictname):
"""Save ranking table into the database (rewrite). Inverse of loaddict."""
mid = run_sql("SELECT id from rnkMETHOD where name=\"%s\"" % dictname)
self.delete_from_db(dictname)
serdata = serialize_via_marshal(self.content)
midstr = str(mid[0][0])
run_sql("INSERT INTO rnkMETHODDATA(id_rnkMETHOD, relevance_data) VALUES (%s,%s)", (midstr, serdata,))
# run_sql("UPDATE rnkMETHOD SET last_updated=%s WHERE name=%s", (date, rank_method_code))
return
def write(self, mode=1):
"""Print the ranking table to the standard output."""
if mode:
for key in self.content.keys():
sys.stdout.write("%s : %s\n" % (key, self.content[key]))
return
def length(self):
"""Get length of the ranking table."""
return len(self.content.keys())
def rescale(self):
"""Rescale the ranking table so that scores appear on scale in [0, 1]."""
_list = self.content.values()
xmin = min(_list)
xmax = max(_list)
span = xmax - xmin
return map(lambda x: (x-xmin)/span, _list)
def percrescale(self):
"""Rescale the ranking table to integers so that scores appear to be on scale in [0, 100]."""
_content = {}
xmin = min(self.content.values())
xmax = max(self.content.values())
span = xmax - xmin
res = map(lambda y, x: (y, int(100*(x-xmin)/span)), self.content.keys(), self.content.values())
for item in res:
_content[item[0]] = item[1]
self.content = _content
return
def rank(self):
"""Rank. Return list of ranked key/value pairs."""
return map(lambda x:list(x), sorted(self.content.items(), key=lambda (k, v):(v, k)))
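# Illustrative behaviour (hypothetical table, added for clarity):
# {7: 0.2, 3: 0.9, 5: 0.2} ranks to [[5, 0.2], [7, 0.2], [3, 0.9]],
# i.e. ascending by score with ties broken by key.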
def octopus(self, ldict, lweight):
"""Octopus merge. Merge two and more ranking tables in one go."""
allkeys = []
_allkeys = []
_c_dict = {}
for i in range(len(ldict)-1):
for key in ldict[i].keys():
allkeys.append(key)
_allkeys = uniq(allkeys)
counter = 0
for key in _allkeys:
counter += 1
cumulated = 0
for i in range(len(ldict)-1):
dico = ldict[i]
if dico.has_key(key):
cumulated += float(dico[key]) * lweight[i+1]
_c_dict[key] = 1/(1+exp(1)**(-1*cumulated))
self.content = _c_dict
return
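# Added note: the weighted sum accumulated over the input tables is squashed
# through the logistic function 1/(1 + e**(-cumulated)), so every merged score
# lands in (0, 1); for instance a cumulated sum of 0 maps to 0.5.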
def merge(self, a_dict, b_dict, weight=[0, 0, 0]):
"""Merge two ranking tables."""
# c_list = map(lambda x,y: 1/(1+exp(1)**(-1*(w[0]+w[1]*x+w[2]*y))), a_list, b_list)
_a_dict = a_dict.getdict()
_b_dict = b_dict.getdict()
_c_dict = {}
for item in _a_dict.keys():
if _b_dict.has_key(item):
_c_dict[item] = 1/(1+exp(1)**(-1*(weight[0]+weight[1]*_a_dict[item]+weight[2]*_b_dict[item])))
del _b_dict[item]
else:
_c_dict[item] = 1/(1+exp(1)**(-1*(weight[0]+weight[1]*_a_dict[item])))
for item in _b_dict.keys():
_c_dict[item] = 1/(1+exp(1)**(-1*(weight[0]+weight[2]*_b_dict[item])))
self.content = _c_dict
return
def multiply(self, a_dict, b_dict):
"""Create a new table multiplying values from two other ranking tables."""
_a_dict = a_dict.getdict()
_b_dict = b_dict.getdict()
_c_dict = {}
for item in _a_dict.keys():
if _b_dict.has_key(item):
_c_dict[item] = _a_dict[item] * _b_dict[item]
del _b_dict[item]
else:
_c_dict[item] = 0
for item in _b_dict.keys():
_c_dict[item] = 0
self.content = _c_dict
return
def normalize(self, outlier=1):
"""Normalize scores in the ranking table w/kde. Remove outliers first."""
scorelist = self.content.values()
if outlier:
outlier_above = scoreatpercentile(scorelist, 100-outlier)
outlier_below = scoreatpercentile(scorelist, outlier)
scorelist_new = []
for item in scorelist:
if float(item) >= float(outlier_below) and float(item) <= float(outlier_above):
scorelist_new.append(item)
scorelist = scorelist_new
density_estimate = gaussian_kde(array(scorelist))
conversiontable = {}
for item in self.content.items():
self.content[item[0]] = density_estimate.integrate_box_1d(-inf, item[1])
return
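# Added note: after this loop each raw score has been replaced by its
# estimated cumulative probability P(X <= score) under the Gaussian KDE,
# so the normalized scores lie in (0, 1).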
def normalize_take_displays_into_account(self, outlier=1):
"""Normalize scores in the ranking table w/kde. Remove outliers first. Take displays into account."""
_dico = rnkDict()
_dico.loaddict("displays")
mydico = _dico.getdict()
scorelist = []
for key in self.content.keys():
if mydico.has_key(key):
for iter in range(int(mydico[key])):
scorelist.append(self.content[key])
else:
scorelist.append(self.content[key])
if outlier:
outlier_above = scoreatpercentile(scorelist, 100-outlier)
outlier_below = scoreatpercentile(scorelist, outlier)
scorelist_new = []
for item in scorelist:
if float(item) >= float(outlier_below) and float(item) <= float(outlier_above):
scorelist_new.append(item)
scorelist = scorelist_new
density_estimate = gaussian_kde(array(scorelist))
conversiontable = {}
for item in self.content.items():
self.content[item[0]] = density_estimate.integrate_box_1d(-inf, item[1])
return
def _remove_ties(self):
"""Arbitrary tie removal."""
temp = {}
for item in self.content.values():
temp[item] = item
return
def delete_from_db(self, dictname):
"""Delete the ranking table from database."""
identifier = run_sql("SELECT id from rnkMETHOD where name=\"%s\"" % dictname)
run_sql("DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=\"%s\"" % identifier[0][0])
return
def gethirsch(self):
"""Get hirsch index of the ranking table."""
hirsch = 0
counter = 0
ranked = self.rank()
ranked.reverse()
for item in ranked:
counter += 1
if item[1] >= counter:
hirsch += 1
return hirsch
def filter(self, keylist):
"""Filter the ranking table with a given list of keys."""
_content = {}
for key in keylist:
if self.content.has_key(key):
_content[key] = self.content[key]
self.content = _content
return
def clean(self):
"""Clean the ranking table."""
# """Remove empty spaces from strings and make integer."""
_content = {}
for key in self.content.keys():
_value = int(self.content[key].lstrip().rstrip())
_key = int(key.lstrip().rstrip())
_content[_key] = _value
self.content = _content
return
|
pamoakoy/invenio
|
modules/bibrank/lib/bibrank_drank_sorter.py
|
Python
|
gpl-2.0
| 12,825
|
[
"Octopus"
] |
c5cc392e46d643904e7f8ffcd72d6ede69b48a4c1e14f1671f02d1114305deca
|
"""
This is the boilerplate default configuration file.
Changes and additions to settings should be done in the config module
located in the application root rather than this config.
"""
config = {
# webapp2 sessions
'webapp2_extras.sessions' : {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name' : "App Income Trust Service",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang' : 'en',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales' : ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'],
# contact page email settings
'contact_sender' : "PUT_SENDER_EMAIL_HERE",
'contact_recipient' : "PUT_RECIPIENT_EMAIL_HERE",
# Password AES Encryption Parameters
'aes_key' : "12_24_32_BYTES_KEY_FOR_PASSWORDS",
'salt' : "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key' : 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret' : 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very Important: set the site_url= your domain in the application settings in the facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
'fb_secret' : 'PUT_YOUR_FACEBOOK_SECRET_KEY_HERE',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
'linkedin_secret' : 'PUT_YOUR_LINKEDIN_SECRET_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server' : 'github.com',
'github_redirect_uri' : 'http://www.example.com/social_login/github/complete',
'github_client_id' : 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret' : 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key' : "PUT_YOUR_RECAPTCHA_PUBLIC_KEY_HERE",
'captcha_private_key' : "PUT_YOUR_RECAPTCHA_PRIVATE_KEY_HERE",
# Leave blank "google_analytics_domain" if you only want Analytics code
'google_analytics_domain' : "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
'google_analytics_code' : "UA-XXXXX-X",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates' : {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login' : True,
# jinja2 base layout template
'base_layout' : 'base.html',
# send error emails to developers
'send_mail_developer' : True,
# fellas' list
'developers' : (
('Jackie Chang', 'jackie.chang@awitsystems.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email' : True,
# If true, it will write in datastore a log of every visit
'log_visit' : True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} # end config
|
jackie6chang/aits
|
boilerplate/config.py
|
Python
|
lgpl-3.0
| 4,185
|
[
"VisIt"
] |
13b2b5b5ac64b1f080740c9ad9acb92da7488779d4af8c48142c4c28b8502ca3
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.moosefs as moosefs
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {moosefs: {}}
def test_dirinfo():
"""
Test if it returns information on a directory located on the Moose
"""
mock = MagicMock(return_value={"stdout": "Salt:salt"})
with patch.dict(moosefs.__salt__, {"cmd.run_all": mock}):
assert moosefs.dirinfo("/tmp/salt") == {"Salt": "salt"}
def test_fileinfo():
"""
Test if it returns information on a file located on the Moose
"""
mock = MagicMock(return_value={"stdout": ""})
with patch.dict(moosefs.__salt__, {"cmd.run_all": mock}):
assert moosefs.fileinfo("/tmp/salt") == {}
def test_mounts():
"""
Test if it returns a list of current MooseFS mounts
"""
mock = MagicMock(return_value={"stdout": ""})
with patch.dict(moosefs.__salt__, {"cmd.run_all": mock}):
assert moosefs.mounts() == {}
def test_getgoal():
"""
Test if it returns goal(s) for a file or directory
"""
mock = MagicMock(return_value={"stdout": "Salt: salt"})
with patch.dict(moosefs.__salt__, {"cmd.run_all": mock}):
assert moosefs.getgoal("/tmp/salt") == {"goal": "salt"}
|
saltstack/salt
|
tests/pytests/unit/modules/test_moosefs.py
|
Python
|
apache-2.0
| 1,323
|
[
"MOOSE"
] |
b3bf121e523926dc88cf1a6bbd20b0159c937d2090e9817f1d0ed369db57141c
|
from ase.atoms import string2symbols
abinitio_energies = {
'CO_gas': -626.611970497,
'H2_gas': -32.9625308725,
'CH4_gas': -231.60983421,
'H2O_gas': -496.411394229,
'CO_111': -115390.445596,
'C_111': -114926.212205,
'O_111': -115225.106527,
'H_111': -114779.038569,
'CH_111': -114943.455431,
'OH_111': -115241.861661,
'CH2_111': -114959.776961,
'CH3_111': -114976.7397,
'C-O_111': -115386.76440668429,
'H-OH_111': -115257.78796158083,
'H-C_111': -114942.25042955727,
'slab_111': -114762.254842,
}
ref_dict = {}
ref_dict['H'] = 0.5*abinitio_energies['H2_gas']
ref_dict['O'] = abinitio_energies['H2O_gas'] - 2*ref_dict['H']
ref_dict['C'] = abinitio_energies['CH4_gas'] - 4*ref_dict['H']
ref_dict['111'] = abinitio_energies['slab_111']
def get_formation_energies(energy_dict,ref_dict):
formation_energies = {}
for key in energy_dict.keys(): #iterate through keys
E0 = energy_dict[key] #raw energy
name,site = key.split('_') #split key into name/site
if 'slab' not in name: #do not include empty site energy (0)
if site == '111':
E0 -= ref_dict[site] #subtract slab energy if adsorbed
#remove - from transition-states
formula = name.replace('-','')
#get the composition as a list of atomic species
composition = string2symbols(formula)
#for each atomic species, subtract off the reference energy
for atom in composition:
E0 -= ref_dict[atom]
#round to 3 decimals since this is the accuracy of DFT
E0 = round(E0,3)
formation_energies[key] = E0
return formation_energies
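#Worked example using the numbers above (illustrative only): for 'CH_111',
#E_f = E(CH_111) - E(slab_111) - E_ref[C] - E_ref[H], where
#E_ref[H] = 0.5*E(H2_gas) and E_ref[C] = E(CH4_gas) - 4*E_ref[H].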
formation_energies = get_formation_energies(abinitio_energies,ref_dict)
for key in formation_energies:
print(str(key) + ' ' + str(formation_energies[key]))
frequency_dict = {
'CO_gas': [2170],
'H2_gas': [4401],
'CH4_gas':[2917,1534,1534,3019,3019,3019,1306,
1306,1306],
'H2O_gas': [3657, 1595, 3756],
'CO_111': [60.8, 230.9, 256.0, 302.9, 469.9, 1747.3],
'C_111': [464.9, 490.0, 535.9],
'O_111': [359.5, 393.3, 507.0],
'H_111': [462.8, 715.9, 982.5],
'CH_111': [413.3, 437.5, 487.6, 709.6, 735.1, 3045.0],
'OH_111': [55, 340.9, 396.1, 670.3, 718.0, 3681.7],
'CH2_111': [55, 305.5, 381.3, 468.0, 663.4, 790.2, 1356.1,
2737.7, 3003.9],
'CH3_111': [55, 113.5, 167.4, 621.8, 686.0, 702.5, 1381.3,
1417.5, 1575.8, 3026.6, 3093.2, 3098.9],
'C-O_111': [],
'H-OH_111': [],
'H-C_111': []
}
def make_input_file(file_name,energy_dict,frequency_dict):
#create a header
header = '\t'.join(['surface_name','site_name',
'species_name','formation_energy',
'frequencies','reference'])
lines = [] #list of lines in the output
for key in energy_dict.keys(): #iterate through keys
E = energy_dict[key] #raw energy
name,site = key.split('_') #split key into name/site
if 'slab' not in name: #do not include empty site energy (0)
frequency = frequency_dict[key]
if site == 'gas':
surface = None
else:
surface = 'Rh'
outline = [surface,site,name,E,frequency,'Input File Tutorial.']
line = '\t'.join([str(w) for w in outline])
lines.append(line)
lines.sort() #The file is easier to read if sorted (optional)
lines = [header] + lines #add header to top
input_file = '\n'.join(lines) #Join the lines with a line break
outfile = open(file_name,'w') #open the file name in write mode
outfile.write(input_file) #write the text
outfile.close() #close the file
print('Successfully created input file')
file_name = 'energies.txt'
make_input_file(file_name,formation_energies,frequency_dict)
#Test that input is parsed correctly
from catmap.model import ReactionModel
from catmap.parsers import TableParser
rxm = ReactionModel()
#The following lines are normally assigned by the setup_file
#and are thus not usually necessary.
rxm.surface_names = ['Rh']
rxm.adsorbate_names = ('CO','C','O','H','CH','OH','CH2','CH3')
rxm.transition_state_names = ('C-O','H-OH','H-C')
rxm.gas_names = ('CO_g','H2_g','CH4_g','H2O_g')
rxm.site_names = ('s',)
rxm.species_definitions = {'s':{'site_names':['111']}}
#Now we initialize a parser instance (also normally done by setup_file)
parser = TableParser(rxm)
parser.input_file = file_name
parser.parse()
#All structured data is stored in species_definitions; thus we can
#check that the parsing was successful by ensuring that all the
#data in the input file was collected in this dictionary.
for key in rxm.species_definitions:
print(str(key) + ' ' + str(rxm.species_definitions[key]))
|
mhoffman/catmap
|
tutorials/1-generating_input_file/generate_input.py
|
Python
|
gpl-3.0
| 5,160
|
[
"ASE"
] |
94cdf175ed4cc9856ff5fe1c5f160a2a8f10a30b5ff5d3b2daa7ae68a13501d8
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def structure():
from pylada.crystal import Structure
u = 0.25
x, y = u, 0.25 - u
structure = Structure([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]]) \
.add_atom(5.000000e-01, 5.000000e-01, 5.000000e-01, "A") \
.add_atom(5.000000e-01, 2.500000e-01, 2.500000e-01, "A") \
.add_atom(2.500000e-01, 5.000000e-01, 2.500000e-01, "A") \
.add_atom(2.500000e-01, 2.500000e-01, 5.000000e-01, "A") \
.add_atom(8.750000e-01, 8.750000e-01, 8.750000e-01, "B") \
.add_atom(1.250000e-01, 1.250000e-01, 1.250000e-01, "B") \
.add_atom( x, x, x, "X") \
.add_atom( x, y, y, "X") \
.add_atom( y, x, y, "X") \
.add_atom( y, y, x, "X") \
.add_atom( -x, -x, -x, "X") \
.add_atom( -x, -y, -y, "X") \
.add_atom( -y, -x, -y, "X") \
.add_atom(-y, -y, -x, "X")
return structure
@fixture
def Specie():
from collections import namedtuple
return namedtuple('Specie', ['U'])
@fixture
def vasp(Specie):
from pylada.vasp import Vasp
from pylada.vasp.specie import U, nlep
vasp = Vasp()
vasp.species = {'A': Specie([]), 'B': Specie([]), 'X': Specie([])}
for key in list(vasp._input.keys()):
if key not in ['ldau']:
del vasp._input[key]
return vasp
def test_ldau_keyword_without_nlep(vasp, structure):
from pickle import loads, dumps
import pylada
pylada.vasp_has_nlep = False
assert vasp.ldau == True
keyword = vasp._input['ldau']
assert keyword.output_map(vasp=vasp, structure=structure) is None
assert eval(repr(keyword), {'LDAU': keyword.__class__})._value == True
assert eval(repr(keyword), {'LDAU': keyword.__class__}).keyword == 'LDAU'
assert loads(dumps(keyword)).keyword == 'LDAU'
assert loads(dumps(keyword))._value
def test_U_disabled(vasp, structure, Specie):
from pylada.vasp.specie import U
import pylada
pylada.vasp_has_nlep = False
vasp.ldau = False
vasp.species = {'A': Specie([U(2, 0, 0.5)]), 'B': Specie([]), 'X': Specie([])}
assert vasp.ldau == False
assert vasp.output_map(vasp=vasp) is None
def test_enabled_U(vasp, Specie, structure):
from numpy import all, abs, array
from pylada.vasp.specie import U
import pylada
pylada.vasp_has_nlep = False
vasp.species = {'A': Specie([U(2, 0, 0.5)]), 'B': Specie([]), 'X': Specie([])}
vasp.ldau = True
assert vasp.ldau == True
map = vasp.output_map(vasp=vasp, structure=structure)
assert map['LDAU'] == '.TRUE.'
assert map['LDAUTYPE'] == '2'
assert all(abs(array(map['LDAUJ'].split(), dtype='float64')) < 1e-8)
assert all(abs(array(map['LDAUU'].split(), dtype='float64') - [0.5, 0, 0]) < 1e-8)
assert all(abs(array(map['LDAUL'].split(), dtype='float64') - [0, -1, -1]) < 1e-8)
def test_enabled_complex_U(vasp, Specie, structure):
from numpy import all, abs, array
from pylada.vasp.specie import U
import pylada
pylada.vasp_has_nlep = False
vasp.species = {'A': Specie([U(2, 0, 0.5)]), 'B': Specie([U(2, 1, 0.6)]), 'X': Specie([])}
vasp.ldau = True
map = vasp.output_map(vasp=vasp, structure=structure)
assert map['LDAU'] == '.TRUE.'
assert map['LDAUTYPE'] == '2'
assert all(abs(array(map['LDAUJ'].split(), dtype='float64')) < 1e-8)
assert all(abs(array(map['LDAUU'].split(), dtype='float64') - [0.5, 0.6, 0]) < 1e-8)
assert all(abs(array(map['LDAUL'].split(), dtype='float64') - [0, 1, -1]) < 1e-8)
def test_disabled_nlep(vasp, Specie, structure):
from numpy import all, abs, array
from pylada.vasp.specie import U, nlep
import pylada
pylada.vasp_has_nlep = True
vasp.species = {
'A': Specie([U(2, 0, 0.5)]),
'B': Specie([U(2, 0, -0.5), nlep(2, 1, -1.0)]),
'X': Specie([])
}
vasp.ldau = False
assert vasp.ldau == False
assert vasp.output_map(vasp=vasp) is None
def test_enabled_nlep(vasp, Specie, structure):
from numpy import all, abs, array
from pylada.vasp.specie import U, nlep
import pylada
pylada.vasp_has_nlep = True
vasp.species = {
'A': Specie([U(2, 0, 0.5)]),
'B': Specie([U(2, 0, -0.5), nlep(2, 1, -1.0)]),
'X': Specie([])
}
vasp.ldau = True
map = vasp.output_map(vasp=vasp, structure=structure)
assert map['LDAU'] == '.TRUE.'
assert map['LDAUTYPE'] == '2'
assert all(abs(array(map['LDUL1'].split(), dtype='float64') - [0, 0, -1]) < 1e-8)
assert all(abs(array(map['LDUU1'].split(), dtype='float64') - [0.5, -0.5, 0]) < 1e-8)
assert all(abs(array(map['LDUJ1'].split(), dtype='float64') - [0, 0, 0]) < 1e-8)
assert all(abs(array(map['LDUO1'].split(), dtype='float64') - [1, 1, 1]) < 1e-8)
assert all(abs(array(map['LDUL2'].split(), dtype='float64') - [-1, 1, -1]) < 1e-8)
assert all(abs(array(map['LDUU2'].split(), dtype='float64') - [0, -1.0, 0]) < 1e-8)
assert all(abs(array(map['LDUJ2'].split(), dtype='float64') - [0, 0, 0]) < 1e-8)
assert all(abs(array(map['LDUO2'].split(), dtype='float64') - [1, 2, 1]) < 1e-8)
def test_enabled_complex_nlep(vasp, structure, Specie):
from numpy import all, abs, array
from pylada.vasp.specie import U, nlep
import pylada
pylada.vasp_has_nlep = True
vasp.species = {
'A': Specie([U(2, 0, 0.5)]),
'B': Specie([U(2, 0, -0.5), nlep(2, 2, -1.0, -3.0)]),
'X': Specie([])
}
vasp.ldau = True
map = vasp.output_map(vasp=vasp, structure=structure)
assert map['LDAU'] == '.TRUE.'
assert map['LDAUTYPE'] == '2'
assert all(abs(array(map['LDUL1'].split(), dtype='float64') - [0, 0, -1]) < 1e-8)
assert all(abs(array(map['LDUU1'].split(), dtype='float64') - [0.5, -0.5, 0]) < 1e-8)
assert all(abs(array(map['LDUJ1'].split(), dtype='float64') - [0, 0, 0]) < 1e-8)
assert all(abs(array(map['LDUO1'].split(), dtype='float64') - [1, 1, 1]) < 1e-8)
assert all(abs(array(map['LDUL2'].split(), dtype='float64') - [-1, 2, -1]) < 1e-8)
assert all(abs(array(map['LDUU2'].split(), dtype='float64') - [0, -1.0, 0]) < 1e-8)
assert all(abs(array(map['LDUJ2'].split(), dtype='float64') - [0, -3.0, 0]) < 1e-8)
assert all(abs(array(map['LDUO2'].split(), dtype='float64') - [1, 3, 1]) < 1e-8)
|
pylada/pylada-light
|
tests/vasp/test_uparams_attribute.py
|
Python
|
gpl-3.0
| 7,548
|
[
"CRYSTAL",
"VASP"
] |
a53fb5c8af00b70daa84c40236b4bca22316a81b3ad6fce10405cb6eda1448e5
|
"""
Common functions for tests
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '10/29/13'
# Stdlib
import json
import logging
import os
import subprocess
import sys
import tempfile
import traceback
import unittest
# Third-party
from mongomock import MongoClient
import pymongo
# Package
from matgendb.query_engine import QueryEngine
from matgendb.builders.incr import CollectionTracker
class MockQueryEngine(QueryEngine):
"""Mock (fake) QueryEngine, unless a real connection works.
"""
def __init__(self, host="127.0.0.1", port=27017, database="vasp",
user=None, password=None, collection="tasks",
aliases_config=None, default_properties=None):
try:
QueryEngine.__init__(self, host=host, port=port, database=database,
user=user, password=password, collection=collection,
aliases_config=aliases_config,
default_properties=default_properties)
print("@@ connected to real Mongo")
return # actually connected! not mocked.
except:
pass
self.connection = MongoClient(self.host, self.port)
self.db = self.connection[database]
self._user, self._password = user, password
self.host = host
self.port = port
self.database_name = database
# collection name is now a @property; the setter will set "self.collection" internally
self.collection_name = collection
self.set_aliases_and_defaults(aliases_config=aliases_config,
default_properties=default_properties)
# -----------------------------------
# Component test classes / functions
# -----------------------------------
def get_component_logger(name, strm=sys.stdout):
log = logging.getLogger(name)
if 'TEST_DEBUG' in os.environ:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
_h = logging.StreamHandler(strm)
log.addHandler(_h)
return log
class ComponentTest(unittest.TestCase):
DB = 'testdb'
SRC = 'source'
DST = 'dest'
MGBUILD_CMD = ["mgbuild", "run"]
def setUp(self):
self.db = self.connect(True)
self.src, self.dst = self.db[self.SRC], self.db[self.DST]
self.src_conf, self.dst_conf = self.create_configs()
def mgbuild(self, args):
try:
s = subprocess.check_output(self.MGBUILD_CMD + args,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print("ERROR: {}".format(err.output))
raise
return s
def connect(self, clear=False):
"""Connect to Mongo DB
:return: pymongo Database
"""
c = pymongo.MongoClient()
db = c[self.DB]
if clear:
for coll in self.SRC, self.DST:
db[coll].remove()
tcoll = coll + '.' + CollectionTracker.TRACKING_NAME
db[tcoll].remove() # remove tracking as well
return db
def get_record(self, i):
return {
"number": i,
"data": [
1, 2, 3
],
"name": "mp-{:d}".format(i)
}
def add_records(self, coll, n):
for i in range(n):
coll.insert(self.get_record(i))
def create_configs(self):
base = {"host": "localhost",
"port": 27017,
"database": self.DB,
"collection": None}
files = []
for coll in (self.SRC, self.DST):
f = tempfile.NamedTemporaryFile(suffix=".json")
base['collection'] = coll
json.dump(base, f)
f.flush()
files.append(f)
return files
def tearDown(self):
pass
def run_command(self, args, options):
"""Run the command-line given by the list
in `args`, adding each entry of the `options` dictionary as a
long-form flag: "--{key}" followed by its value (when non-empty).
"""
for key, value in options.items():
args.append("--{}".format(key))
if value:
args.append(value)
return subprocess.call(args)
|
migueldiascosta/pymatgen-db
|
matgendb/tests/common.py
|
Python
|
mit
| 4,261
|
[
"VASP"
] |
c1a2deaadda81d7e1ae3700734fe58f23b37277d846fea2b67f89fab3f6b32fa
|
#!@Python_EXECUTABLE@
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import argparse
import atexit
import datetime
import json
import os
import sys
import warnings
from pathlib import Path
# yapf: disable
parser = argparse.ArgumentParser(description="Psi4: Open-Source Quantum Chemistry", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-i", "--input", default="input.dat",
help="Input file name. Default: input.dat.")
parser.add_argument("-o", "--output", help="""\
Redirect output elsewhere.
Default: when input filename is 'input.dat', 'output.dat'.
Otherwise, output filename defaults to input filename with
any '.in' or 'dat' extension replaced by '.out'""")
parser.add_argument("-a", "--append", action='store_true',
help="Appends results to output file. Default: Truncate first")
parser.add_argument("-V", "--version", action='store_true',
help="Prints version information.")
parser.add_argument("-n", "--nthread", default=1,
help="Number of threads to use. Psi4 disregards OMP_NUM_THREADS/MKL_NUM_THREADS.")
parser.add_argument("--memory", default=524288000,
help="The amount of memory to use. Can be specified with units (e.g., '10MB') otherwise bytes is assumed.")
parser.add_argument("-s", "--scratch",
help="Scratch directory to use. Overrides PSI_SCRATCH.")
parser.add_argument("-m", "--messy", action='store_true',
help="Leaves temporary files after the run is completed.")
# parser.add_argument("-d", "--debug", action='store_true', help="Flush the outfile at every print statement.")
# parser.add_argument("-r", "--restart", action='store_true', help="Number to be used instead of process id.")
# parser.add_argument("-p", "--prefix", help="Prefix name for psi files. Default psi")
parser.add_argument("--psiapi-path", action='store_true',
help="""Generates a bash command to source correct Python """
"""interpreter and path for ``python -c "import psi4"``""")
parser.add_argument("-v", "--verbose", action='store_true', help="Prints Psithon to Python translation.")
parser.add_argument("--inplace", action='store_true',
help="Runs Psi4 from the source directory. !Warning! expert option.")
parser.add_argument("-l", "--psidatadir",
help="Specifies where to look for the Psi4 data directory. Overrides PSIDATADIR. !Warning! expert option.")
parser.add_argument("-k", "--skip-preprocessor", action='store_true',
help="Skips input preprocessing. !Warning! expert option.")
parser.add_argument("--qcschema", "--schema", action='store_true',
help="Runs input file as QCSchema. Can either be JSON or MessagePack input.")
parser.add_argument("--json", action='store_true',
help="Runs a JSON input file. !Warning! depcrated option in 1.4, use --qcschema instead.")
parser.add_argument("-t", "--test", nargs='?', const='smoke', default=None,
help="Runs pytest tests. If `pytest-xdist` installed, parallel with `--nthread`.")
parser.add_argument("--mdi", default=None,
help="Sets MDI configuration options")
# For plugins
parser.add_argument("--plugin-name", help="""\
Creates a new directory with files for writing a new plugin.
You can specify an additional argument that specifies a
template to use, for example
>>> psi4 --plugin-name mygreatcode --plugin-template mointegrals""")
parser.add_argument('--plugin-template',
choices=['ambit', 'aointegrals', 'basic', 'dfmp2', 'mointegrals', 'scf', 'sointegrals', 'wavefunction'],
help='Selects new plugin template to use.')
parser.add_argument('--plugin-compile', action='store_true', help="""\
Generates a CMake command for building a plugin against this Psi4 installation.
>>> cd <plugin_directory>
>>> `psi4 --plugin-compile`
>>> make
>>> psi4""")
# yapf: enable
# print("Environment Variables\n");
# print(" PSI_SCRATCH Directory where scratch files are written.")
# print(" Default: $TMPDIR (or /tmp/ when not set)")
# print(" This should be a local, not network, disk")
# parser.print_help()
args, unknown = parser.parse_known_args()
args = args.__dict__ # Namespace object seems silly
# Figure out pythonpath
cmake_install_prefix = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '..')
lib_dir = os.path.sep.join([cmake_install_prefix, "@CMAKE_INSTALL_LIBDIR@", "@PYMOD_INSTALL_LIBDIR@"])
if args["inplace"]:
if "CMAKE_INSTALL_LIBDIR" not in lib_dir:
raise ImportError("Cannot run inplace from an installed directory.")
import sysconfig
core_location = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "core" + sysconfig.get_config_var("EXT_SUFFIX")
if not os.path.isfile(core_location):
raise ImportError("A compiled Psi4 core{} needs to be symlinked to the {} folder".format(
sysconfig.get_config_var("EXT_SUFFIX"), os.path.dirname(__file__)))
lib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if ("PSIDATADIR" not in os.environ.keys()) and (not args["psidatadir"]):
data_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "share", "psi4"])
os.environ["PSIDATADIR"] = data_dir
elif "CMAKE_INSTALL_LIBDIR" in lib_dir:
raise ImportError("Psi4 was not installed correctly!")
# Replace input/output if unknown kwargs
if len(unknown) > 0:
args["input"] = unknown[0]
if len(unknown) > 1:
args["output"] = unknown[1]
if len(unknown) > 2:
raise KeyError(f"Too many unknown arguments: {unknown}")
# Figure out output arg
if (args["output"] is None) and (args["qcschema"] is False):
if args["input"] == "input.dat":
args["output"] = "output.dat"
elif args["input"].endswith(".in"):
args["output"] = args["input"][:-2] + "out"
elif args["input"].endswith(".dat"):
args["output"] = args["input"][:-3] + "out"
else:
args["output"] = args["input"] + ".dat"
# Plugin compile line
if args['plugin_compile']:
share_cmake_dir = os.path.sep.join([cmake_install_prefix, 'share', 'cmake', 'psi4'])
plugincachealongside = os.path.isfile(share_cmake_dir + os.path.sep + 'psi4PluginCache.cmake')
if plugincachealongside:
print("""cmake -C {}/psi4PluginCache.cmake -DCMAKE_PREFIX_PATH={} .""".format(
share_cmake_dir, cmake_install_prefix))
sys.exit()
else:
print("""Install "psi4-dev" via `conda install psi4-dev -c psi4[/label/dev]`, then reissue command.""")
if args['psiapi_path']:
pyexe_dir = os.path.dirname("@Python_EXECUTABLE@")
bin_dir = Path(cmake_install_prefix) / 'bin'
print(f"""export PATH={pyexe_dir}:$PATH # python interpreter\nexport PATH={bin_dir}:$PATH # psi4 executable\nexport PYTHONPATH={lib_dir}:$PYTHONPATH # psi4 pymodule""")
sys.exit()
# Transmit any argument psidatadir through environ
if args["psidatadir"] is not None:
data_dir = os.path.abspath(os.path.expanduser(args["psidatadir"]))
os.environ["PSIDATADIR"] = data_dir
### Actually import psi4 and apply setup ###
# Arrange for warnings to ignore everything except the message
def custom_formatwarning(msg, *args, **kwargs):
return str(msg) + '\n'
warnings.formatwarning = custom_formatwarning
# Import installed psi4
sys.path.insert(1, lib_dir)
import psi4 # isort:skip
if args["version"]:
print(psi4.__version__)
sys.exit()
# Prevents a poor option combination
if args['plugin_template'] and (not args['plugin_name']):
raise KeyError("Please specify a '--plugin-name' for your plugin template!")
if args['plugin_name']:
# Set the flag
if not args['plugin_template']:
args['plugin_template'] = 'basic'
# This call does not return.
psi4.pluginutil.create_plugin(args['plugin_name'], args['plugin_template'])
sys.exit()
if args["test"] is not None:
if args["test"] not in ['smoke', 'quick', 'full', 'long']:
raise KeyError("The test category {} does not exist.".format(args["test"]))
nthread = int(args["nthread"])
if nthread == 1:
extras = None
else:
extras = ['-n', str(nthread)]
retcode = psi4.test(args["test"], extras=extras)
sys.exit(retcode)
if not os.path.isfile(args["input"]):
raise KeyError("The file %s does not exist." % args["input"])
args["input"] = os.path.normpath(args["input"])
# Setup scratch_messy
_clean_functions = [psi4.core.clean, psi4.extras.clean_numpy_files]
# Setup outfile
if args["append"] is None:
args["append"] = False
if (args["output"] != "stdout") and (args["qcschema"] is False):
psi4.core.set_output_file(args["output"], args["append"])
# Set a few options
psi4.core.set_num_threads(int(args["nthread"]), quiet=True)
psi4.set_memory(args["memory"], quiet=True)
psi4.extras._input_dir_ = os.path.dirname(os.path.abspath(args["input"]))
if args["qcschema"] is False:
psi4.print_header()
start_time = datetime.datetime.now()
# Initialize MDI
if args["mdi"] is not None:
psi4.mdi_engine.mdi_init(args["mdi"])
# Prepare scratch for inputparser
if args["scratch"] is not None:
if not os.path.isdir(args["scratch"]):
raise Exception("Passed in scratch is not a directory (%s)." % args["scratch"])
psi4.core.IOManager.shared_object().set_default_path(os.path.abspath(os.path.expanduser(args["scratch"])))
# If this is a json or qcschema call, compute and stop
if args["qcschema"]:
import qcelemental as qcel
# Handle the reading and deserialization manually
filename = args["input"]
if filename.endswith("json"):
encoding = "json"
with open(filename, 'r') as handle:
# No harm in attempting to read json-ext over json
data = qcel.util.deserialize(handle.read(), "json-ext")
elif filename.endswith("msgpack"):
encoding = "msgpack-ext"
with open(filename, 'rb') as handle:
data = qcel.util.deserialize(handle.read(), "msgpack-ext")
else:
raise Exception("qcschema files must either end in '.json' or '.msgpack'.")
psi4.extras._success_flag_ = True
clean = True
if args["messy"]:
clean = False
for func in _clean_functions:
atexit.unregister(func)
ret = psi4.schema_wrapper.run_qcschema(data, clean=clean)
if args["output"] is not None:
filename = args["output"]
if filename.endswith("json"):
encoding = "json"
elif filename.endswith("msgpack"):
encoding = "msgpack-ext"
# Else write with whatever encoding came in
if encoding == "json":
with open(filename, 'w') as handle:
handle.write(ret.serialize(encoding))
elif encoding == "msgpack-ext":
with open(filename, 'wb') as handle:
handle.write(ret.serialize(encoding))
sys.exit()
if args["json"]:
with open(args["input"], 'r') as f:
json_data = json.load(f)
psi4.extras._success_flag_ = True
psi4.extras.exit_printing(start_time)
json_data = psi4.schema_wrapper.run_json(json_data)
with open(args["input"], 'w') as f:
json.dump(json_data, f)
if args["output"] != "stdout":
os.unlink(args["output"])
sys.exit()
# Read input
with open(args["input"]) as f:
content = f.read()
# Preprocess
if not args["skip_preprocessor"]:
# PSI_SCRATCH must be set before this call!
content = psi4.process_input(content)
# Handle Verbose
if args["verbose"]:
psi4.core.print_out('\nParsed Psithon:')
psi4.core.print_out(content)
psi4.core.print_out('-' * 75)
# Handle Messy
if args["messy"]:
for func in _clean_functions:
atexit.unregister(func)
# Register exit printing, failure GOTO coffee ELSE beer
atexit.register(psi4.extras.exit_printing, start_time=start_time)
# Run the program!
try:
exec(content)
psi4.extras._success_flag_ = True
# Capture _any_ python error message
except Exception as exception:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
tb_str = "Traceback (most recent call last):\n"
tb_str += ''.join(traceback.format_tb(exc_traceback))
tb_str += '\n'
tb_str += ''.join(traceback.format_exception_only(type(exception), exception))
psi4.core.print_out("\n")
psi4.core.print_out(tb_str)
psi4.core.print_out("\n\n")
in_str = "Printing out the relevant lines from the Psithon --> Python processed input file:\n"
lines = content.splitlines()
try:
suspect_lineno = traceback.extract_tb(exc_traceback)[1].lineno - 1 # -1 for 0 indexing
except IndexError:
# module error where lineno useless (e.g., `print "asdf"`)
pass
else:
first_line = max(0, suspect_lineno - 5) # Try to show five lines back...
last_line = min(len(lines), suspect_lineno + 6) # Try to show five lines forward
for lineno in range(first_line, last_line):
mark = "--> " if lineno == suspect_lineno else " "
in_str += mark + lines[lineno] + "\n"
psi4.core.print_out(in_str)
# extract exception message and print it in a box for attention.
ex = ','.join(traceback.format_exception_only(type(exception), exception))
ex_list = ex.split(":", 1)[-1]
error = ''.join(ex_list)
psi4.core.print_out(psi4.driver.p4util.text.message_box(error))
if psi4.core.get_output_file() != "stdout":
print(tb_str)
print(in_str)
print(psi4.driver.p4util.text.message_box(error))
sys.exit(1)
|
lothian/psi4
|
psi4/run_psi4.py
|
Python
|
lgpl-3.0
| 14,586
|
[
"Psi4"
] |
eeae13b268cc02944ac3aa6e5252c3e6bcbf15829f1c5b73d9cab4f3276deae6
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pylab
import numpy
import sys
def main():
"""
This example illustrates loading, and running a kinetic model
for a bistable positive feedback system, defined in kkit format.
This is based on Bhalla, Ram and Iyengar, Science 2002.
The core of this model is a positive feedback loop comprising of
the MAPK cascade, PLA2, and PKC. It receives PDGF and Ca2+ as
inputs.
This model is quite a large one and due to some stiffness in its
equations, it runs somewhat slowly.
The simulation illustrated here shows how the model starts out in
a state of low activity. It is induced to 'turn on' when a
PDGF stimulus is given for 400 seconds.
After it has settled to the new 'on' state, the model is made to
'turn off'
by setting the system calcium levels to zero for a while. This
is a somewhat unphysiological manipulation!
"""
solver = "gsl" # Pick any of gsl, gssa, ee..
#solver = "gssa" # Pick any of gsl, gssa, ee..
mfile = '../../genesis/acc35.g'
runtime = 2000.0
if ( len( sys.argv ) == 2 ):
solver = sys.argv[1]
modelId = moose.loadModel( mfile, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
compt = moose.element( '/model/kinetics' )
compt.volume = 5e-19
moose.reinit()
moose.start( 500 )
moose.element( '/model/kinetics/PDGFR/PDGF' ).concInit = 0.0001
moose.start( 400 )
moose.element( '/model/kinetics/PDGFR/PDGF' ).concInit = 0.0
moose.start( 2000 )
moose.element( '/model/kinetics/Ca' ).concInit = 0.0
moose.start( 500 )
moose.element( '/model/kinetics/Ca' ).concInit = 0.00008
moose.start( 2000 )
# Display all plots.
img = mpimg.imread( 'mapkFB.png' )
fig = plt.figure( figsize=(12, 10 ) )
png = fig.add_subplot( 211 )
imgplot = plt.imshow( img )
ax = fig.add_subplot( 212 )
x = moose.wildcardFind( '/model/#graphs/conc#/#' )
t = numpy.arange( 0, x[0].vector.size, 1 ) * x[0].dt
ax.plot( t, x[0].vector, 'b-', label=x[0].name )
ax.plot( t, x[1].vector, 'c-', label=x[1].name )
ax.plot( t, x[2].vector, 'r-', label=x[2].name )
ax.plot( t, x[3].vector, 'm-', label=x[3].name )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Time (seconds)' )
pylab.legend()
pylab.show()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
dilawar/moose-full
|
moose-examples/tutorials/ChemicalBistables/mapkFB.py
|
Python
|
gpl-2.0
| 3,118
|
[
"MOOSE"
] |
eef69f615bbd5449682a8edbc20e2618c0c4be640eaed032c3f68ebdb1fa0eee
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
import json
import sys
import re
import random
from twitterbot import TwitterBot
# from os.path import expanduser
class InsultBird(TwitterBot):
def bot_init(self):
"""
Initialize and configure your bot!
Use this function to set options and initialize your own custom bot
state (if any).
"""
############################
# REQUIRED: LOGIN DETAILS! #
############################
with open("etc/access.json", 'r') as access:
authdb = json.load(access)
self.config['api_key'] = authdb["API_Key"]
self.config['api_secret'] = authdb["API_Secret"]
self.config['access_key'] = authdb["Access_Token"]
self.config['access_secret'] = authdb["Access_Token_Secret"]
access.close()
######################################
# SEMI-OPTIONAL: OTHER CONFIG STUFF! #
######################################
# how often to tweet, in seconds
self.config['tweet_interval'] = 24 * 60 # 24 minutes
# use this to define a (min, max) random range of how often to tweet
# e.g., self.config['tweet_interval_range'] = (5*60, 10*60) # tweets every 5-10 minutes
# Tweet range: every 24 to 74 minutes
self.config['tweet_interval_range'] = (24 * 60, 74 * 60)
# only reply to tweets that specifically mention the bot
self.config['reply_direct_mention_only'] = True
# only include bot followers (and original tweeter) in @-replies
self.config['reply_followers_only'] = False
# fav any tweets that mention this bot?
self.config['autofav_mentions'] = True
# fav any tweets containing these keywords?
self.config['autofav_keywords'] = []
# follow back all followers?
self.config['autofollow'] = True
###########################################
# CUSTOM: your bot's own state variables! #
###########################################
# If you'd like to save variables with the bot's state, use the
# self.state dictionary. These will only be initialized if the bot is
# not loading a previous saved state.
# self.state['butt_counter'] = 0
# You can also add custom functions that run at regular intervals
# using self.register_custom_handler(function, interval).
#
# For instance, if your normal timeline tweet interval is every 30
# minutes, but you'd also like to post something different every 24
# hours, you would implement self.my_function and add the following
# line here:
# self.register_custom_handler(self.my_function, 60 * 60 * 24)
# log path
# home = expanduser("~")
self.config['log_path'] = '/home/brian/var/bot_logs/'
# what's up with reply interval?
self.config['reply_interval'] = 2 * 60
def get_insult(self):
with open("etc/nouns.json", 'r') as noun:
nounlist = json.load(noun)
nouns = nounlist["nounwords"]
with open("etc/adjectives.json", 'r') as adjectivejson:
adjectivelist = json.load(adjectivejson)
adjectives = adjectivelist["adjectivewords"]
with open("etc/amounts.json", 'r') as amountjson:
amountlist = json.load(amountjson)
amounts = amountlist["amountwords"]
with open("etc/starters.json", 'r') as starterjson:
starterlist = json.load(starterjson)
starters = starterlist["starterterms"]
starter = starters[random.randint(0, len(starters) - 1)]
adj1 = adjectives[random.randint(0, len(adjectives) - 1)]
adj2 = adjectives[random.randint(0, len(adjectives) - 1)]
noun = nouns[random.randint(0, len(nouns) - 1)]
amount = amounts[random.randint(0, len(amounts) - 1)]
if adj1 == adj2:
adj2 = adjectives[random.randint(0, len(adjectives) - 1)]
if not adj1[0] in 'aeiou':
an = 'a'
else:
an = 'an'
return "{starter} {aan} {adjective1} {amount} of {adjective2} {noun}".format(starter=starter, aan=an, adjective1=adj1, amount=amount, adjective2=adj2, noun=noun)
def on_scheduled_tweet(self):
text = self.get_insult()
self.post_tweet(text)
def on_mention(self, tweet, prefix):
text = self.get_insult()
        prefixed_text = prefix + ' ' + text
        self.post_tweet(prefixed_text, reply_to=tweet)
def on_timeline(self, tweet, prefix):
pass
"""
if random.randrange(100) < 2:
text = self.get_insult()
self.post_tweet(text, reply_to=tweet)
else:
self.favorite_tweet(tweet)
"""
if __name__ == '__main__':
bot = InsultBird()
bot.run()
|
brianshumate/insult-bird
|
insult-bird.py
|
Python
|
mit
| 4,896
|
[
"Brian"
] |
10efabf1b64738dc1ce019bfc8601b068a4a8a4155e7dc3adcc508d122158523
|
#!/usr/bin/env python
##########################################################################
#
# Copyright 2008 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
##########################################################################
'''Trace data model.'''
import sys
import string
import binascii
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import format
class Node:
def visit(self, visitor):
raise NotImplementedError
def __str__(self):
stream = StringIO()
formatter = format.DefaultFormatter(stream)
pretty_printer = PrettyPrinter(formatter)
self.visit(pretty_printer)
return stream.getvalue()
class Literal(Node):
def __init__(self, value):
self.value = value
def visit(self, visitor):
visitor.visit_literal(self)
class Blob(Node):
def __init__(self, value):
self._rawValue = None
self._hexValue = value
def getValue(self):
if self._rawValue is None:
self._rawValue = binascii.a2b_hex(self._hexValue)
self._hexValue = None
return self._rawValue
def visit(self, visitor):
visitor.visit_blob(self)
class NamedConstant(Node):
def __init__(self, name):
self.name = name
def visit(self, visitor):
visitor.visit_named_constant(self)
class Array(Node):
def __init__(self, elements):
self.elements = elements
def visit(self, visitor):
visitor.visit_array(self)
class Struct(Node):
def __init__(self, name, members):
self.name = name
self.members = members
def visit(self, visitor):
visitor.visit_struct(self)
class Pointer(Node):
def __init__(self, address):
self.address = address
def visit(self, visitor):
visitor.visit_pointer(self)
class Call:
def __init__(self, no, klass, method, args, ret, time):
self.no = no
self.klass = klass
self.method = method
self.args = args
self.ret = ret
self.time = time
def visit(self, visitor):
visitor.visit_call(self)
class Trace:
def __init__(self, calls):
self.calls = calls
def visit(self, visitor):
visitor.visit_trace(self)
class Visitor:
def visit_literal(self, node):
raise NotImplementedError
def visit_blob(self, node):
raise NotImplementedError
def visit_named_constant(self, node):
raise NotImplementedError
def visit_array(self, node):
raise NotImplementedError
def visit_struct(self, node):
raise NotImplementedError
def visit_pointer(self, node):
raise NotImplementedError
def visit_call(self, node):
raise NotImplementedError
def visit_trace(self, node):
raise NotImplementedError
class PrettyPrinter:
def __init__(self, formatter):
self.formatter = formatter
def visit_literal(self, node):
if node.value is None:
self.formatter.literal('NULL')
return
if isinstance(node.value, basestring):
self.formatter.literal('"' + node.value + '"')
return
self.formatter.literal(repr(node.value))
def visit_blob(self, node):
self.formatter.address('blob()')
def visit_named_constant(self, node):
self.formatter.literal(node.name)
def visit_array(self, node):
self.formatter.text('{')
sep = ''
for value in node.elements:
self.formatter.text(sep)
value.visit(self)
sep = ', '
self.formatter.text('}')
def visit_struct(self, node):
self.formatter.text('{')
sep = ''
for name, value in node.members:
self.formatter.text(sep)
self.formatter.variable(name)
self.formatter.text(' = ')
value.visit(self)
sep = ', '
self.formatter.text('}')
def visit_pointer(self, node):
self.formatter.address(node.address)
def visit_call(self, node):
self.formatter.text('%s ' % node.no)
if node.klass is not None:
self.formatter.function(node.klass + '::' + node.method)
else:
self.formatter.function(node.method)
self.formatter.text('(')
sep = ''
for name, value in node.args:
self.formatter.text(sep)
self.formatter.variable(name)
self.formatter.text(' = ')
value.visit(self)
sep = ', '
self.formatter.text(')')
if node.ret is not None:
self.formatter.text(' = ')
node.ret.visit(self)
if node.time is not None:
self.formatter.text(' // time ')
node.time.visit(self)
def visit_trace(self, node):
for call in node.calls:
call.visit(self)
self.formatter.newline()
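# Hedged usage sketch (added for illustration; it is not part of the original
# Mesa source). Node.__str__ above already routes a node through PrettyPrinter
# and format.DefaultFormatter, so pretty printing a small tree needs no extra
# plumbing; the expected output shown is approximate.
if __name__ == '__main__':
    vertex = Struct('vertex', [('x', Literal(0.5)),
                               ('coords', Array([Literal(1), Literal(2)]))])
    print str(vertex) # roughly: {x = 0.5, coords = {1, 2}}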
|
execunix/vinos
|
xsrc/external/mit/MesaLib/dist/src/gallium/tools/trace/model.py
|
Python
|
apache-2.0
| 6,211
|
[
"VisIt"
] |
827fb335a0fc87c203baab587c903a9cc61f7e5815dbe81023ac94254c567d75
|
def codegen_helper():
import clr
clr.AddReference("IronPython")
from IronPython.Compiler import Ast
def get_Subclass_of(rt):
for y in [getattr(Ast, x) for x in dir(Ast)]:
yt = clr.GetClrType(y)
if rt == yt: continue
if yt.IsAbstract: continue
if yt.IsSubclassOf(rt):
yield yt.Name
all_types = []
all_exprs = []
all_stmts = []
nodeRt = clr.GetClrType(Ast.Node)
exprRt = clr.GetClrType(Ast.Expression)
stmtRt = clr.GetClrType(Ast.Statement)
all_exprs = [x for x in get_Subclass_of(exprRt)]
all_stmts = [x for x in get_Subclass_of(stmtRt)]
all_types = [x for x in get_Subclass_of(nodeRt)]
other_types = list(set(['Expression', 'Statement']) | set(all_types) - set(all_exprs) - set(all_stmts))
print(" public override Node Visit(Node node) {")
print(" if (node == null) return null;")
print(" Type type = node.GetType();")
for i in range(len(other_types)):
tn = other_types[i]
if i != 0: print(" else", end=' ')
print(" if (type == typeof(%s)) {" % tn)
print(" return Visit%s(node as %s);" % (tn, tn))
print("}", end=' ')
print()
print("throw new ApplicationException(\"unknown node\");")
print(" }")
for (n, ts) in [('Expression', all_exprs), ('Statement', all_stmts)]:
print(" public virtual Node Visit%s(%s node) {" % (n, n))
print(" if (node == null) return null;")
print(" Type type = node.GetType();")
for i in range(len(ts)):
tn = ts[i]
if i != 0: print(" else", end=' ')
print(" if (type == typeof(%s)) {" % tn)
print(" return Visit%s(node as %s);" % (tn, tn))
print("}", end=' ')
print()
print("throw new ApplicationException(\"unknown %s\");" % n.lower())
print(" }")
import nt, sys
import clr
import System
import pickle
clr.AddReference("PythonStyle")
from PythonStyle import *
from System.IO import *
testdir = r"C:\Workspaces\dlr\Languages\IronPython\Tests"
cpy_testdir = r"C:\Workspaces\dlr\Languages\IronPython\Tests\CPy_Testcases"
if System.IO.Directory.Exists(testdir+r"\Transformed"):
System.IO.Directory.Delete(testdir+r"\Transformed", True)
System.IO.Directory.CreateDirectory(testdir+r"\Transformed")
filename_to_g = lambda f: Path.Combine(testdir, "Transformed\\g_" + Path.GetFileName(f))
filename_to_3 = lambda f: f.lower().replace("merlin2", "merlin3")
generated = filename_to_g
def get_test_file(directory, pattern):
for x in System.IO.Directory.GetFiles(directory, pattern):
yield x
for x in System.IO.Directory.GetDirectories(directory):
for y in get_test_file(x, pattern):
yield y
def try_one(visitor, test_file, stdout=False):
test_file = testdir + "\\" + test_file
rw = Rewriter(visitor)
if stdout:
rw.Convert(test_file)
else:
rw.Convert(test_file, generated(test_file))
def try_all(visitor):
rw = Rewriter(visitor)
if (ipy_only):
if System.IO.Directory.Exists(testdir+r"\CPy_Testcases"):
System.IO.Directory.Delete(testdir+r"\CPy_Testcases", True)
for f in get_test_file(testdir, "test_*.py"):
#list of files to skip
f_name = Path.GetFileName(f)
if (f_name=="test_builds.py"):
pass
elif (f_name=="test_complex.py"):
pass
elif (f_name=="test_dllsite.py"):
pass
elif (f_name=="test_formatting.py"):
pass
elif (f_name=="test_namebinding.py"):
pass
elif (f_name=="test_nofuture.py"):
pass
elif (f_name=="test_nt.py"):
pass
elif (f_name=="test_number.py"):
pass
elif (f_name=="test_numtypes.py"):
pass
elif (f_name=="test_superconsole.py"):
pass
elif (f_name=="test_threadsafety.py"):
pass
elif (f_name=="test_traceback.py"):
pass
else:
print("Attempting to transform: ", f_name)
rw.Convert(f, generated(f))
print("\nNumber of test cases transformed: ", rw.transformCount, "/", rw.fileCount)
_f_list = open('Transformed\\untrans_list.txt', 'w')
_f_list.write("Files not transformed:\n")
for f in rw.PrintFileList():
f_name = Path.GetFileName(f)
_f_list.write(f_name+'\n')
System.IO.File.Delete(testdir+"\\Transformed\\g_"+Path.GetFileName(f))
_f_list.close()
def cpy_try_all(visitor):
rw = Rewriter(visitor)
for f in get_test_file(cpy_testdir, "test_*.py"):
#list of files to skip
f_name = Path.GetFileName(f)
if (f_name=="test_builds.py"):
pass
else:
print("Attempting to transform: ", f_name)
rw.Convert(f, generated(f))
print("\nNumber of test cases transformed: ", rw.transformCount, "/", rw.fileCount)
_f_list = open('Transformed\\untrans_list.txt', 'w')
_f_list.write("Files not transformed:\n")
for f in rw.PrintFileList():
f_name = Path.GetFileName(f)
_f_list.write(f_name+'\n')
System.IO.File.Delete(testdir+"\\Transformed\\g_"+Path.GetFileName(f))
_f_list.close()
def compile_all():
for f in get_test_file(testdir, "test_*.py"):
f = generated(f)
        fo = open(f)
lines = fo.readlines()
fo.close()
try:
print("compiling", f, end=' ')
compile("\n".join(lines), f, "exec")
print("pass")
except:
print("fail")
print(sys.exc_info()[1])
if sys.argv[1]=="try_one":
exec("t_obj = " + sys.argv[2] + "()")
try_one(t_obj, sys.argv[3])
elif sys.argv[1]=="try_ipy":
exec("t_obj = " + sys.argv[2] + "()")
ipy_only = True
try_all(t_obj)
elif sys.argv[1]=="try_cpy":
exec("t_obj = " + sys.argv[2] + "()")
cpy_try_all(t_obj)
elif sys.argv[1]=="try_all":
exec("t_obj = " + sys.argv[2] + "()")
ipy_only = False
try_all(t_obj)
else:
print("Error: unrecognized argument")
#codegen_helper()
#try_all(CallableClassVisitor())
#try_all(DynamicMethodVisitor())
#try_all(NestedFunctionVisitor())
#try_all(PropertyVisitor())
#try_all(DecoratorVisitor())
#cpy_try_all(CallableClassVisitor())
#cpy_try_all(DynamicMethodVisitor())
#cpy_try_all(NestedFunctionVisitor())
#cpy_try_all(PropertyVisitor())
#cpy_try_all(DecoratorVisitor())
#compile_all()
#try_one(DecoratorVisitor(), "test_CPickle.py")
#try_one(NestedFunctionVisitor(), "test_methodbinder2.py")
#try_one(CallableClassVisitor(), "kevin_test.py")
#try_one(StandardVisitor(), "kevin_test.py", False)
#try_one(YieldReturnVisitor(), "test_isinstance.py")
#try_one(LambdaExprVisitor(), "test_generator_throw.py")
#try_one(DummyInterfaceVisitor(), "test_class.py")
#try_one(SubClassingVisitor(), "test_class.py")
#try_one(ParameterDefaultValueVisitor(), "test_class.py")
#try_one(PropertyVisitor(), "kevin_test.py")
#try_one(PropertyVisitor(), "test_class.py")
|
IronLanguages/ironpython3
|
Tests/transform.py
|
Python
|
apache-2.0
| 7,398
|
[
"VisIt"
] |
3caf511894eb50046c4fcdfd6ce8c3371e43234970c193ea7cb57bd2e76eec55
|
"""
Several utilities to reduce clutter in my policy gradient codes.
(c) April-June 2017 (mostly) by Daniel Seita
"""
import numpy as np
import tensorflow as tf
import scipy.signal
import sys
def gauss_log_prob_1(mu, logstd, x):
"""
Calls `gauss_log_prob` with a broadcasted version of logstd. Assumes that
logstd is of shape (n,) and mu is of shape (n,a).
"""
assert mu.get_shape()[1:] == x.get_shape()[1:]
assert len(logstd.get_shape()) == 1
logstd_broadcasted = tf.ones(shape=tf.shape(mu), dtype=tf.float32) * logstd
return gauss_log_prob(mu, logstd_broadcasted, x)
def gauss_log_prob(mu, logstd, x):
""" Used for computing the log probability, following the formula for the
multivariate Gaussian density.
All the inputs should have shape (n,a). The `gp_na` contains component-wise
    log probabilities; the reduce_sum then results in a tensor of size (n,) which
contains the log probability for each of the n elements. (We later perform a
mean on this.) Also, the 2*pi part needs 1/2, but doesn't need the sum over
the number of components (# of actions) because of the reduce sum here.
Finally, logstd doesn't need a 1/2 constant because log(\sigma_i^2) will
bring the 2 over.
This formula generalizes for an arbitrary number of actions, BUT it assumes
that the covariance matrix is diagonal.
"""
var_na = tf.exp(2*logstd)
gp_na = -tf.square(x - mu)/(2*var_na) - 0.5*tf.log(tf.constant(2*np.pi)) - logstd
return tf.reduce_sum(gp_na, axis=[1])
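# Hedged numpy reference (added for clarity; not part of the original file).
# It mirrors the diagonal-Gaussian log density built in the TF graph above and
# is handy for unit-testing it. Inputs follow the same (n,a) shape convention.
def gauss_log_prob_np(mu_na, logstd_na, x_na):
    """ Plain numpy version of gauss_log_prob, returning a length-n array. """
    var_na = np.exp(2 * logstd_na)
    gp_na = -np.square(x_na - mu_na) / (2 * var_na) \
            - 0.5 * np.log(2 * np.pi) - logstd_na
    return np.sum(gp_na, axis=1)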
def gauss_KL_1(mu1, logstd1, mu2, logstd2):
"""
    Calls `gauss_KL` with a broadcasted version of logstd1 and logstd2. Assumes
that logstd1 and logstd2 are of shape (n,).
"""
assert mu1.get_shape()[1:] == mu2.get_shape()[1:]
assert len(logstd1.get_shape()) == 1
assert len(logstd2.get_shape()) == 1
logstd1_broadcasted = tf.ones(shape=tf.shape(mu1), dtype=tf.float32) * logstd1
logstd2_broadcasted = tf.ones(shape=tf.shape(mu2), dtype=tf.float32) * logstd2
return gauss_KL(mu1, logstd1_broadcasted, mu2, logstd2_broadcasted)
def gauss_KL(mu1, logstd1, mu2, logstd2):
""" Returns KL divergence among two multivariate Gaussians, component-wise.
It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
It is not necessary to know the number of actions because reduce_sum will
sum over this to get the `d` constant offset. The part consisting of the
trace in the formula is blended with the mean difference squared due to the
common "denominator" of var2_na. This forumula generalizes for an arbitrary
number of actions. I think mu2 and logstd2 should represent the policy
before the update.
Returns the KL divergence for each of the n components in the minibatch,
then we do a reduce_mean outside this.
"""
var1_na = tf.exp(2.*logstd1)
var2_na = tf.exp(2.*logstd2)
tmp_matrix = 2.*(logstd2 - logstd1) + (var1_na + tf.square(mu1-mu2))/var2_na - 1
kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1]) # Don't forget the 1/2 !!
assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n])
with tf.control_dependencies([assert_op]):
kl_n = tf.identity(kl_n)
return kl_n
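# Hedged numpy reference (added for clarity; not in the original file): the
# same diagonal-Gaussian KL as gauss_KL above, useful for checking the TF op.
def gauss_KL_np(mu1, logstd1, mu2, logstd2):
    """ KL( N(mu1, e^{2 logstd1}) || N(mu2, e^{2 logstd2}) ), one value per row. """
    var1 = np.exp(2. * logstd1)
    var2 = np.exp(2. * logstd2)
    kl_na = (logstd2 - logstd1) + (var1 + np.square(mu1 - mu2)) / (2. * var2) - 0.5
    return np.sum(kl_na, axis=1)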
def normc_initializer(std=1.0):
""" Initialize array with normalized columns """
def _initializer(shape, dtype=None, partition_info=None): #pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def dense(x, size, name, weight_init=None):
""" Dense (fully connected) layer """
w = tf.get_variable(name + "/w", [x.get_shape()[1], size], initializer=weight_init)
b = tf.get_variable(name + "/b", [size], initializer=tf.zeros_initializer())
return tf.matmul(x, w) + b
def fancy_slice_2d(X, inds0, inds1):
""" Like numpy's X[inds0, inds1] """
inds0 = tf.cast(inds0, tf.int64)
inds1 = tf.cast(inds1, tf.int64)
shape = tf.cast(tf.shape(X), tf.int64)
ncols = shape[1]
Xflat = tf.reshape(X, [-1])
return tf.gather(Xflat, inds0 * ncols + inds1)
def discount(x, gamma):
"""
Compute discounted sum of future values. Returns a list, NOT a scalar!
out[i] = in[i] + gamma * in[i+1] + gamma^2 * in[i+2] + ...
"""
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
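# Hedged illustration (added; not in the original utils): the lfilter call in
# discount() is equivalent to the explicit backward recursion below, which is
# easier to read and convenient as a sanity check in tests.
def discount_reference(x, gamma):
    """ out[i] = x[i] + gamma * out[i+1], computed with a plain loop. """
    out = np.zeros(len(x))
    running = 0.0
    for i in reversed(range(len(x))):
        running = x[i] + gamma * running
        out[i] = running
    return out
# e.g. both discount([1., 1., 1.], 0.9) and discount_reference([1., 1., 1.], 0.9)
# give roughly [2.71, 1.9, 1.0].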
def lrelu(x, leak=0.2):
""" Performs a leaky ReLU operation. """
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
def explained_variance_1d(ypred,y):
"""
    Returns 1 - Var[y - ypred] / Var[y], i.e. the fraction of variance explained.
https://www.quora.com/What-is-the-meaning-proportion-of-variance-explained-in-linear-regression
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
def categorical_sample_logits(logits):
"""
Samples (symbolically) from categorical distribution, where logits is a NxK
matrix specifying N categorical distributions with K categories
    specifically, exp(logits) / sum( exp(logits), axis=1 ) gives the
    probabilities of the different classes.
    Cleverly uses the Gumbel trick, based on
https://github.com/tensorflow/tensorflow/issues/456
"""
U = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1)
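# Hedged numpy sketch (added for clarity; not in the original file): the same
# Gumbel-max trick without TF, drawing one class index per row of logits.
def categorical_sample_logits_np(logits_nk):
    u = np.random.uniform(size=logits_nk.shape)
    # argmax of (logits + Gumbel noise) is a sample from softmax(logits).
    return np.argmax(logits_nk - np.log(-np.log(u)), axis=1)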
def pathlength(path):
return len(path["reward"])
|
DanielTakeshi/rl_algorithms
|
utils/utils_pg.py
|
Python
|
mit
| 5,503
|
[
"Gaussian"
] |
9d527ce5363d0f34b4845b4531b9437d784e87ed9a5f8cd4197a5226b9dc7c09
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
urljoin,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
_YOUTUBE_CLIENT_HEADERS = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '1.20200609.04.02',
}
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
_DEFAULT_API_DATA = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
}
def _call_api(self, ep, query, video_id):
data = self._DEFAULT_API_DATA.copy()
data.update(query)
response = self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
note='Downloading API JSON', errnote='Unable to download API page',
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'},
query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
return response
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._search_regex(
r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;',
webpage, 'yt initial data'),
video_id)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
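    # Illustrative note (added; not in youtube-dl upstream): for a player URL
    # ending in, say, '/hypothetical1/player_ias.vflset/en_US/base.js' (a made-up
    # example), the first pattern above would yield ('js', 'hypothetical1');
    # the second pattern instead covers the older '...vflXXXX...' style URLs.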
def _extract_signature_function(self, video_id, player_url, example_sig):
player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
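            # Compress the index list into readable slice expressions (s[a:b:c])
            # wherever consecutive indices form an arithmetic progression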
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' is contained the second
# regex won't capture the whole JSON. Yet working around by trying more
# concrete regex first keeping in mind proper quoted string handling
# to be implemented in future that will replace this workaround (see
# https://github.com/ytdl-org/youtube-dl/issues/7468,
# https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
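            # Build {lang: [format dicts]} by rewriting the tlang/fmt query parameters of the base caption URL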
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_chapters_from_json(self, webpage, video_id, duration):
if not webpage:
return
player = self._parse_json(
self._search_regex(
r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
'player args', default='{}'),
video_id, fatal=False)
if not player or not isinstance(player, dict):
return
watch_next_response = player.get('watch_next_response')
if not isinstance(watch_next_response, compat_str):
return
response = self._parse_json(watch_next_response, video_id, fatal=False)
if not response or not isinstance(response, dict):
return
chapters_list = try_get(
response,
lambda x: x['playerOverlays']
['playerOverlayRenderer']
['decoratedPlayerBarRenderer']
['decoratedPlayerBarRenderer']
['playerBar']
['chapteredPlayerBarRenderer']
['chapters'],
list)
if not chapters_list:
return
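        # timeRangeStartMillis is expressed in milliseconds; convert it to seconds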
def chapter_time(chapter):
return float_or_none(
try_get(
chapter,
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
int),
scale=1000)
chapters = []
for next_num, chapter in enumerate(chapters_list, start=1):
start_time = chapter_time(chapter)
if start_time is None:
continue
end_time = (chapter_time(chapters_list[next_num])
if next_num < len(chapters_list) else duration)
if end_time is None:
continue
title = try_get(
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
compat_str)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': title,
})
return chapters
@staticmethod
def _extract_chapters_from_description(description, duration):
if not description:
return None
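        # Chapter entries in the description are seekTo anchor links whose link
        # text is the chapter start timestamp (e.g. 1:23 or 12:34:56)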
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _extract_chapters(self, webpage, description, video_id, duration):
return (self._extract_chapters_from_json(webpage, video_id, duration)
or self._extract_chapters_from_description(description, duration))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
or re.search(r'player-age-gate-content">', video_webpage) is not None):
age_gate = True
            # We simulate access to the video via www.youtube.com/v/{video_id},
            # which can be viewed without logging into YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
if not video_info and not player_response:
player_response = extract_player_response(
self._search_regex(
r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
'initial player response', default='{}'),
video_id)
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
microformat = try_get(
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = video_details.get('shortDescription')
if video_description is None:
video_description = self._html_search_meta('description', video_webpage)
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
                        # Unquoting must happen after splitting on commas, since unquoted
                        # textual fields may themselves contain commas (see
                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if view_count is None and microformat:
view_count = int_or_none(microformat.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
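            # Second pass: build the actual format entries, skipping DRM-protected formats and decrypting ciphered URLs where needed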
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
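                    # The 's' parameter of ciphered URLs is encrypted; locate the JS
                    # player so the signature can be decrypted further below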
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = (
r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
r'"jsUrl"\s*:\s*("[^"]+")',
r'"assets":.+?"js":\s*("[^"]+")')
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
if owner_profile_url:
video_uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
default=None)
video_uploader_url = owner_profile_url
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
thumbnails = []
thumbnails_list = try_get(
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
for t in thumbnails_list:
if not isinstance(t, dict):
continue
thumbnail_url = url_or_none(t.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
if not thumbnails:
video_thumbnail = None
            # We first try to get a high-quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
if thumbnail_url:
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
if video_thumbnail:
thumbnails.append({'url': video_thumbnail})
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
                    album = mobj.group('album').strip()
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
category = None
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
if not category:
category = try_get(
microformat, lambda x: x['category'], compat_str)
video_categories = None if category is None else [category]
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
if not video_tags:
video_tags = try_get(video_details, lambda x: x['keywords'], list)
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
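                    # Some manifest URLs embed an encrypted signature in an /s/<sig>
                    # path segment; rewrite it to /signature/<decrypted> before fetching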
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
                        # Additional DASH manifests may end up in HTTP Error 403, therefore
                        # allow them to fail without a bug report message if some previous
                        # DASH manifest already succeeded. This is a temporary workaround to
                        # reduce the burst of bug reports until we figure out the reason and
                        # whether it can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnails': thumbnails,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubeTabIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com tab'
_VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
IE_NAME = 'youtube:tab'
_TESTS = [{
# playlists, multipage
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, multipage, different order
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'ThirstForScience',
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
# basic, single video playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
# empty playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
# Home tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
},
'playlist_mincount': 2,
}, {
# Videos tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 975,
}, {
# Videos tab, sorted by popular
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 199,
}, {
# Playlists tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
},
'playlist_mincount': 17,
}, {
# Community tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
},
'playlist_mincount': 18,
}, {
# Channels tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
},
'playlist_mincount': 138,
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCT-K0qO8z6NzWrywqefBPBQ',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'ChRiStIaAn008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'Cauchemar89',
},
'playlist_mincount': 1123,
}, {
# even larger playlist, 8832 videos
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'InterstellarMovie1',
},
'playlist_mincount': 21,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'Computerphile',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
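        # /live URLs are handled by YoutubeLiveIE; everything else falls back to the regular _VALID_URL check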
return False if YoutubeLiveIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_grid_item_renderer(item):
for item_kind in ('Playlist', 'Video', 'Channel'):
renderer = item.get('grid%sRenderer' % item_kind)
if renderer:
return renderer
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(
renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
compat_str)
duration = parse_duration(try_get(
renderer, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(
renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = int_or_none(self._search_regex(
r'^(\d+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(
renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
return {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
# channel
channel_id = renderer.get('channelId')
if channel_id:
title = try_get(
renderer, lambda x: x['title']['simpleText'], compat_str)
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
def _shelf_entries_trimmed(self, shelf_renderer):
renderer = try_get(
shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
if not renderer:
return
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
def _shelf_entries(self, shelf_renderer):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if not shelf_url:
return
title = try_get(
shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
yield self.url_result(shelf_url, video_title=title)
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
video_id = None
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
@staticmethod
def _extract_next_continuation_data(renderer):
next_continuation = try_get(
renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
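    # A continuation dict doubles as the query string of the next /browse_ajax
    # request: 'ctoken'/'continuation' carry the paging token, 'itct' the
    # click-tracking params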
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = renderer.get('contents')
if not isinstance(contents, list):
return
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
dict)
if not continuation_ep:
continue
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
continue
ctp = continuation_ep.get('clickTrackingParams')
if not ctp:
continue
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
def _entries(self, tab):
continuation = None
slr_contents = tab['sectionListRenderer']['contents']
for slr_content in slr_contents:
if not isinstance(slr_content, dict):
continue
is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
if not is_renderer:
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:
for entry in self._playlist_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('gridRenderer')
if renderer:
for entry in self._grid_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('shelfRenderer')
if renderer:
for entry in self._shelf_entries(renderer):
yield entry
continue
renderer = isr_content.get('backstagePostThreadRenderer')
if renderer:
for entry in self._post_thread_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('videoRenderer')
if renderer:
entry = self._video_entry(renderer)
if entry:
yield entry
if not continuation:
continuation = self._extract_continuation(is_renderer)
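        # Page through the remaining entries via the browse_ajax endpoint until no continuation token is returned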
for page_num in itertools.count(1):
if not continuation:
break
browse = self._download_json(
'https://www.youtube.com/browse_ajax', None,
'Downloading page %d' % page_num,
headers={
'x-youtube-client-name': '1',
'x-youtube-client-version': '2.20201030.01.00',
}, query=continuation, fatal=False)
if not browse:
break
response = try_get(browse, lambda x: x[1]['response'], dict)
if not response:
break
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict)
if continuation_contents:
continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
if continuation_renderer:
for entry in self._playlist_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('gridContinuation')
if continuation_renderer:
for entry in self._grid_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('itemSectionContinuation')
if continuation_renderer:
for entry in self._post_thread_continuation_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_items = try_get(
response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
if continuation_items:
continuation_item = continuation_items[0]
if not isinstance(continuation_item, dict):
continue
renderer = continuation_item.get('playlistVideoRenderer')
if renderer:
video_list_renderer = {'contents': continuation_items}
for entry in self._playlist_entries(video_list_renderer):
yield entry
continuation = self._extract_continuation(video_list_renderer)
continue
break
@staticmethod
def _extract_selected_tab(tabs):
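        # for/else: the else branch only runs when no tab is marked as selected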
for tab in tabs:
if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
return tab['tabRenderer']
else:
raise ExtractorError('Unable to find selected tab')
def _real_extract(self, url):
channel_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
webpage = self._download_webpage(url, channel_id)
data = self._extract_yt_initial_data(channel_id, webpage)
tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
selected_tab = self._extract_selected_tab(tabs)
channel_title = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
compat_str)
channel_external_id = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
compat_str)
tab_title = selected_tab.get('title')
title = channel_title or channel_id
if tab_title:
title += ' - %s' % tab_title
return self.playlist_result(
self._entries(selected_tab['content']),
playlist_id=channel_external_id or channel_id,
playlist_title=title)
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 982,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'sdragonfang',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
},
'skip': 'This playlist does not exist',
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
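        # Defer to YoutubeTabIE for any URL it can handle; only fall back to this
        # legacy playlist extractor otherwise.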
return False if YoutubeTabIE.suitable(url) else super(
YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
return self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtUserIE(InfoExtractor):
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = None
_TESTS = []
def _entries(self, query, n):
data = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
'query': query,
}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
search = self._download_json(
'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
video_id='query "%s"' % query,
note='Downloading page %s' % page_num,
errnote='Unable to download API page', fatal=False,
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'})
if not search:
break
slr_contents = try_get(
search,
(lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
list)
if not slr_contents:
break
isr_contents = try_get(
slr_contents,
lambda x: x[0]['itemSectionRenderer']['contents'],
list)
if not isr_contents:
break
for content in isr_contents:
if not isinstance(content, dict):
continue
video = content.get('videoRenderer')
if not isinstance(video, dict):
continue
video_id = video.get('videoId')
if not video_id:
continue
title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = int_or_none(self._search_regex(
r'^(\d+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
total += 1
yield {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
if total == n:
return
token = try_get(
slr_contents,
lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
compat_str)
if not token:
break
data['continuation'] = token
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_SEARCH_PARAMS = 'CAI%3D'
r"""
class YoutubeSearchURLIE(YoutubeSearchIE):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
"""
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            # 'recommended' feed has infinite 'load more' and each new portion serves
            # the same videos in a (sometimes) slightly different order, so we check
            # for uniqueness and break when a portion contains no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape,
headers=self._YOUTUBE_CLIENT_HEADERS)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
nyuszika7h/youtube-dl
|
youtube_dl/extractor/youtube.py
|
Python
|
unlicense
| 150,830
|
[
"ADF"
] |
e3dc14438db66b361899b823fba7937576fb283841ecf156196e1769c9fe5b68
|
## DensityProfiles.py
## Dr. Harold Barnard and Jonathan Terry
## 8/21/2014
import numpy as np
import matplotlib.pyplot as plt
## Aggregation of various density profiles, maintaining the total density profile of
## the target material.
## Used for statistics management in simulation
class TargetRegion(object):
def __init__(self, densityProfiles):
self.densityProfiles = densityProfiles
self.totalTargetLength = densityProfiles[0].totalTargetDistance
self.interactions = []
def recordReaction(self, depthIndex):
totalNumberDensity = 0
composition = {}
for elem in self.densityProfiles:
name = elem.name
numberDensity = elem.profileRange[depthIndex]
totalNumberDensity += numberDensity
if (name in composition):
composition[name] += numberDensity
else:
composition[name] = numberDensity
print "REACTION RECORDED AT " + str(depthIndex/100.0)
print "Element\t Percentage\n"
for elem in composition.keys():
composition[elem] = 100*composition[elem]/totalNumberDensity
if (composition[elem] > .001):
print elem + "\t" + str(composition[elem]) + "\n"
def visualizeTotalTarget(self):
frame, fig = plt.subplots()
for elem in self.densityProfiles:
fig.plot(elem.profileDomain, elem.profileRange, label = elem.regionName)
legend = fig.legend(loc = 'upper right', shadow = True)
plt.title('Target Density Profile')
plt.xlabel('Target Depth')
plt.ylabel('Number Densities in Target')
plt.show()
## Supports the creation of either gaussian or step function density
## distributions. Across a target region (compilation of density profiles)
## the totalTargetDistance must be constant.
## totalTargetDistance = total thickness of target
## regionName = name of region (used for statistical analysis)
## regionSymbol = symbol for element that region is made of
## shape = shape of desired number density distribution (either Gaussian or rectangular)
## mu = average of Gaussian OR center of rectangle
## sigma = standard dev of Gaussian OR half width of rectangle
## scale = maximum number density of distribution
## edgeTolerance = standard dev of Gaussian used to smooth rectangular distributions
class DensityProfile(object):
def __init__(self, totalTargetDistance, regionName, regionSymbol, shape, mu, sigma, scale, edgeTolerance = 1):
self.profileDomain = np.linspace(0, totalTargetDistance, 10000)
self.totalTargetDistance = totalTargetDistance
self.regionName = regionName
with open('Universal Data Table.txt') as elementData:
for line in elementData.readlines()[5:]:
line.strip()
col0, col1, col2, col3, col4, col5 = line.split()
if (regionSymbol == col1):
self.atomicNumber = int(col0)
self.symbol = col1
self.name = col2
self.atomicMass = int(col0)/float(col3)
self.meanIonization = float(col4)
self.density = float(col5)
break
if (shape == 'Gaussian'):
gaussScale = scale*sigma*np.sqrt(2*np.pi)
self.profileRange = self.gaussian(self.profileDomain, mu, sigma, gaussScale)
else:
self.profileRange = self.rectangle(self.profileDomain, mu, sigma, scale)
self.smooth(mu, sigma, scale, edgeTolerance)
def smooth(self, mu, sigma, scale, edgeTolerance):
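        ## Soften the hard edges of a rectangular profile by subtracting narrow
        ## Gaussians centred on the two edges (mu-sigma and mu+sigma);
        ## edgeTolerance sets the width of the smoothing Gaussians.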
gaussScale = scale*edgeTolerance*np.sqrt(2*np.pi)
self.profileContourSubtract('Gaussian', mu-sigma, edgeTolerance, gaussScale)
self.profileContourSubtract('Gaussian', mu+sigma, edgeTolerance, gaussScale)
def profileContourAdd(self, shape, mu, sigma, scale):
if (shape == 'Gaussian'):
appendage = self.gaussian(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange + appendage
else:
appendage = self.rectangle(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange + appendage
def profileContourSubtract(self, shape, mu, sigma, scale):
if (shape == 'Gaussian'):
appendage = self.gaussian(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange - appendage
np.clip(self.profileRange, 0, np.inf, out = self.profileRange)
else:
appendage = self.rectangle(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange - appendage
np.clip(self.profileRange, 0, np.inf, out = self.profileRange)
def addProfile(self, profile):
self.profileRange = self.profileRange + profile.profileRange
def subtractProfile(self, profile):
self.profileRange = self.profileRange - profile.profileRange
def gaussian(self, x, mu, sigma, scale):
scaledNormalization = scale/np.sqrt(2*np.pi*sigma*sigma)
gaussian = lambda i: scaledNormalization*np.exp(-((i-mu)**2)/(2*sigma**2))
return np.array([gaussian(i) for i in x])
def rectangle(self, x, mu, sigma, scale):
heaviside = lambda i: scale if (i > (mu-sigma) and i < (mu+sigma)) else 0
return np.array([heaviside(i) for i in x])
def visualize(self):
print "REGION SELECTED: " + self.regionName
print "Displaying distribution for " + self.name + " in a " + str(self.totalTargetDistance) + " micron target."
plt.xlabel('Target Depth')
plt.ylabel(self.name + " Number Density in Target")
plt.plot(self.profileDomain, self.profileRange)
plt.show()
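## Hedged usage sketch (illustrative only, not part of the original module):
## builds a silicon bulk with a Gaussian hydrogen implant, assuming the symbols
## 'Si' and 'H' appear in 'Universal Data Table.txt'.
if __name__ == '__main__':
    bulk = DensityProfile(10, 'substrate', 'Si', 'Rectangular', 5, 5, 5e22)
    implant = DensityProfile(10, 'implant', 'H', 'Gaussian', 2, 0.5, 1e21)
    target = TargetRegion([bulk, implant])
    target.visualizeTotalTarget()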
|
hbar/python-ChargedParticleTools
|
lib/ChargedParticleTools/DensityProfiles.py
|
Python
|
mit
| 5,843
|
[
"Gaussian"
] |
0d6eec3a0805c7846ea7c0a9a40a09b3fee9b572e6f13246f36801a440169206
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from collections import namedtuple
import itertools
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, MDRaidArrayDevice, ZFCPDiskDevice
from blivet.fcoe import has_fcoe
from pyanaconda.flags import flags
from pyanaconda.i18n import CN_, CP_
from pyanaconda.ui.lib.disks import getDisks, isLocalDisk
from pyanaconda.ui.gui.utils import enlightbox
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.gui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]
DiskStoreRow = namedtuple("DiskStoreRow", ["visible", "selected", "mutable",
"name", "type", "model", "capacity",
"vendor", "interconnect", "serial",
"wwid", "paths", "port", "target",
"lun", "ccw"])
class FilterPage(object):
"""A FilterPage is the logic behind one of the notebook tabs on the filter
UI spoke. Each page has its own specific filtered model overlaid on top
of a common model that holds all non-advanced disks.
A Page is created once, when the filter spoke is initialized. It is
    set up multiple times - each time the spoke is revisited. When the Page
    is set up, it is given a complete view of all disks that belong on this
Page. This is because certain pages may require populating a combo with
all vendor names, or other similar tasks.
This class is just a base class. One subclass should be created for each
more specialized type of page. Only one instance of each subclass should
ever be created.
"""
def __init__(self, storage, builder):
"""Create a new FilterPage instance.
Instance attributes:
builder -- A reference to the Gtk.Builder instance containing
this page's UI elements.
filterActive -- Whether the user has chosen to filter results down
on this page. If set, visible_func should take the
filter UI elements into account.
storage -- An instance of a blivet object.
"""
self.builder = builder
self.storage = storage
self.model = None
self.filterActive = False
    def ismember(self, device):
        """Does device belong on this page? This function should take into
account what kind of thing device is. It should not be concerned
with any sort of filtering settings. It only determines whether
device belongs.
"""
return True
def setup(self, store, selectedNames, disks):
"""Do whatever setup of the UI is necessary before this page can be
displayed. This function is called every time the filter spoke
is revisited, and thus must first do any cleanup that is necessary.
The setup function is passed a reference to the master store, a list
of names of disks the user has selected (either from a previous visit
or via kickstart), and a list of all disk objects that belong on this
page as determined from the ismember method.
At the least, this method should add all the disks to the store. It
may also need to populate combos and other lists as appropriate.
"""
pass
def clear(self):
"""Blank out any filtering-related fields on this page and return them
to their defaults. This is called when the Clear button is clicked.
"""
pass
def visible_func(self, model, itr, *args):
"""This method is called for every row (disk) in the store, in order to
determine if it should be displayed on this page or not. This method
should take into account whether filterActive is set, perhaps whether
something in pyanaconda.flags is setup, and other settings to make
a final decision. Because filtering can be complicated, many pages
will want to farm this decision out to another method.
The return value is a boolean indicating whether the row is visible
or not.
"""
return True
def setupCombo(self, combo, items):
"""Populate a given GtkComboBoxText instance with a list of items. The
combo will first be cleared, so this method is suitable for calling
repeatedly. The first item in the list will be selected by default.
"""
combo.remove_all()
for i in sorted(items):
combo.append_text(i)
if items:
combo.set_active(0)
class SearchPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("searchModel")
self.model.set_visible_func(self.visible_func)
self._lunEntry = self.builder.get_object("searchLUNEntry")
self._wwidEntry = self.builder.get_object("searchWWIDEntry")
self._combo = self.builder.get_object("searchTypeCombo")
self._portCombo = self.builder.get_object("searchPortCombo")
self._targetEntry = self.builder.get_object("searchTargetEntry")
def setup(self, store, selectedNames, disks):
self._combo.set_active(0)
self._combo.emit("changed")
ports = []
for disk in disks:
if hasattr(disk, "node"):
ports.append(str(disk.node.port))
self.setupCombo(self.builder.get_object("searchPortCombo"), ports)
def clear(self):
self._lunEntry.set_text("")
self._portCombo.set_active(0)
self._targetEntry.set_text("")
self._wwidEntry.set_text("")
def _port_equal(self, device):
active = self._portCombo.get_active_text()
if active and hasattr(device, "node"):
return device.node.port == active
else:
return True
def _target_equal(self, device):
active = self._targetEntry.get_text().strip()
if active:
return active in getattr(device, "initiator", "")
else:
return True
def _lun_equal(self, device):
active = self._lunEntry.get_text().strip()
if active and hasattr(device, "node"):
try:
return int(active) == device.node.tpgt
except ValueError:
return True
else:
return True
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
elif filterBy == 2:
return hasattr(device, "wwid") and self._wwidEntry.get_text() in device.wwid
elif filterBy == 3:
return hasattr(device, "fcp_lun") and self._lunEntry.get_text() in device.fcp_lun
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self._filter_func(device)
class MultipathPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("multipathModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("multipathTypeCombo")
self._icCombo = self.builder.get_object("multipathInterconnectCombo")
self._vendorCombo = self.builder.get_object("multipathVendorCombo")
self._wwidEntry = self.builder.get_object("multipathWWIDEntry")
def ismember(self, device):
return isinstance(device, MultipathDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
disk.wwid, "\n".join(paths), "", "",
"", ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active(0)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._vendorCombo.set_active(0)
self._wwidEntry.set_text("")
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == 2:
return device.bus == self._icCombo.get_active_text()
elif filterBy == 3:
return self._wwidEntry.get_text() in device.wwid
def visible_func(self, model, itr, *args):
if not flags.mpath:
return False
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("otherModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("otherTypeCombo")
self._icCombo = self.builder.get_object("otherInterconnectCombo")
self._idEntry = self.builder.get_object("otherIDEntry")
self._vendorCombo = self.builder.get_object("otherVendorCombo")
def ismember(self, device):
return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)
def _long_identifier(self, disk):
# For iSCSI devices, we want the long ip-address:port-iscsi-tgtname-lun-XX
# identifier, but blivet doesn't expose that in any useful way and I don't
# want to go asking udev. Instead, we dig around in the deviceLinks and
# default to the name if we can't figure anything else out.
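        # e.g. a hypothetical link like
        # /dev/disk/by-path/ip-192.168.1.10:3260-iscsi-iqn.2010-01.com.example:tgt-lun-0
        # would yield everything after the final slash.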
for link in disk.deviceLinks:
if "by-path" in link:
lastSlash = link.rindex("/")+1
return link[lastSlash:]
return disk.name
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
selected = disk.name in selectedNames
if hasattr(disk, "node"):
port = str(disk.node.port)
lun = str(disk.node.tpgt)
else:
port = ""
lun = ""
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
self._long_identifier(disk), "", port, getattr(disk, "initiator", ""),
lun, ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active(0)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._idEntry.set_text("")
self._vendorCombo.set_active(0)
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == 2:
return device.bus == self._icCombo.get_active_text()
elif filterBy == 3:
for link in device.deviceLinks:
if "by-path" in link:
return self._idEntry.get_text().strip() in link
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class RaidPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("raidModel")
self.model.set_visible_func(self.visible_func)
def ismember(self, device):
return isinstance(device, MDRaidArrayDevice) and device.isDisk
def visible_func(self, model, itr, *args):
if not flags.dmraid:
return False
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device)
class ZPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("zModel")
self.model.set_visible_func(self.visible_func)
self._isS390 = arch.isS390()
def ismember(self, device):
return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)
def setup(self, store, selectedNames, disks):
if not self._isS390:
return
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device)
class FilterSpoke(NormalSpoke):
builderObjects = ["diskStore", "filterWindow",
"searchModel", "multipathModel", "otherModel", "raidModel", "zModel"]
mainWidgetName = "filterWindow"
uiFile = "spokes/filter.glade"
category = SystemCategory
title = CN_("GUI|Spoke", "_INSTALLATION DESTINATION")
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
self.applyOnSkip = True
self.ancestors = []
self.disks = []
self.selected_disks = []
@property
def indirect(self):
return True
# This spoke has no status since it's not in a hub
@property
def status(self):
return None
def apply(self):
onlyuse = self.selected_disks[:]
for disk in [d for d in self.storage.disks if d.name in onlyuse]:
onlyuse.extend([d.name for d in disk.ancestors
if d.name not in onlyuse])
self.data.ignoredisk.onlyuse = onlyuse
self.data.clearpart.drives = self.selected_disks[:]
def initialize(self):
NormalSpoke.initialize(self)
self.pages = [SearchPage(self.storage, self.builder),
MultipathPage(self.storage, self.builder),
OtherPage(self.storage, self.builder),
RaidPage(self.storage, self.builder),
ZPage(self.storage, self.builder)]
self._notebook = self.builder.get_object("advancedNotebook")
if not arch.isS390():
self._notebook.remove_page(-1)
self.builder.get_object("addZFCPButton").destroy()
if not has_fcoe():
self.builder.get_object("addFCOEButton").destroy()
self._store = self.builder.get_object("diskStore")
self._addDisksButton = self.builder.get_object("addDisksButton")
def _real_ancestors(self, disk):
# Return a list of all the ancestors of a disk, but remove the disk
# itself from this list.
return [d for d in disk.ancestors if d.name != disk.name]
def refresh(self):
NormalSpoke.refresh(self)
self.disks = getDisks(self.storage.devicetree)
self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.ancestors = itertools.chain(*map(self._real_ancestors, self.disks))
self.ancestors = map(lambda d: d.name, self.ancestors)
self._store.clear()
allDisks = []
multipathDisks = []
otherDisks = []
raidDisks = []
zDisks = []
        # Now add all the non-local disks to the store. Everything has been set up
# ahead of time, so there's no need to configure anything. We first make
# these lists of disks, then call setup on each individual page. This is
# because there could be page-specific setup to do that requires a complete
# view of all the disks on that page.
for disk in itertools.ifilterfalse(isLocalDisk, self.disks):
if self.pages[1].ismember(disk):
multipathDisks.append(disk)
elif self.pages[2].ismember(disk):
otherDisks.append(disk)
elif self.pages[3].ismember(disk):
raidDisks.append(disk)
elif self.pages[4].ismember(disk):
zDisks.append(disk)
allDisks.append(disk)
self.pages[0].setup(self._store, self.selected_disks, allDisks)
self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
self.pages[2].setup(self._store, self.selected_disks, otherDisks)
self.pages[3].setup(self._store, self.selected_disks, raidDisks)
self.pages[4].setup(self._store, self.selected_disks, zDisks)
self._update_summary()
def _update_summary(self):
summaryButton = self.builder.get_object("summary_button")
label = self.builder.get_object("summary_button_label")
# We need to remove ancestor devices from the count. Otherwise, we'll
# end up in a situation where selecting one multipath device could
# potentially show three devices selected (mpatha, sda, sdb for instance).
count = len([disk for disk in self.selected_disks if disk not in self.ancestors])
summary = CP_("GUI|Installation Destination|Filter",
"%d _storage device selected",
"%d _storage devices selected",
count) % count
label.set_text(summary)
label.set_use_underline(True)
summaryButton.set_visible(count > 0)
label.set_sensitive(count > 0)
def on_back_clicked(self, button):
self.skipTo = "StorageSpoke"
NormalSpoke.on_back_clicked(self, button)
def on_summary_clicked(self, button):
dialog = SelectedDisksDialog(self.data)
# Include any disks selected in the initial storage spoke, plus any
# selected in this filter UI.
disks = [disk for disk in self.disks if disk.name in self.selected_disks]
free_space = self.storage.getFreeSpace(disks=disks)
with enlightbox(self.window, dialog.window):
dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
dialog.run()
def on_find_clicked(self, button):
n = self._notebook.get_current_page()
self.pages[n].filterActive = True
self.pages[n].model.refilter()
def on_clear_clicked(self, button):
n = self._notebook.get_current_page()
self.pages[n].filterActive = False
self.pages[n].model.refilter()
self.pages[n].clear()
def on_page_switched(self, notebook, newPage, newPageNum, *args):
self.pages[newPageNum].model.refilter()
notebook.get_nth_page(newPageNum).show_all()
def on_row_toggled(self, button, path):
if not path:
return
itr = self._store.get_iter(path)
self._store[itr][1] = not self._store[itr][1]
if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
self.selected_disks.append(self._store[itr][3])
elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
self.selected_disks.remove(self._store[itr][3])
self._update_summary()
def on_add_iscsi_clicked(self, widget, *args):
dialog = ISCSIDialog(self.data, self.storage)
with enlightbox(self.window, dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_fcoe_clicked(self, widget, *args):
dialog = FCoEDialog(self.data, self.storage)
with enlightbox(self.window, dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_zfcp_clicked(self, widget, *args):
pass
##
## SEARCH TAB SIGNAL HANDLERS
##
def on_search_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("searchTypeNotebook")
findButton = self.builder.get_object("searchFindButton")
clearButton = self.builder.get_object("searchClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
##
## MULTIPATH TAB SIGNAL HANDLERS
##
def on_multipath_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("multipathTypeNotebook")
findButton = self.builder.get_object("multipathFindButton")
clearButton = self.builder.get_object("multipathClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
##
## OTHER TAB SIGNAL HANDLERS
##
def on_other_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("otherTypeNotebook")
findButton = self.builder.get_object("otherFindButton")
clearButton = self.builder.get_object("otherClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
|
mairin/anaconda
|
pyanaconda/ui/gui/spokes/filter.py
|
Python
|
gpl-2.0
| 23,922
|
[
"VisIt"
] |
e2305bc988a2be30f0f851d792552d62e4056b90c7782453cba2ea0d7b90f75c
|
import sys
import os.path
#sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15')
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
from utilities import *
import dill
import argparse
#import cPickle
import pickle
import numpy
from collections import OrderedDict
import theano, theano.tensor as T
import turing_model
from theano_toolkit.parameters import Parameters
from theano.compile.nanguardmode import NanGuardMode
DESCRIPTION = """
Recurrent neural network based statistical language modelling toolkit
(based on LSTM algorithm)
Implemented by Daniel Soutner,
Department of Cybernetics, University of West Bohemia, Plzen, Czech rep.
dsoutner@kky.zcu.cz, 2013
"""
def parse_args(parser):
parser.add_argument('--train', nargs=1, action="store", metavar="FILE",
help='training file !')
parser.add_argument('--valid', nargs=1, action="store", metavar="FILE",
help='valid file !')
parser.add_argument('--test', nargs=1, action="store", metavar="FILE",
help='testing file for ppl!')
parser.add_argument('--neuron-type', action="store", dest='celltype',
help='type of hidden neurons, RNN/LSTM, default: RNN', type=str, default='RNN')
parser.add_argument('--train-method', action="store", dest='train_method',
help='training method LSTM/TURING/ALL, default: ALL', type=str, default='ALL')
parser.add_argument('--projection-size', action="store", dest='n_projection',
help='Number of neurons in projection layer, default: 100', type=int, default=100)
parser.add_argument('--hidden-size', action="store", dest='n_hidden',
help='Number of neurons in hidden layer, default: 100', type=int, default=100)
parser.add_argument('--stack', action="store", dest='n_stack',
                        help='Number of stacked hidden layers, default: 1', type=int, default=1)
parser.add_argument('--learning-rate', action="store", dest='lr',
                        help='learning rate at the beginning, default: 0.01', type=float, default=0.01)
parser.add_argument('--improvement-rate', action="store", dest='improvement_rate',
help='relative improvement for early stopping on ppl , default: 0.005 ', type=float, default=0.005)
parser.add_argument('--minibatch-size', action="store", dest='minibatch_size',
help='minibatch size for training, default: 100', type=int, default=100)
parser.add_argument('--max-epoch', action="store", dest='max_epoch',
help='maximum number of epoch if not early stopping, default: 1000', type=int, default=1000)
parser.add_argument('--early-stop', action="store", dest='early_stop',
help='1 for early-stopping, 0 for not', type=int, default=1)
parser.add_argument('--save-net', action="store", dest="save_net", default=None, metavar="FILE",
help="Save RNN to file")
parser.add_argument('--load-net', action="store", dest="load_net", default=None, metavar="FILE",
help="Load RNN from file")
return parser.parse_args()
def build_vocab(data_file_str):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
lines.append(tokens)
data_file.close()
vocab = Vocab()
for line in lines:
vocab.add_words(line.split(" "))
return vocab
def load_data(data_file_str, vocab, data_type):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
        # abandon sentences that are too long in the training set; overly long sentences take too much time and degrade performance
tokens_for_count = line.replace('\n','').split(' ')
if len(tokens_for_count) > 50 and data_type == 'train':
continue
lines.append(tokens)
data_file.close()
# transform into big numerical matrix of sentences:
numerical_lines = []
for line in lines:
numerical_lines.append(vocab(line))
numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
return numerical_lines, numerical_lengths
def softmax(x):
"""
Wrapper for softmax, helps with
pickling, and removing one extra
dimension that Theano adds during
its exponential normalization.
"""
return T.nnet.softmax(x.T)
def has_hidden(layer):
"""
Whether a layer has a trainable
initial hidden state.
"""
return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
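    # Tile a single initial-state vector into an (n, dim) matrix: one copy per example.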
return T.repeat(T.shape_padleft(vector), n, axis=0)
def initial_state(layer, dimensions = None):
"""
    Initializes the recurrence relation with an initial hidden state
if needed, else replaces with a "None" to tell Theano that
the network **will** return something, but it does not need
to send it to the next step of the recurrence
"""
if dimensions is None:
return layer.initial_hidden_state if has_hidden(layer) else None
else:
return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
"""Optionally wrap tensor variable into a dict with taps=[-1]"""
state = initial_state(layer, dimensions)
if state is not None:
return dict(initial=state, taps=[-1])
else:
return None
class Model:
"""
Simple predictive model for forecasting words from
    sequence using LSTMs. Choose how many LSTMs to stack,
    what size their memory should be, and how many
words can be predicted.
"""
def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
# core layer in RNN/LSTM
self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
# add an embedding
self.model.layers.insert(0, Embedding(vocab_size, input_size))
# add a classifier:
self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
self.turing_params = Parameters()
#init turing machine model
self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , vocab_size)
self.hidden_size = hidden_size
# inputs are matrices of indices,
# each row is a sentence, each column a timestep
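        # e.g. (illustrative values, assuming index 0 is the padding/OOV token)
        # two padded sentences could be encoded as
        #   [[ 4, 17,  9,  2,  0],
        #    [ 6,  3,  2,  0,  0]]
        # with for_how_long holding the true lengths [4, 3]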
self._stop_word = theano.shared(np.int32(999999999), name="stop word")
self.for_how_long = T.ivector()
self.input_mat = T.imatrix()
self.priming_word = T.iscalar()
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
# create symbolic variables for prediction:
#change by darong #issue : what is greedy
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
# create symbolic variable for greedy search:
self.greedy_predictions = self.create_lstm_prediction(greedy=True)
# create gradient training functions:
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = 0.01
self.turing_lr = 0.01
self.all_lr = 0.01
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
# create ppl
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
def save(self, save_file, vocab):
pickle.dump(self.model, open(save_file, "wb")) # pickle is for lambda function, cPickle cannot
pickle.dump(vocab, open(save_file+'.vocab', "wb")) # pickle is for lambda function, cPickle cannot
def save_turing(self, save_file):
self.turing_params.save(save_file + '.turing')
def load(self, load_file, lr):
self.model = pickle.load(open(load_file, "rb"))
if os.path.isfile(load_file + '.turing') :
self.turing_params.load(load_file + '.turing')
else :
print "no turing model!!!! pretrain with lstm param"
self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T[self.hidden_size/2:,:]
self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
# need to compile again for calculating predictions after loading lstm
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = lr
self.turing_lr = lr#change this
self.all_lr = lr
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
print "done loading model"
# print "done compile"
def stop_on(self, idx):
self._stop_word.set_value(idx)
@property
def params(self):
return self.model.params
def create_lstm_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
# in sequence forecasting scenario we take everything
        # up to the step before the last one, and predict the subsequent
        # steps, ergo 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
return result[-1].transpose((2,0,1))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_final_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
# in sequence forecasting scenario we take everything
        # up to the step before the last one, and predict the subsequent
        # steps, ergo 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
hidden_size = result[-2].shape[2]/2
turing_result = self.turing_predict(result[-2][:,:,hidden_size:])
#the last layer do transpose before compute
return turing_result.transpose((1,0,2))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_cost_fun (self):
# create a cost function that
# takes each prediction at every timestep
# and guesses next timestep's value:
what_to_predict = self.input_mat[:, 1:]
# because some sentences are shorter, we
# place masks where the sentences end:
# (for how long is zero indexed, e.g. an example going from `[2,3)`)
        # has this value set to 0 (here we subtract 1):
for_how_long = self.for_how_long - 1
# all sentences start at T=0:
starting_when = T.zeros_like(self.for_how_long)
self.lstm_cost = masked_loss(self.lstm_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
self.final_cost = masked_loss(self.final_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
def create_predict_function(self):
self.lstm_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.lstm_predictions,
allow_input_downcast=True
)
self.final_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.final_predictions,
allow_input_downcast=True
)
self.greedy_fun = theano.function(
inputs=[self.priming_word],
outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
allow_input_downcast=True
)
def create_training_function(self):
updates, _, _, _, _ = create_optimization_updates(self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.lstm_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_cost,
updates=updates,
allow_input_downcast=True)
updates_turing = self.turing_updates(self.final_cost , lr=self.turing_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.turing_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_turing,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
allow_input_downcast=True)
all_updates_lstm, _, _, _, _ = create_optimization_updates(self.final_cost, self.params, method="SGD", lr=self.all_lr,part=True)
all_updates_turing_temp = self.turing_updates(self.final_cost , lr=self.all_lr)
updates_all = all_updates_lstm
for pair in all_updates_turing_temp :
updates_all[pair[0]] = pair[1]
self.all_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_all,
allow_input_downcast=True)
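    # Both ppl graphs below compute perplexity as exp(sum_t log(1 / p(w_t)) / N),
    # where the sum runs over the predicted (next-word) tokens of every sentence;
    # tokens labelled 0 are treated as OOV and removed from both the sum and N.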
def create_lstm_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.lstm_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
def create_final_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.final_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
def create_ppl_function(self):
self.lstm_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_ppl,
allow_input_downcast=True)
self.final_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_ppl,
allow_input_downcast=True)
def __call__(self, x):
        return self.lstm_pred_fun(x)  # assumed fix: self.pred_fun is never defined in this class
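# Hedged usage sketch (illustrative only, not part of the original file): how the
# compiled functions above are typically driven, using build_vocab()/load_data()
# defined earlier and a hypothetical 'train.txt'.
#
#   vocab = build_vocab('train.txt')
#   data, lengths = load_data('train.txt', vocab, 'train')
#   model = Model(input_size=100, hidden_size=100, vocab_size=len(vocab), celltype=RNN)
#   cost = model.lstm_update_fun(data, list(lengths))   # one SGD step on the recurrent stack
#   ppl = model.lstm_ppl_fun(data, list(lengths))       # corpus perplexity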
def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
lengths = []
for j in range(minibatch_size):
lengths.append(full_lengths[minibatch_size * minibatch_idx + j])
width = max(full_lengths)
# width = max(full_data[minibatch_size * minibatch_idx: minibatch_size * (minibatch_idx+1), :])
height = minibatch_size
minibatch_data = np.empty([height, width], dtype=theano.config.floatX)
minibatch_data = full_data[minibatch_size * minibatch_idx: minibatch_size * (minibatch_idx+1), :]
return minibatch_data, lengths
def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
# training information
print 'training information'
print '-------------------------------------------------------'
print 'method: %s' % args.train_method
print 'vocab size: %d' % len(vocab)
print 'sentences in training file: %d' % len(train_lengths)
print 'max length in training file: %d' % max(train_lengths)
print 'train file: %s' % args.train[0]
print 'valid file: %s' % args.valid[0]
print 'type: %s' % args.celltype
print 'project: %d' % args.n_projection
print 'hidden: %d' % args.n_hidden
print 'stack: %d' % args.n_stack
print 'learning rate: %f' % args.lr
print 'minibatch size: %d' % args.minibatch_size
print 'max epoch: %d' % args.max_epoch
print 'improvement rate: %f' % args.improvement_rate
print 'save file: %s' % args.save_net
print 'load_model: %s' % args.load_net
print 'early-stop: %r' % args.early_stop
print '-------------------------------------------------------'
if args.celltype == 'LSTM':
celltype = LSTM
elif args.celltype == 'RNN':
celltype = RNN
print 'start initializing model'
# construct model & theano functions:
model = Model(
input_size=args.n_projection,
hidden_size=args.n_hidden,
vocab_size=len(vocab),
stack_size=args.n_stack, # make this bigger, but makes compilation slow
celltype=celltype # use RNN or LSTM
)
if args.lr :
model.lstm_lr = args.lr
model.turing_lr = args.lr
model.all_lr = args.lr
model.stop_on(vocab.word2index["."])
if args.load_net :
if args.lr :
model.load(args.load_net, args.lr)# 0 is useless
else :
model.load(args.load_net, 0)
# train:
#select correct train and prediction method according to train_method(LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
update_fun = model.lstm_update_fun
ppl_fun = model.lstm_ppl_fun
lr = model.lstm_lr
print 'update lstm learning rate : %f' % model.lstm_lr
elif args.train_method == 'TURING' :
update_fun = model.turing_update_fun
ppl_fun = model.final_ppl_fun
lr = model.turing_lr
print 'update turing learning rate : %f' % model.turing_lr
else :
update_fun = model.all_update_fun
ppl_fun = model.final_ppl_fun
lr = model.all_lr
print 'update all learning rate : %f' % model.all_lr
stop_count = 0 # for stop training
change_count = 0 # for change learning rate
print 'start training'
min_valid_ppl = float('inf')
for epoch in range(args.max_epoch):
print "\nepoch %d" % epoch
# minibatch part
minibatch_size = args.minibatch_size # how many examples in a minibatch
n_train_batches = len(train_lengths)/minibatch_size
train_ppl = 0
for minibatch_idx in range(n_train_batches):
minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
error = update_fun(minibatch_train_data , list(lengths) )
minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
sys.stdout.write( '\n%d minibatch idx / %d total minibatch, ppl: %f '% (minibatch_idx+1, n_train_batches, minibatch_train_ppl) )
sys.stdout.flush() # important
        # handle any leftover sentences that do not fill a complete minibatch;
        # get_minibatch assumes a constant batch size, so slice them directly
        if n_train_batches * minibatch_size != len(train_lengths):
            start = n_train_batches * minibatch_size
            minibatch_train_data = train_data[start:, :]
            lengths = list(train_lengths[start:])
            error = update_fun(minibatch_train_data, lengths)
            minibatch_train_ppl = ppl_fun(minibatch_train_data, lengths)
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
train_ppl = train_ppl / sum(train_lengths)
# print 'done training'
# valid ppl
minibatch_size = min(20, len(valid_lengths))
valid_ppl = 0
n_valid_batches = len(valid_lengths)/minibatch_size
for minibatch_idx in range(n_valid_batches):
minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        # leftover validation sentences, sliced directly for the same reason as above
        if n_valid_batches * minibatch_size != len(valid_lengths):
            start = n_valid_batches * minibatch_size
            minibatch_valid_data = valid_data[start:, :]
            lengths = list(valid_lengths[start:])
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, lengths)
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
valid_ppl = valid_ppl / sum(valid_lengths)
print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
if valid_ppl < min_valid_ppl:
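            # new best validation perplexity: save the model and reset the early-stopping counters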
min_valid_ppl = valid_ppl
model.save(args.save_net, vocab)
if args.train_method != 'LSTM' :
model.save_turing(args.save_net)
stop_count = 0
change_count = 0
print "save best model"
continue
if args.early_stop:
if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
if stop_count > 2 or lr < 1e-6:
print 'stop training'
break
stop_count = stop_count + 1
elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
# if change_count > 2:
print 'change learning rate from %f to %f' % (lr, lr/2)
model.lstm_lr = model.lstm_lr / 2.
model.turing_lr = model.turing_lr / 2.
model.all_lr = model.all_lr / 2.
if args.train_method == 'LSTM' :
lr = model.lstm_lr
elif args.train_method == 'TURING' :
lr = model.turing_lr
else :
lr = model.all_lr
# change_count = change_count + 1
def testing(args, test_data, test_lengths):
print 'start loading'
model_load = Model(
input_size=1,
hidden_size=1,
vocab_size=1,
stack_size=1, # make this bigger, but makes compilation slow
celltype=RNN # use RNN or LSTM
)
model_load.stop_on(vocab.word2index["."])
if args.train_method != 'LSTM' :
if not os.path.isfile(args.load_net + '.turing') :
print "there is no trained turing file so we can't test by turing model!!"
sys.exit()
model_load.load(args.load_net, 0)
# test ppl
#select correct train and prediction method according to train_method(LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
ppl_fun = model_load.lstm_ppl_fun
else :
ppl_fun = model_load.final_ppl_fun
minibatch_size = min(20, len(test_lengths))
test_ppl = 0
n_test_batches = len(test_lengths)/minibatch_size
for minibatch_idx in range(n_test_batches):
minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
    # leftover test sentences, sliced directly because get_minibatch assumes a constant batch size
    if n_test_batches * minibatch_size != len(test_lengths):
        start = n_test_batches * minibatch_size
        minibatch_test_data = test_data[start:, :]
        lengths = list(test_lengths[start:])
        minibatch_test_ppl = ppl_fun(minibatch_test_data, lengths)
        test_ppl = test_ppl + minibatch_test_ppl * sum(lengths)
test_ppl = test_ppl / sum(test_lengths)
print "test ppl: %f" %test_ppl
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=DESCRIPTION)
args = parse_args(parser)
# if no args are passed
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
if args.train:
vocab = build_vocab(args.train[0])
train_data, train_lengths = load_data(args.train[0], vocab, 'train')
valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
elif args.test:
vocab = pickle.load(open(args.load_net+'.vocab', "rb"))
test_data, test_lengths = load_data(args.test[0], vocab, 'test')
testing(args, test_data, test_lengths)
|
darongliu/Lstm_Turing_LM
|
lstm-neural-turing-machines-lm/exp1/v1-first-half/lm_v4.py
|
Python
|
mit
| 25,619
|
[
"NEURON"
] |
06d79fcad3650b5638f492f42b3c016d4ac063a460ea90605ccf140b7d30b2dd
|
import sys
import os
import pysam
import difflib
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
from itertools import zip_longest
from urllib.request import urlopen
else:
from itertools import izip as zip_longest
from urllib2 import urlopen
def checkBinaryEqual(filename1, filename2):
'''return true if the two files are binary equal.
'''
if os.path.getsize(filename1) != os.path.getsize(filename2):
return False
infile1 = open(filename1, "rb")
infile2 = open(filename2, "rb")
def chariter(infile):
while 1:
c = infile.read(1)
if c == b"":
break
yield c
found = False
for c1, c2 in zip_longest(chariter(infile1), chariter(infile2)):
if c1 != c2:
break
    else:
        # for/else: this branch runs only if the loop finished without finding a mismatch
        found = True
infile1.close()
infile2.close()
return found
def checkSamtoolsViewEqual(filename1, filename2,
without_header=False):
'''return true if the two files are equal in their
content through samtools view.
'''
# strip MD and NM tags, as not preserved in CRAM files
args = ["-x", "MD", "-x", "NM"]
if not without_header:
args.append("-h")
lines1 = pysam.view(*(args + [filename1]))
lines2 = pysam.view(*(args + [filename2]))
if len(lines1) != len(lines2):
return False
if lines1 != lines2:
# line by line comparison
# sort each line, as tags get rearranged between
# BAM/CRAM
for n, pair in enumerate(zip(lines1, lines2)):
l1, l2 = pair
l1 = sorted(l1[:-1].split("\t"))
l2 = sorted(l2[:-1].split("\t"))
if l1 != l2:
print ("mismatch in line %i" % n)
print (l1)
print (l2)
return False
else:
return False
return True
def checkURL(url):
'''return True if URL is available.
A URL might not be available if it is the wrong URL
or there is no connection to the URL.
'''
try:
urlopen(url, timeout=1)
return True
except:
return False
def checkFieldEqual(cls, read1, read2, exclude=[]):
'''check if two reads are equal by comparing each field.'''
# add the . for refactoring purposes.
for x in (".query_name",
".query_sequence",
".flag",
".reference_id",
".reference_start",
".mapping_quality",
".cigartuples",
".next_reference_id",
".next_reference_start",
".template_length",
".query_length",
".query_qualities",
".bin",
".is_paired", ".is_proper_pair",
".is_unmapped", ".mate_is_unmapped",
".is_reverse", ".mate_is_reverse",
".is_read1", ".is_read2",
".is_secondary", ".is_qcfail",
".is_duplicate"):
n = x[1:]
if n in exclude:
continue
cls.assertEqual(getattr(read1, n), getattr(read2, n),
"attribute mismatch for %s: %s != %s" %
(n, getattr(read1, n), getattr(read2, n)))
|
daler/pysam
|
tests/TestUtils.py
|
Python
|
mit
| 3,266
|
[
"pysam"
] |
70cefbe072cbdbf9a2833f1943320292c4009477fada87c2e9d91b04aa4db026
|
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2017. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Vandaele Rémy <remy.vandaele@ulg.ac.be>"
__contributors__ = ["Marée Raphaël <raphael.maree@ulg.ac.be>"]
__copyright__ = "Copyright 2010-2017 University of Liège, Belgium, http://www.cytomine.be/"
from SeparateTrees import SeparateTrees
from SeparateTreesRegressor import SeparateTreesRegressor
from ldmtools import *
import numpy as np
from multiprocessing import Pool
import scipy.ndimage as snd
from sklearn.externals import joblib
from download import *
import sys,cytomine
import optparse
def dataset_from_coordinates(img, x, y, feature_offsets):
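    # Each feature is the intensity difference between the pixel at (x, y) and the pixel at a
    # fixed random offset from it; offsets falling outside the image are clipped to the border.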
(h, w) = img.shape
original_values = img[y.clip(min=0, max=h - 1), x.clip(min=0, max=w - 1)]
dataset = np.zeros((x.size, feature_offsets[:, 0].size))
for i in range(feature_offsets[:, 0].size):
dataset[:, i] = original_values - img[(y + feature_offsets[i, 1]).clip(min=0, max=h - 1), (x + feature_offsets[i, 0]).clip(min=0, max=w - 1)]
return dataset
def image_dataset_phase_1(repository, image_number, x, y, feature_offsets, R_offsets, delta, P):
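    # Phase-1 training samples for one image: every pixel within radius R of a landmark is
    # labelled with that landmark's index, plus randomly chosen background pixels labelled
    # with an extra "not a landmark" class (index = number of landmarks).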
img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
(h, w) = img.shape
mask = np.ones((h, w), 'bool')
mask[:, 0] = 0
mask[0, :] = 0
mask[h - 1, :] = 0
mask[:, w - 1] = 0
(nroff, blc) = R_offsets.shape
h -= 2
w -= 2
x += 1
y += 1
n_out = int(np.round(P*nroff))
rep = np.zeros((x.size * nroff) + n_out)
xs = np.zeros((x.size * nroff) + n_out).astype('int')
ys = np.zeros((x.size * nroff) + n_out).astype('int')
for ip in range(x.size):
xs[ip*nroff:(ip+1)*nroff] = x[ip] + R_offsets[:, 0]
ys[ip*nroff:(ip+1)*nroff] = y[ip] + R_offsets[:, 1]
rep[ip*nroff:(ip+1)*nroff] = ip
mask[ys, xs] = 0
(ym, xm) = np.where(mask == 1)
perm = np.random.permutation(ym.size)[0:n_out]
ym = ym[perm]
xm = xm[perm]
xs[x.size * nroff:] = xm
ys[y.size * nroff:] = ym
rep[x.size * nroff:] = x.size
dataset = dataset_from_coordinates(img, xs, ys, feature_offsets)
return dataset, rep
def dataset_mp_helper(jobargs):
return image_dataset_phase_1(*jobargs)
def get_dataset_phase_1(repository, training_images, image_ids, n_jobs, feature_offsets, R_offsets, delta, P, X, Y):
p = Pool(n_jobs)
Xc = np.round(X * delta).astype('int')
Yc = np.round(Y * delta).astype('int')
(nims, nldms) = Xc.shape
jobargs = []
for i in range(nims):
if image_ids[i] in training_images:
jobargs.append((repository, image_ids[i], Xc[i, :], Yc[i, :], feature_offsets, R_offsets, delta, P))
data = p.map(dataset_mp_helper, jobargs)
p.close()
p.join()
(nroff, blc) = R_offsets.shape
nims = len(training_images)
n_in = nroff*nldms
n_out = int(np.round(nroff*P))
n_tot = n_in+n_out
DATASET = np.zeros((nims * n_tot, feature_offsets[:, 0].size))
REP = np.zeros(nims * n_tot)
IMG = np.zeros(nims * n_tot)
b = 0
i = 0
for (d, r) in data:
(nd, nw) = d.shape
DATASET[b:b + nd, :] = d
REP[b:b + nd] = r
IMG[b:b + nd] = i
i += 1
b = b + nd
DATASET = DATASET[0:b, :]
REP = REP[0:b]
IMG = IMG[0:b]
return DATASET, REP, IMG
def build_phase_1_model(repository, tr_image=[], image_ids=[], n_jobs=1, NT=32, F=100, R=2, sigma=10, delta=0.25, P=1, X=None, Y=None):
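    # Phase 1: draw NT*F feature offsets from a 2-D Gaussian with standard deviation sigma and
    # take every integer offset within radius R as a positive-sample offset, then build the dataset.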
std_matrix = np.eye(2) * (sigma ** 2)
feature_offsets = np.round(np.random.multivariate_normal([0, 0], std_matrix, NT * F)).astype('int')
R_offsets = []
for x1 in range(-R, R + 1):
for x2 in range(-R, R + 1):
if (np.linalg.norm([x1, x2]) <= R):
R_offsets.append([x1, x2])
R_offsets = np.array(R_offsets).astype('int')
(dataset, rep, img) = get_dataset_phase_1(repository, tr_image, image_ids, n_jobs, feature_offsets, R_offsets, delta, P, X, Y)
return dataset, rep, img, feature_offsets
def probability_map_phase_1(repository, image_number, clf, feature_offsets, delta):
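    # Apply the phase-1 classifier to every interior pixel of the rescaled image, in chunks of
    # 20000 pixels, producing one probability map per landmark class.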
img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
(h, w) = img.shape
ys = []
xs = []
c = np.arange((h - 2) * (w - 2))
ys = 1 + np.round(c / (w - 2)).astype('int')
xs = 1 + np.mod(c, (w - 2))
step = 20000
b = 0
probability_map = None
nldms = -1
while b < xs.size:
next_b = min(b + step, xs.size)
dataset = dataset_from_coordinates(img, xs[b:next_b], ys[b:next_b], feature_offsets)
probabilities = clf.predict_proba(dataset)
if (nldms == -1):
(ns, nldms) = probabilities.shape
probability_map = np.zeros((h - 2, w - 2, nldms))
for ip in range(nldms):
probability_map[ys[b:next_b] - 1, xs[b:next_b] - 1, ip] = probabilities[:, ip]
b = next_b
return probability_map
def image_dataset_phase_2(repository, image_number, x, y, feature_offsets, R_offsets, delta):
img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
(h, w) = img.shape
mask = np.ones((h, w), 'bool')
mask[:, 0] = 0
mask[0, :] = 0
mask[h - 1, :] = 0
mask[:, w - 1] = 0
(nroff, blc) = R_offsets.shape
h -= 2
w -= 2
x += 1
y += 1
rep = np.zeros((nroff, 2))
number = image_number
xs = (x + R_offsets[:, 0]).astype('int')
ys = (y + R_offsets[:, 1]).astype('int')
rep[:, 0] = R_offsets[:, 0]
rep[:, 1] = R_offsets[:, 1]
dataset = dataset_from_coordinates(img, xs, ys, feature_offsets)
return dataset, rep, number
def dataset_mp_helper_phase_2(jobargs):
return image_dataset_phase_2(*jobargs)
def get_dataset_phase_2(repository, tr_images, image_ids, n_jobs, id_term, feature_offsets, R_offsets, delta):
p = Pool(n_jobs)
(Xc, Yc, Xp, Yp, ims) = getcoords(repository.rstrip('/') + '/txt/', id_term)
nims = Xc.size
jobargs = []
for i in range(nims):
if image_ids[i] in tr_images:
jobargs.append((repository, image_ids[i], Xc[i], Yc[i], feature_offsets, R_offsets, delta))
data = p.map(dataset_mp_helper_phase_2, jobargs)
p.close()
p.join()
(nroff, blc) = R_offsets.shape
nims = len(tr_images)
DATASET = np.zeros((nims * nroff, feature_offsets[:, 0].size))
REP = np.zeros((nims * nroff, 2))
NUMBER = np.zeros(nims * nroff)
b = 0
for (d, r, n) in data:
(nd, nw) = d.shape
DATASET[b:b + nd, :] = d
REP[b:b + nd, :] = r
NUMBER[b:b + nd] = n
b = b + nd
DATASET = DATASET[0:b, :]
REP = REP[0:b]
NUMBER = NUMBER[0:b]
return DATASET, REP, NUMBER
def build_phase_2_model(repository, tr_image=None, image_ids=None, n_jobs=1, IP=0, NT=32, F=100, R=3, N=500, sigma=10, delta=0.25):
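    # Phase 2: sample N offsets inside a disc of radius R around each landmark; the regression
    # targets are the sampled (dx, dy) displacements relative to the landmark.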
std_matrix = np.eye(2) * (sigma ** 2)
feature_offsets = np.round(np.random.multivariate_normal([0, 0], std_matrix, NT * F)).astype('int')
R_offsets = np.zeros((N, 2))
dis = np.random.ranf(N) * R
ang = np.random.ranf(N) * 2 * np.pi
R_offsets[:, 0] = np.round((dis * np.cos(ang))).astype('int')
R_offsets[:, 1] = np.round((dis * np.sin(ang))).astype('int')
(dataset, rep, number) = get_dataset_phase_2(repository, tr_image, image_ids, n_jobs, IP, feature_offsets, R_offsets, delta)
return dataset, rep, number, feature_offsets
def build_edgematrix_phase_3(Xc, Yc, sde, delta, T):
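    # For each landmark, keep the T other landmarks whose relative position varies least across
    # the training images (smallest mean deviation of the pairwise coordinate differences).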
Xc = Xc * delta
Yc = Yc * delta
(nims, nldms) = Xc.shape
differential_entropy = np.eye(nldms) + np.inf
c1 = np.zeros((nims, 2))
c2 = np.zeros((nims, 2))
for ldm1 in range(nldms):
c1[:, 0] = Xc[:, ldm1]
c1[:, 1] = Yc[:, ldm1]
for ldm2 in range(ldm1 + 1, nldms):
c2[:, 0] = Xc[:, ldm2]
c2[:, 1] = Yc[:, ldm2]
diff = c1 - c2
d = diff - np.mean(diff, axis=0)
d = np.mean(np.sqrt((d[:, 0] ** 2) + (d[:, 1] ** 2)))
differential_entropy[ldm1, ldm2] = d
differential_entropy[ldm2, ldm1] = d
edges = np.zeros((nldms, T))
for ldm in range(nldms):
edges[ldm, :] = np.argsort(differential_entropy[ldm, :])[0:T]
return edges.astype(int)
def main():
p = optparse.OptionParser(description='Cytomine Landmark Detection : Model building', prog='Cytomine Landmark Detector : Model builder', version='0.1')
p.add_option('--cytomine_host', type="string", default='beta.cytomine.be', dest="cytomine_host", help="The Cytomine host (eg: beta.cytomine.be, localhost:8080)")
p.add_option('--cytomine_public_key', type="string", default='XXX', dest="cytomine_public_key", help="Cytomine public key")
p.add_option('--cytomine_private_key', type="string", default='YYY', dest="cytomine_private_key", help="Cytomine private key")
p.add_option('--cytomine_id_software', type="int", dest="cytomine_id_software", help="The Cytomine software identifier")
p.add_option('--cytomine_base_path', type="string", default='/api/', dest="cytomine_base_path", help="Cytomine base path")
p.add_option('--cytomine_working_path', default="/tmp/", type="string", dest="cytomine_working_path", help="The working directory (eg: /tmp)")
p.add_option('--cytomine_training_images', default="all", type="string", dest="cytomine_training_images", help="identifiers of the images used to create the models. ids must be separated by commas (no spaces). If 'all' is mentioned instead, every image with manual annotation will be used.")
p.add_option('--cytomine_id_project', type="int", dest="cytomine_id_project", help="The Cytomine project identifier")
p.add_option('--image_type', type='string', default='jpg', dest='image_type', help="The type of the images that will be used (jpg, bmp, png,...)")
p.add_option('--model_njobs', type='int', default=1, dest='model_njobs', help="The number of processors used for model building")
p.add_option('--cytomine_id_terms', type='string', default=1, dest='cytomine_id_terms', help="The identifiers of the terms to create detection models for. Terms must be separated by commas (no spaces). If 'all' is mentioned instead, every terms will be detected.")
p.add_option('--model_NT_P1', type='int', default=6, dest='model_NT_P1', help="Number of trees for phase 1.")
p.add_option('--model_F_P1', type='int', default=200, dest='model_F_P1', help="Number of features for phase 1.")
p.add_option('--model_R_P1', type='int', default=3, dest='model_R_P1', help="Radius in which phase 1 samples are extracted.")
p.add_option('--model_sigma', type='int', default=20, dest='model_sigma', help="Standard deviation for the gaussian.")
p.add_option('--model_delta', type='float', default=3, dest='model_delta', help="Resizing factor.")
p.add_option('--model_P', type='float', default=3, dest='model_P', help="Proportion of non-landmarks.")
p.add_option('--model_R_P2', type='int', default=3, dest='model_R_P2', help="Radius in which phase 2 samples are extracted.")
p.add_option('--model_ns_P2', type='int', default=3, dest='model_ns_P2', help="Number of samples for phase 2.")
p.add_option('--model_NT_P2', type='int', default=3, dest='model_NT_P2', help="Number of trees for phase 2.")
p.add_option('--model_F_P2', type='int', default=3, dest='model_F_P2', help="Number of features for phase 2.")
p.add_option('--model_filter_size', type='int', default=3, dest='model_filter_size', help="Size of the filter for phase 2.")
p.add_option('--model_beta', type='float', default=3, dest='model_beta', help="Beta for phase 2.")
p.add_option('--model_n_iterations', type='int', default=3, dest='model_n_iterations', help="Number of iterations for phase 2.")
p.add_option('--model_ncandidates', type='int', default=3, dest='model_ncandidates', help="Number of candidates for phase 3.")
p.add_option('--model_sde', type='float', default=10., dest='model_sde', help="Standard deviation for gaussian phase 3.")
p.add_option('--model_T', type='int', default=3, dest='model_T', help="Number of edges for phase 3.")
p.add_option('--model_save_to', type='string', default='/tmp/', dest='model_save_to', help="Destination for model storage")
p.add_option('--model_name', type='string', dest='model_name', help="Name of the model (used for saving)")
p.add_option('--verbose', type="string", default="0", dest="verbose", help="Turn on (1) or off (0) verbose mode")
opt_parser, arguments = p.parse_args(args=sys.argv)
opt_parser.cytomine_working_path = opt_parser.cytomine_working_path.rstrip('/') + '/'
cytomine_connection = cytomine.Cytomine(opt_parser.cytomine_host, opt_parser.cytomine_public_key, opt_parser.cytomine_private_key, base_path=opt_parser.cytomine_base_path, working_path=opt_parser.cytomine_working_path, verbose=str2bool(opt_parser.verbose))
current_user = cytomine_connection.get_current_user()
    run_by_user_job = False
    if not current_user.algo:
        user_job = cytomine_connection.add_user_job(opt_parser.cytomine_id_software, opt_parser.cytomine_id_project)
        cytomine_connection.set_credentials(str(user_job.publicKey), str(user_job.privateKey))
    else:
        user_job = current_user
        run_by_user_job = True
job = cytomine_connection.get_job(user_job.job)
    cytomine_connection.update_job_status(job, status=job.RUNNING, progress=0, status_comment="Building model...")
job_parameters = {}
job_parameters['cytomine_id_terms'] = opt_parser.cytomine_id_terms
job_parameters['model_njobs'] = opt_parser.model_njobs
job_parameters['model_NT_P1'] = opt_parser.model_NT_P1
job_parameters['model_F_P1'] = opt_parser.model_F_P1
job_parameters['model_R_P1'] = opt_parser.model_R_P1
job_parameters['model_sigma'] = opt_parser.model_sigma
job_parameters['model_delta'] = opt_parser.model_delta
job_parameters['model_P'] = opt_parser.model_P
job_parameters['model_R_P2'] = opt_parser.model_R_P2
job_parameters['model_ns_P2'] = opt_parser.model_ns_P2
job_parameters['model_NT_P2'] = opt_parser.model_NT_P2
job_parameters['model_F_P2'] = opt_parser.model_F_P2
job_parameters['model_filter_size'] = opt_parser.model_filter_size
job_parameters['model_beta'] = opt_parser.model_beta
job_parameters['model_n_iterations'] = opt_parser.model_n_iterations
job_parameters['model_ncandidates'] = opt_parser.model_ncandidates
job_parameters['model_sde'] = opt_parser.model_sde
job_parameters['model_T'] = opt_parser.model_T
model_repo = opt_parser.model_save_to
if not os.path.isdir(model_repo):
os.mkdir(model_repo)
if not run_by_user_job:
cytomine_connection.add_job_parameters(user_job.job, cytomine_connection.get_software(opt_parser.cytomine_id_software), job_parameters)
download_images(cytomine_connection, opt_parser.cytomine_id_project)
download_annotations(cytomine_connection, opt_parser.cytomine_id_project, opt_parser.cytomine_working_path)
repository = opt_parser.cytomine_working_path + str(opt_parser.cytomine_id_project) + '/'
(xc, yc, xr, yr, ims, term_to_i, i_to_term) = getallcoords(repository.rstrip('/') + '/txt/')
(nims, nldms) = xc.shape
if opt_parser.cytomine_id_terms != 'all':
term_list = np.sort([int(term) for term in opt_parser.cytomine_id_terms.split(',')])
Xc = np.zeros((nims, len(term_list)))
Yc = np.zeros(Xc.shape)
i = 0
for id_term in term_list:
Xc[:, i] = xc[:, term_to_i[id_term]]
Yc[:, i] = yc[:, term_to_i[id_term]]
i += 1
else:
term_list = np.sort(term_to_i.keys())
Xc = xc
Yc = yc
if opt_parser.cytomine_training_images == 'all':
tr_im = ims
else:
tr_im = [int(p) for p in opt_parser.cytomine_training_images.split(',')]
(dataset, rep, img, feature_offsets_1) = build_phase_1_model(repository, tr_image=tr_im, image_ids=ims, n_jobs=opt_parser.model_njobs, NT=opt_parser.model_NT_P1, F=opt_parser.model_F_P1, R=opt_parser.model_R_P1, sigma=opt_parser.model_sigma, delta=opt_parser.model_delta, P=opt_parser.model_P, X=Xc, Y=Yc)
clf = SeparateTrees(n_estimators=opt_parser.model_NT_P1, n_jobs=opt_parser.model_njobs)
clf = clf.fit(dataset, rep)
joblib.dump(clf, '%s%s_classifier_phase1.pkl' % (model_repo, opt_parser.model_name))
joblib.dump(feature_offsets_1, '%s%s_offsets_phase1.pkl' % (model_repo, opt_parser.model_name))
for id_term in term_list:
(dataset, rep, number, feature_offsets_2) = build_phase_2_model(repository, tr_image=tr_im, image_ids=ims, n_jobs=opt_parser.model_njobs, IP=id_term, NT=opt_parser.model_NT_P2, F=opt_parser.model_F_P2, R=opt_parser.model_R_P2, N=opt_parser.model_ns_P2, sigma=opt_parser.model_sigma, delta=opt_parser.model_delta)
reg = SeparateTreesRegressor(n_estimators=opt_parser.model_NT_P2, n_jobs=opt_parser.model_njobs)
reg.fit(dataset, rep)
joblib.dump(reg, '%s%s_dmbl_regressor_phase2_%d.pkl' % (model_repo, opt_parser.model_name, id_term))
joblib.dump(feature_offsets_2, '%s%s_dmbl_offsets_phase2_%d.pkl' % (model_repo, opt_parser.model_name, id_term))
(nims, nldms) = xc.shape
X = np.zeros((len(tr_im), nldms))
Y = np.zeros(X.shape)
j = 0
for i in range(nims):
if ims[i] in tr_im:
X[j, :] = xc[i, :]
Y[j, :] = yc[i, :]
j += 1
edges = build_edgematrix_phase_3(X, Y, opt_parser.model_sde, opt_parser.model_delta, opt_parser.model_T)
joblib.dump(edges, '%s%s_edgematrix_phase3.pkl' % (opt_parser.model_save_to, opt_parser.model_name))
F = open('%s%s_dmbl_parameters.conf' % (opt_parser.model_save_to, opt_parser.model_name), 'wb')
F.write('cytomine_id_terms %s\n' % opt_parser.cytomine_id_terms)
F.write('model_njobs %d\n' % opt_parser.model_njobs)
F.write('model_NT_P1 %d\n' % opt_parser.model_NT_P1)
F.write('model_F_P1 %d\n' % opt_parser.model_F_P1)
F.write('model_R_P1 %d\n' % opt_parser.model_R_P1)
F.write('model_sigma %f\n' % opt_parser.model_sigma)
F.write('model_delta %f\n' % opt_parser.model_delta)
F.write('model_P %f\n' % opt_parser.model_P)
F.write('model_R_P2 %d\n' % opt_parser.model_R_P2)
F.write('model_ns_P2 %d\n' % opt_parser.model_ns_P2)
F.write('model_NT_P2 %d\n' % opt_parser.model_NT_P2)
F.write('model_F_P2 %d\n' % opt_parser.model_F_P2)
F.write('model_filter_size %d\n' % opt_parser.model_filter_size)
F.write('model_beta %f\n' % opt_parser.model_beta)
F.write('model_n_iterations %d\n' % opt_parser.model_n_iterations)
F.write("model_ncandidates %d\n" % opt_parser.model_ncandidates)
F.write('model_sde %f\n' % opt_parser.model_sde)
F.write('model_T %d' % opt_parser.model_T)
F.close()
if __name__ == "__main__":
main()
|
cytomine/Cytomine-python-datamining
|
cytomine-applications/ldm_model_builder/build_dmbl_model.py
|
Python
|
apache-2.0
| 18,236
|
[
"Gaussian"
] |
a8528602f131d940e503e10301eb8dd8a4c74fdcbf20414cce0704cd327d34e5
|
from django.db import models
from edc_base.model.fields.custom_fields import OtherCharField
from edc_base.model.models import BaseUuidModel
from edc_base.audit_trail import AuditTrail
from edc_constants.choices import DRUG_ROUTE
from edc_constants.choices import YES_NO
from edc_visit_tracking.models import CrfInlineModelMixin
from edc_sync.models import SyncModelMixin
from microbiome.apps.mb.choices import MEDICATIONS
from ..managers import InfantFuNewMedItemsManager
from .infant_crf_model import InfantCrfModel
class InfantFuNewMed(InfantCrfModel):
""" A model completed by the user on the infant's follow up medications. """
new_medications = models.CharField(
max_length=25,
choices=YES_NO,
verbose_name="Has the child recieved a NEW course of any of the following medications "
"since the last attended scheduled visit",
help_text="do not report if the same course was recorded at previous visit. "
"only report oral and intravenous meds",
)
history = AuditTrail()
class Meta:
app_label = 'mb_infant'
verbose_name = "Infant FollowUp: New Medication"
verbose_name_plural = "Infant FollowUp: New Medication"
class InfantFuNewMedItems(CrfInlineModelMixin, SyncModelMixin, BaseUuidModel):
"""A model completed by the user on the infant's follow up medication items."""
infant_fu_med = models.ForeignKey(InfantFuNewMed)
medication = models.CharField(
max_length=100,
choices=MEDICATIONS,
verbose_name="Medication",
)
other_medication = OtherCharField()
date_first_medication = models.DateField(
verbose_name="Date of first medication use",
)
stop_date = models.DateField(
verbose_name="Date medication was stopped",
blank=True,
null=True,
)
drug_route = models.CharField(
max_length=20,
choices=DRUG_ROUTE,
verbose_name="Drug route",
)
objects = InfantFuNewMedItemsManager()
history = AuditTrail()
def natural_key(self):
return (self.medication, ) + self.infant_fu_med.natural_key()
class Meta:
app_label = 'mb_infant'
verbose_name = "Infant FollowUp: New Med Items"
verbose_name_plural = "Infant FollowUp: New Med Items"
|
botswana-harvard/microbiome
|
microbiome/apps/mb_infant/models/infant_fu_new_med.py
|
Python
|
gpl-2.0
| 2,336
|
[
"VisIt"
] |
d98c07568157667aea2881eae7a06a1456eeb57c353040fb724bc91914788bc6
|
import time
from vtk.web import testing
dependencies_met = True
try:
# import modules for automating web testing using a real browser
import selenium, Image
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
except:
dependencies_met = False
# =============================================================================
# This function uses the Selenium library to open a browser window and load the
# ParaView Web FileViewer application. Then it interacts with the browser app
# to open a known data file (a dual-sphere animation .vtp file). Then the image is
# captured and compared with a baseline.
# =============================================================================
def runTest(args) :
# print 'We were passed the following args: ' + str(args)
if dependencies_met == False:
raise testing.DependencyError("One of python modules 'selenium' or 'Image' is missing or deficient")
# This name is used in error reporting
testName = 'pv_web_file_loader_open_browser_and_click_renderer.py'
    # Request the FileViewer index.html
urlToRetrieve = 'http://localhost:' + str(args.port) + '/apps/FileViewer'
# The author of pv_web_visualizer.py grabbed the --data-dir argument
# from the command line and put it in a variable called "path" in the
# arguments object, so that's where we look for the ParaViewData dir
# inside this test script.
baselineImgDir = args.baselineImgDir
print 'The baseline images are located here: ' + baselineImgDir
# Create a Chrome window driver.
browser = webdriver.Chrome()
browser.set_window_size(720, 480)
browser.get(urlToRetrieve)
sleepSeconds = 8
print "Going to sleep for " + str(sleepSeconds) + " seconds to let browser load page"
time.sleep(sleepSeconds)
print "Ok, page should be loaded by now...continuing."
# First we need to hit the enter key to make the modal pop-up go away
browser.switch_to_alert().accept()
time.sleep(3)
sphereDataLi = browser.execute_script("return $('.jstree-leaf:contains(dualSphereAnimation_P00T0003.vtp)')[0]")
sphereDataLi.click()
time.sleep(1)
# Now click the resetCamera icon so that we change the center of
# rotation
resetCameraIcon = browser.find_element_by_css_selector("[action=resetCamera]");
resetCameraIcon.click()
time.sleep(1)
# Now grab the renderer image and write it to disk
imgdata = testing.get_image_data(browser, ".image.active>img")
filename = 'image_sphere_part.jpg'
testing.write_image_to_disk(imgdata, filename)
print 'About to compare images...'
knownGoodFileName = testing.concat_paths(baselineImgDir,
'image_sphere_part_known_good.jpg')
compareResult = -1
try :
compareResult = testing.compare_images(knownGoodFileName, filename)
print 'Images compared with diff = ' + str(compareResult)
except Exception as inst :
print 'Caught exception in compareImages:'
print inst
testing.test_fail(testName)
browser.quit()
if compareResult != 0 :
print "Images were different, diffsum was: " + str(compareResult)
testing.test_fail(testName)
testing.test_pass(testName)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Web/Applications/FileViewer/test/test_pv_web_fileviewer_can_test.py
|
Python
|
gpl-3.0
| 3,472
|
[
"ParaView",
"VTK"
] |
da2ece4f58ee8503756e996dd6f012eb4c5dd5ad9b2e19cf788e1b36bf7c6fc1
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
    When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s)
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.applicant import Applicant
class TestApplicant(unittest.TestCase):
""" Applicant unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testApplicant(self):
"""
Test Applicant
"""
model = vericred_client.models.applicant.Applicant()
if __name__ == '__main__':
unittest.main()
|
vericred/vericred-python
|
test/test_applicant.py
|
Python
|
apache-2.0
| 9,979
|
[
"VisIt"
] |
0382b8504b2f907478076159f4730fb1d06c45294c33dea3014e3920f15f082a
|
import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
# Connect to h2o
h2o.init(ip,port)
print("Reading in original prostate data.")
h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
print("Compare models with link unspecified and canonical link specified.")
print("GAUSSIAN: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian")
h2o_model_specified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian", link="identity")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("BINOMIAL: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial", link="logit")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("POISSON: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson", link="log")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("GAMMA: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma")
h2o_model_specified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma", link="inverse")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_correct_default)
|
bikash/h2o-dev
|
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
Python
|
apache-2.0
| 2,018
|
[
"Gaussian"
] |
a5053796f46b419a2ea0531832756899bbcba63e456a509b87708b4d6292a25a
|
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from .gaussian import Gaussian
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
from ..core.parameterization import Parameterized
import itertools
class MixedNoise(Likelihood):
def __init__(self, likelihoods_list, name='mixed_noise'):
        # NOTE: at the moment this likelihood only works with a list of Gaussian likelihoods
super(Likelihood, self).__init__(name=name)
self.link_parameters(*likelihoods_list)
self.likelihoods_list = likelihoods_list
self.log_concave = False
def gaussian_variance(self, Y_metadata):
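        # Return, for every data point, the noise variance of the Gaussian likelihood that
        # corresponds to its output index.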
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
variance = np.zeros(ind.size)
for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))):
variance[ind==j] = lik.variance
return variance
def betaY(self,Y,Y_metadata):
#TODO not here.
return Y/self.gaussian_variance(Y_metadata=Y_metadata)[:,None]
def update_gradients(self, gradients):
self.gradient = gradients
def exact_inference_gradients(self, dL_dKdiag, Y_metadata):
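        # Sum dL/dKdiag over the points belonging to each output, giving one variance gradient
        # per Gaussian likelihood in the list.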
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))])
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
ind = Y_metadata['output_index'].flatten()
_variance = np.array([self.likelihoods_list[j].variance for j in ind ])
if full_cov:
var += np.eye(var.shape[0])*_variance
else:
var += _variance
return mu, var
def predictive_variance(self, mu, sigma, Y_metadata):
_variance = self.gaussian_variance(Y_metadata)
return _variance + sigma**2
def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
ind = Y_metadata['output_index'].flatten()
outputs = np.unique(ind)
Q = np.zeros( (mu.size,len(quantiles)) )
for j in outputs:
q = self.likelihoods_list[j].predictive_quantiles(mu[ind==j,:],
var[ind==j,:],quantiles,Y_metadata=None)
Q[ind==j,:] = np.hstack(q)
return [q[:,None] for q in Q.T]
def samples(self, gp, Y_metadata):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
N1, N2 = gp.shape
Ysim = np.zeros((N1,N2))
ind = Y_metadata['output_index'].flatten()
for j in np.unique(ind):
flt = ind==j
gp_filtered = gp[flt,:]
n1 = gp_filtered.shape[0]
lik = self.likelihoods_list[j]
_ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()])
Ysim[flt,:] = _ysim.reshape(n1,N2)
return Ysim
|
jameshensman/GPy
|
GPy/likelihoods/mixed_noise.py
|
Python
|
bsd-3-clause
| 3,283
|
[
"Gaussian"
] |
a3650c47fe70af8ccd500171677a00c9efb52bb420225ef3ad4a082b6e93b45c
|
import time
class FileObjectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Biopython read_fasta implementation
def SimpleFastaParser(file):
"""Generator function to iterate over Fasta records (as string tuples).
For each record a tuple of two strings is returned, the FASTA title
line (without the leading '>' character), and the sequence (with any
whitespace removed). The title line is not divided up into an
identifier (the first word) and comment or description.
>>> with open("Fasta/dups.fasta") as handle:
... for values in SimpleFastaParser(handle):
... print(values)
...
('alpha', 'ACGTA')
('beta', 'CGTC')
('gamma', 'CCGCC')
('alpha (again - this is a duplicate entry to test the indexing code)', 'ACGTA')
('delta', 'CGCGC')
"""
with open(file) as handle:
# Skip any text before the first record (e.g. blank lines, comments)
while True:
line = handle.readline()
if line == "":
return # Premature end of file, or just empty?
if line[0] == ">":
break
while True:
if line[0] != ">":
raise ValueError(
"Records in Fasta files should start with '>' character")
title = line[1:].rstrip()
lines = []
line = handle.readline()
while True:
if not line:
break
if line[0] == ">":
break
lines.append(line.rstrip())
line = handle.readline()
# Remove trailing whitespace, and any internal spaces
# (and any embedded \r which are possible in mangled files
# when not opened in universal read lines mode)
yield title, "".join(lines)#.replace(" ", "").replace("\r", "")
if not line:
return # StopIteration
def read_fasta(file_path):
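    # Minimal FASTA reader used for the timing comparison below: accumulate sequence lines
    # until the next '>' header and yield (header, sequence) pairs.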
with open(file_path, "r") as fastafile:
#if "b" not in fastafile.mode:
# raise FileObjectError("File is not opened in binary mode")
#seq = b""
seq = []#bytearray()
header = ""
trig = False
for line in fastafile:
line = line.strip()
# Check if line header
if line.startswith(">"):
if trig:
yield header, seq
#seq = b""
seq = []#bytearray()
trig = True
header = line
else:
seq.append(line)
yield header, "".join(seq).replace(" ", "").replace("\r", "")
start = time.time()
for fasta in read_fasta(file_path="/home/nauer/Projects/PrimerDesign/Sources/picr.fa"):
pass
end = time.time()
print(end - start)
start = time.time()
for fasta in SimpleFastaParser("/home/nauer/Projects/PrimerDesign/Sources/picr.fa"):
pass
end = time.time()
print(end - start)
|
nauer/BI-Army-Knife
|
src/Examples.py
|
Python
|
apache-2.0
| 3,073
|
[
"Biopython"
] |
c7efbffd05ac60299c3b8c9d62d1f8316875a0f52a4953cb29298408b589a499
|
from ase import *
# standard-library modules needed below to run the external 'bader' program and parse its output
import commands
import os
import string
class Bader:
'''class for running bader analysis and extracting data from it.
ACF.dat contains the coordinates of each atom, the charge
associated with it according to Bader partitioning, percentage of
the whole according to Bader partitioning and the minimum distance
to the surface. This distance should be compared to maximum
cut-off radius for the core region if pseudo potentials have been
used.
BCF.dat contains the coordinates of each Bader maxima, the charge
within that volume, the nearest atom and the distance to that
atom.
AtomVolumes.dat contains the number of each volume that has been
assigned to each atom. These numbers correspond to the number of
the BvAtxxxx.dat files.
bader [ -c bader | voronoi ]
[ -n bader | voronoi ]
[ -b neargrid | ongrid ]
[ -r refine_edge_iterations ]
[ -ref reference_charge ]
[ -p all_atom | all_bader ]
[ -p sel_atom | sel_bader ] [volume list]
[ -p atom_index | bader_index ]
[ -i cube | chgcar ]
[ -h ] [ -v ]
chargefile
References
-----------
G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
algorithm for Bader decomposition of charge density,
Comput. Mater. Sci. 36 254-360 (2006).
E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman An improved
grid-based algorithm for Bader charge allocation,
J. Comp. Chem. 28 899-908 (2007).
W. Tang, E. Sanville, and G. Henkelman A grid-based Bader analysis
algorithm without lattice bias, J. Phys.: Condens. Matter 21
084204 (2009).
'''
def __init__(self,atoms):
'''
'''
self.atoms = atoms
#get density and write cube file
calc = atoms.get_calculator()
x,y,z,density = calc.get_charge_density()
cubefile = 'charge_density.cube'
write(cubefile, atoms,data=density*Bohr**3)
self.densityfile = cubefile
#cmd to run for bader analysis
cmd = 'bader %s' % cubefile
status,output = commands.getstatusoutput(cmd)
if status != 0:
print output
self.charges = []
self.volumes = []
#now parse the output
f = open('ACF.dat','r')
#skip 2 lines
f.readline()
f.readline()
for i,atom in enumerate(self.atoms):
line = f.readline()
fields = line.split()
n = int(fields[0])
x = float(fields[1])
y = float(fields[2])
z = float(fields[3])
chg = float(fields[4])
mindist = float(fields[5])
vol = float(fields[6])
self.charges.append(chg)
self.volumes.append(vol)
f.close()
def get_bader_charges(self):
return self.charges
def get_bader_volumes(self):
'return volumes in Ang**3'
return [x*Bohr**3 for x in self.volumes]
def write_atom_volume(self,atomlist):
'''write bader atom volumes to cube files.
atomlist = [0,2] #for example
-p sel_atom Write the selected atomic volumes, read from the
subsequent list of volumes.
'''
alist = string.join([str(x) for x in atomlist],' ')
cmd = 'bader -p sel_atom %s %s' % (alist,self.densityfile)
print cmd
os.system(cmd)
def write_bader_volume(self,atomlist):
"""write bader atom volumes to cube files.
::
atomlist = [0,2] # for example
-p sel_bader Write the selected Bader volumes, read from the
subsequent list of volumes.
"""
alist = string.join([str(x) for x in atomlist],' ')
cmd = 'bader -p sel_bader %s %s' % (alist,self.densityfile)
print cmd
os.system(cmd)
def write_atom_index(self):
''' -p atom_index Write the atomic volume index to a charge
density file.
'''
cmd = 'bader -p atom_index %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_bader_index(self):
'''
-p bader_index Write the Bader volume index to a charge
density file.
'''
cmd = 'bader -p bader_index %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_all_atom(self):
'''
-p all_atom Combine all volumes associated with an atom and
write to file. This is done for all atoms and written to files
named BvAtxxxx.dat. The volumes associated with atoms are
those for which the maximum in charge density within the
volume is closest to the atom.
'''
cmd = 'bader -p all_atom %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_all_bader(self):
'''
-p all_bader Write all Bader volumes (containing charge above
threshold of 0.0001) to a file. The charge distribution in
each volume is written to a separate file, named
Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
file format, depending on the format of the initial charge
density file. These files can be quite large, so this option
should be used with caution.
'''
cmd = 'bader -p all_bader %s' % (self.densityfile)
print cmd
os.system(cmd)
if __name__ == '__main__':
from Jacapo import *
atoms = Jacapo.read_atoms('ethylene.nc')
b = Bader(atoms)
print b.get_bader_charges()
print b.get_bader_volumes()
b.write_atom_volume([3,4])
|
freephys/python_ase
|
ase/calculators/jacapo/utils/bader.py
|
Python
|
gpl-3.0
| 5,653
|
[
"ASE"
] |
f675dd0ec7af7fa26380b8ad8c76c2de83f69894be19b14186916dbeb7a634b8
|
# ocean
from django.db import models
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.utils import timezone
from django.contrib.gis.geos import GEOSGeometry
# from django.contrib.gis.geos import LineString
from django.contrib.gis.geos import Polygon, MultiPolygon
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
class Characteristic(models.Model):
title = models.CharField(
'Название', max_length=50, default='')
description = models.CharField(
'Описание', max_length=200, blank=True)
model = models.SlugField(
'Модель данных', default='', unique=True)
tag = models.SlugField(
'Опция для парсера (например, тэг в netCDF/hdf файле)', default='')
store_prefix = models.SlugField(
'Префикс хранения файлов', default='')
file_mask = models.CharField(
'Маска поиска файла', default='', max_length=50, blank=True)
def __str__(self):
# out = self.title if self.tag == '' else self.tag
return self.title
class Region(models.Model):
def __str__(self):
return self.title
"""
# Norv Sea
poly = Polygon((
(5.897592, 59.453594),
(-15.743695, 64.412255),
(15.617894, 77.781198),
(27.955703, 70.515584),
(5.897592, 59.453594),
))
# Norv Sea trapezoid
polygon = Polygon((
(-15.743695, 77.781198),
(27.955703, 77.781198),
(27.955703, 59.453594),
(-15.743695, 59.453594),
(-15.743695, 77.781198),
))
"""
# Norv Sea
norvSea = Polygon((
(5.897592, 59.453594),
(-15.743695, 64.412255),
(15.617894, 77.781198),
(27.955703, 70.515584),
(5.897592, 59.453594),
))
    # Kuril Islands
    # lon1 = 145.933452, lat1 = 43.902974
    # lon2 = 151.670932, lat2 = 39.980028
    kurils = Polygon((
        (145.933452, 43.902974),
        (151.670932, 43.902974),
        (151.670932, 39.980028),
        (145.933452, 39.980028),
        (145.933452, 43.902974),
    ))
mpolygon = MultiPolygon(norvSea, kurils)
title = models.CharField(max_length=50, default='Default regions of interest')
slug = models.SlugField(default='default')
# poly = models.PolygonField(default=polygon)
mpoly = models.MultiPolygonField(default=mpolygon)
# Bathymetric model
class Bathymetric(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Bathymetric height - sea floor height, meters
value = models.FloatField("Высота поверхности океана, м", null=True)
# Sea surface temperature model
class SeaSurfaceTemperature(models.Model):
# Longitude and latitude geo point
# point = models.PointField(default=Point(6, 70), unique=True)
point = models.PointField(default=Point(6, 70))
# Datetime value
# datetime = models.DateTimeField(default=timezone.now, unique_for_date=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Sea surface temperature, Kelvin
value = models.FloatField(
"Температура поверхности океана, К", null=True)
# Chlorophyll model
class Chlorophyll(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Chlorophyll concentration, OCI algorithm, mg m^-3'
value = models.FloatField("Концентрация хлорофила", null=True)
# Sea-ice cover model
class SeaIceCover(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Sea-ice cover, 0 - 1
value = models.FloatField("Ледяное покрытие океана, да - нет", null=True)
# 2 metre temperature
class Temperature(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# 2 metre temperature, Kelvin
value = models.FloatField("Температура у поверхности, К", null=True)
# Total cloud cover
class TotalCloudCover(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Total cloud cover
value = models.FloatField("Облачность", null=True)
# Sea surface wind speed model
class SeaWindSpeed(models.Model):
# Longitude and latitude geo point
point = models.PointField(default=Point(6, 70))
# Datetime value
datetime = models.DateTimeField(default=timezone.now, db_index=True)
# Sea surface wind speed, meters
value = models.FloatField(
"Скорость ветра у поверхности океана, м", null=True)
#####################################################################
# Adding auth token on user save
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
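# Usage note (a sketch, not part of this module): with DRF token authentication
# enabled, the token created above can be retrieved as
# Token.objects.get(user=some_user).key and is sent by API clients in the
# header "Authorization: Token <key>".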
# Define main characteristics and save them to the database
characteristics = [
{
"title": 'Sea surface temperature',
"description": 'Sea surface temperature, Kelvin',
"model": 'SeaSurfaceTemperature',
"tag": 'sst',
"storePrefix": 'modis-terra-l2',
"fileMask": 'L2_LAC_SST',
},
{
"title": 'Chlorophyll',
"description": 'Chlorophyll concentration, OCI algorithm, mg m^-3',
"model": 'Chlorophyll',
"tag": 'chlor_a',
"storePrefix": 'modis-terra-l2',
"fileMask": 'L2_LAC_OC',
},
{
"title": 'Bathymetric',
"description": 'Bathymetric height - sea floor height, meters',
"model": 'Bathymetric',
"tag": 'elevation',
"storePrefix": 'bathymetric',
"fileMask": '',
},
{
"title": 'Sea-ice cover',
"description": 'Sea-ice cover, 0 - 1',
"model": 'SeaIceCover',
"tag": 'ci',
"storePrefix": 'ci-t2m-tcc',
"fileMask": '',
},
{
"title": 'Temperature',
"description": '2 metre temperature, Kelvin',
"model": 'Temperature',
"tag": 't2m',
"storePrefix": 'ci-t2m-tcc',
"fileMask": '',
},
{
"title": 'Total cloud cover',
"description": 'Total cloud cover',
"model": 'TotalCloudCover',
"tag": 'tcc',
"storePrefix": 'ci-t2m-tcc',
"fileMask": '',
},
{
"title": 'Sea wind speed',
"description": 'Sea surface wind speed, meters',
"model": 'SeaWindSpeed',
"tag": 'wind',
"storePrefix": 'wind',
"fileMask": '',
},
]
for c in characteristics:
try:
Characteristic._meta.get_field("model")
try:
qs = Characteristic.objects.get(
model = c["model"],
)
        except Characteristic.DoesNotExist:
m = Characteristic(
title = c["title"],
description = c["description"],
model = c["model"],
tag = c["tag"],
store_prefix = c["storePrefix"],
file_mask = c["fileMask"],
)
m.save()
print("Main characteristic '%s' created" % (c["title"]))
except:
print("No characteristic model found. Try to make DB migrations.")
|
cttgroup/oceanhub
|
proto/data-refining/back/workdir/ocean/models.py
|
Python
|
apache-2.0
| 8,027
|
[
"NetCDF"
] |
3a346abdece1f3787dc515188d5b5d741d6963dc8233df8c531dda9300441cdb
|
##!/home/alpha/anaconda/bin/python
## -*- coding: utf-8 -*-
"""
This Python module calculates the Credit Value Adjustment for a single netting set of plain vanilla
interest rate swaps.
The code is based on the IPython Notebook of Matthias Groncki (see reference below).
References:
"CVA Calculation with QuantLib and Python", Matthias Groncki
- https://ipythonquant.wordpress.com/tag/cva/
- http://nbviewer.ipython.org/github/mgroncki/IPythonScripts/blob/master/CVA_calculation_I.ipynb
"FOOLING AROUND WITH QUANTLIB: GSR MODEL", Peter Caspers:
- https://quantlib.wordpress.com/tag/gsr-model/
"One Factor Gaussian Short Rate Model Implementation", Peter Caspers, March 1, 2013:
- http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2246013
"""
__author__ = 'Carl Johan Rehn'
__maintainer__ = "Carl Johan Rehn"
__email__ = "care02@gmail.com"
__credits__ = ["Sydney, The Red Merle"]
__copyright__ = "Copyright (c) 2015, Carl Johan Rehn"
__license__ = "The BSD 2-Clause License"
__version__ = "0.1.0"
__status__ = "Development"
import numpy as np
import matplotlib.pyplot as plt
# Check version of QuantLib...
import QuantLib as ql
def get_version():
return map(int, ql.__version__.split('.'))
if get_version()[1] < 6:
print 'You need QuantLib version 1.6 or higher!'
exit()
# General QuantLib functions...
set_evaluation_date = lambda date: ql.Settings.instance().setEvaluationDate(date)
link_to_curve = lambda relinkable_handle, curve: relinkable_handle.linkTo(curve)
# Random numbers...
def create_random_number_generator(evaluation_time_grid, seed=1):
"""
@param evaluation_time_grid:
@param seed:
@return:
"""
uniform_rng = ql.MersenneTwisterUniformRng(seed)
uniform_rsg = ql.MersenneTwisterUniformRsg(len(evaluation_time_grid) - 1, uniform_rng)
return ql.InvCumulativeMersenneTwisterGaussianRsg(uniform_rsg)
# Default curve...
def create_default_curve(default_dates, hazard_rates, day_count=ql.Actual365Fixed()):
"""
@param default_dates:
@param hazard_rates:
@param day_count:
@return:
"""
default_curve = ql.HazardRateCurve(default_dates, hazard_rates, day_count)
default_curve.enableExtrapolation()
return default_curve
def get_default_probability(times, default_curve):
"""
@param times:
@param default_curve:
@return:
"""
return np.vectorize(default_curve.defaultProbability)(times)
def get_survival_probability(times, default_curve):
"""
@param times:
@param default_curve:
@return:
"""
return np.vectorize(default_curve.survivalProbability)(times)
def get_default_density(times, default_curve):
"""
@param times:
@param default_curve:
@return:
"""
return np.vectorize(default_curve.defaultDensity)(times)
def get_hazard_rate(times, default_curve):
"""
@param times:
@param default_curve:
@return:
"""
return np.vectorize(default_curve.hazardRate)(times)
def calculate_default_probability_grid(evaluation_time_grid, default_curve):
"""
@param evaluation_time_grid:
@param default_curve:
@return:
"""
return np.vectorize(default_curve.defaultProbability)(
evaluation_time_grid[:-1], evaluation_time_grid[1:]
)
# Discount curve...
def create_flat_forward(todays_date, rate, day_count=ql.Actual365Fixed()):
"""
@param todays_date:
@param rate:
@param day_count:
@return:
"""
flat_forward = ql.FlatForward(todays_date, ql.QuoteHandle(rate), day_count)
flat_forward.enableExtrapolation()
return flat_forward, \
ql.YieldTermStructureHandle(flat_forward), \
ql.RelinkableYieldTermStructureHandle(flat_forward)
def generate_discount_factors(flat_forward_handle, evaluation_time_grid):
"""
@param flat_forward_handle:
@param evaluation_time_grid:
@return:
"""
return np.vectorize(flat_forward_handle.discount)(evaluation_time_grid)
def get_discount_curve(curve_dates,
discount_factors,
day_count_convention=ql.Actual365Fixed()):
"""
@param curve_dates:
@param discount_factors:
@param day_count_convention:
@return:
"""
discount_curve = ql.DiscountCurve(
curve_dates,
discount_factors,
day_count_convention
)
discount_curve.enableExtrapolation()
return discount_curve
# Pricing engine...
def create_pricing_engine(flat_forward_relinkable_handle):
"""
@param flat_forward_relinkable_handle:
@return:
"""
return ql.DiscountingSwapEngine(flat_forward_relinkable_handle)
# Swap portfolio...
def create_plain_vanilla_swap(start_date, maturity_date,
nominal_amount,
float_index,
fixed_rate,
fixed_leg_tenor=ql.Period("1y"),
fixed_leg_business_day_convention=ql.ModifiedFollowing,
fixed_leg_day_count_convention=ql.Thirty360(ql.Thirty360.BondBasis),
calendar=ql.Sweden(),
spread=0.0,
swap_type=ql.VanillaSwap.Payer):
"""
@param start_date:
@param maturity_date:
@param nominal_amount:
@param float_index:
@param fixed_rate:
@param fixed_leg_tenor:
@param fixed_leg_business_day_convention:
@param fixed_leg_day_count_convention:
@param calendar:
@param spread:
@param swap_type:
@return:
"""
end_date = calendar.advance(start_date, maturity_date)
fixed_schedule = ql.Schedule(
start_date,
end_date,
fixed_leg_tenor,
float_index.fixingCalendar(),
fixed_leg_business_day_convention,
fixed_leg_business_day_convention,
ql.DateGeneration.Backward,
False
)
float_schedule = ql.Schedule(
start_date,
end_date,
float_index.tenor(),
float_index.fixingCalendar(),
float_index.businessDayConvention(),
float_index.businessDayConvention(),
ql.DateGeneration.Backward,
False
)
swap = ql.VanillaSwap(
swap_type,
nominal_amount,
fixed_schedule,
fixed_rate,
fixed_leg_day_count_convention,
float_schedule,
float_index,
spread,
float_index.dayCounter()
)
return swap, [float_index.fixingDate(x) for x in float_schedule][:-1]
def make_simple_portfolio(list_of_start_dates, list_of_maturity_dates,
list_of_nominal_amounts,
list_of_float_indices,
list_of_fixed_rates,
list_of_swap_types):
"""
@param list_of_start_dates:
@param list_of_maturity_dates:
@param list_of_nominal_amounts:
@param list_of_float_indices:
@param list_of_fixed_rates:
@param list_of_swap_types:
@return:
"""
simple_portfolio = []
for (start_date, maturity_date,
nominal_amount,
float_index,
fixed_rate,
swap_type) in zip(list_of_start_dates, list_of_maturity_dates,
list_of_nominal_amounts,
list_of_float_indices,
list_of_fixed_rates,
list_of_swap_types):
simple_portfolio.append(
create_plain_vanilla_swap(start_date, maturity_date,
nominal_amount,
float_index,
fixed_rate,
swap_type=swap_type)
)
return simple_portfolio
def calculate_portfolio_npv(flat_forward_relinkable_handle, portfolio):
"""
@param flat_forward_relinkable_handle:
@param portfolio:
@return:
"""
engine = create_pricing_engine(flat_forward_relinkable_handle)
portfolio_npv = []
for deal, _ in portfolio:
deal.setPricingEngine(engine)
portfolio_npv.append(deal.NPV())
return portfolio_npv
# Evaluation grid, curve dates, and NPV matrix...
def define_evaluation_grid(todays_date, simple_portfolio, number_of_months=12*6):
"""
@param todays_date:
@param simple_portfolio:
@param number_of_months:
@return:
"""
evaluation_dates_grid = [
todays_date + ql.Period(i_month, ql.Months) for i_month in range(number_of_months)
]
for deal in simple_portfolio:
evaluation_dates_grid += deal[1]
evaluation_dates_grid = np.unique(np.sort(evaluation_dates_grid))
evaluation_time_grid = np.vectorize(
lambda x: ql.ActualActual().yearFraction(todays_date, x)
)(evaluation_dates_grid)
# diff_evaluation_time_grid = evaluation_time_grid[1:] - evaluation_time_grid[:-1]
return evaluation_dates_grid, evaluation_time_grid #, diff_evaluation_time_grid
def define_curve_dates(date, n_years=10):
"""
@param date:
@param n_years:
@return:
"""
# append first half year to date
curve_dates = [date, date + ql.Period(6, ql.Months)]
curve_dates += [date + ql.Period(i_year, ql.Years) for i_year in range(1, n_years + 1)]
return curve_dates
# TODO ...
def create_npv_matrix(todays_date,
number_of_paths,
evaluation_dates_grid,
simple_portfolio,
flat_forward,
flat_forward_relinkable_handle,
zero_bonds,
float_index):
"""
@param todays_date:
@param number_of_paths:
@param evaluation_dates_grid:
@param simple_portfolio:
@param flat_forward:
@param flat_forward_relinkable_handle:
@param zero_bonds:
@param float_index:
@return:
"""
n_dates, n_deals = len(evaluation_dates_grid), len(simple_portfolio)
npv_matrix = np.zeros(
(number_of_paths, n_dates, n_deals)
)
for i_path in range(number_of_paths):
for i_date in range(n_dates):
date = evaluation_dates_grid[i_date]
discount_curve = get_discount_curve(
define_curve_dates(date), zero_bonds[i_path, i_date, :]
)
set_evaluation_date(date)
link_to_curve(flat_forward_relinkable_handle, discount_curve)
# TODO Check... is this correct?
is_valid_fixing_date = float_index.isValidFixingDate(date)
if is_valid_fixing_date:
fixing = float_index.fixing(date)
float_index.addFixing(date, fixing)
for i_deal in range(n_deals):
npv_matrix[i_path, i_date, i_deal] = simple_portfolio[i_deal][0].NPV()
ql.IndexManager.instance().clearHistories()
set_evaluation_date(todays_date)
link_to_curve(flat_forward_relinkable_handle, flat_forward)
return npv_matrix
def calculate_discounted_npv_matrix(npv_matrix, discount_factors):
"""
@param npv_matrix:
@param discount_factors:
@return:
"""
discounted_npv_matrix = np.zeros(npv_matrix.shape)
for i in range(npv_matrix.shape[2]):
discounted_npv_matrix[:, :, i] = npv_matrix[:, :, i] * discount_factors
return discounted_npv_matrix
# Gsr model and simulation of paths...
def generate_gsr_model(flat_forward_handle,
volatility_step_dates, volatilities,
mean_reversion,
forward_measure_time=16.0):
return ql.Gsr(flat_forward_handle,
volatility_step_dates, volatilities,
mean_reversion,
forward_measure_time)
def generate_paths(number_of_paths,
evaluation_time_grid,
tenors,
inv_cumulative_gaussian_rsg,
model):
"""
@param number_of_paths:
@param evaluation_time_grid:
@param tenors:
@param inv_cumulative_gaussian_rsg:
@param model:
@return:
"""
n_tenors = len(tenors)
diff_evaluation_time_grid = evaluation_time_grid[1:] - evaluation_time_grid[:-1]
x = np.zeros((number_of_paths, len(evaluation_time_grid)))
y = np.zeros((number_of_paths, len(evaluation_time_grid)))
zero_bonds = np.zeros(
(number_of_paths, len(evaluation_time_grid), n_tenors)
)
for j_tenor in range(n_tenors):
zero_bonds[:, 0, j_tenor] = model.zerobond(
tenors[j_tenor], 0, 0
)
process = model.stateProcess()
for n_path in range(number_of_paths):
next_sequence = inv_cumulative_gaussian_rsg.nextSequence().value()
for i_time in range(1, len(evaluation_time_grid)):
t_start = evaluation_time_grid[i_time - 1]
t_end = evaluation_time_grid[i_time]
x[n_path, i_time] = process.expectation(
t_start, x[n_path, i_time - 1], diff_evaluation_time_grid[i_time - 1]
) + next_sequence[i_time-1] * process.stdDeviation(
t_start, x[n_path, i_time - 1], diff_evaluation_time_grid[i_time - 1]
)
# y equals standardized x (see Gsr-paper by Caspers and Gsr model in QuantLib)
y[n_path, i_time] = \
(x[n_path, i_time] - process.expectation(0, 0, t_end)) / process.stdDeviation(0, 0, t_end)
for j_tenor in range(n_tenors):
zero_bonds[n_path, i_time, j_tenor] = model.zerobond(
t_end + tenors[j_tenor], t_end, y[n_path, i_time]
)
return x, zero_bonds
# Netting, exposure, and CVA...
def calculate_netted_npv_matrix(npv_matrix):
"""
@param npv_matrix:
@return:
"""
return np.sum(npv_matrix, axis=2)
def calculate_exposure(portfolio_npv):
"""
@param portfolio_npv:
@return:
"""
exposure = portfolio_npv.copy()
exposure[exposure < 0] = 0
return exposure
def calculate_expected_exposure(portfolio_npv, number_of_paths):
"""
@param portfolio_npv:
@param number_of_paths:
@return:
"""
return np.sum(
calculate_exposure(portfolio_npv), axis=0
) / number_of_paths
def calculate_potential_future_exposure(exposure, number_of_paths, quantile=0.95):
"""
@param exposure:
@param number_of_paths:
@param quantile:
@return:
"""
    potential_future_exposure = np.apply_along_axis(
        lambda x: np.sort(x)[int(quantile * number_of_paths)], 0, exposure
    )
# Alternative formulation: use max of each exposure path
# potential_future_exposure = np.sort(np.max(exposure, axis=1))[quantile * number_of_paths]
return potential_future_exposure
def calculate_economic_cva(expected_discounted_exposure, default_probabilities, recovery_rate=0.4):
"""
@param expected_discounted_exposure:
@param default_probabilities:
@param recovery_rate:
@return:
"""
return (1 - recovery_rate) * np.sum(
expected_discounted_exposure[1:] * default_probabilities
)
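# Worked sanity check for the formula above (illustrative numbers only, not taken
# from the market data in main()): with recovery_rate = 0.4, a constant expected
# discounted exposure of 10,000 on each grid interval and default probabilities
# summing to 0.02 over the horizon, CVA = (1 - 0.4) * 10,000 * 0.02 = 120.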
# Plotting functions...
def plot_npv_paths(n_first, n_last,
evaluation_time_grid,
portfolio_npv, discounted_portfolio_npv):
_, (axis_1, axis_2) = plt.subplots(2, 1, figsize=(12, 10), sharey=True)
for i_path in range(n_first, n_last):
axis_1.plot(evaluation_time_grid, portfolio_npv[i_path, :])
axis_1.set_xlabel("Years")
axis_1.set_ylabel("Portfolio NPV")
axis_1.set_title("Portfolio NPV paths")
for i_path in range(n_first, n_last):
axis_2.plot(evaluation_time_grid, discounted_portfolio_npv[i_path, :])
axis_2.set_xlabel("Years")
axis_2.set_ylabel("Discounted Portfolio NPV")
axis_2.set_title("Discounted portfolio NPV paths")
def plot_exposure_paths(n_first, n_last,
evaluation_time_grid,
exposure, discounted_exposure):
_, (axis_1, axis_2) = plt.subplots(2, 1, figsize=(12, 10)) # , sharey=True)
for i_path in range(n_first, n_last):
axis_1.plot(evaluation_time_grid, exposure[i_path, :])
axis_1.set_ylim([-10000, 70000])
axis_1.set_xlabel("Years")
axis_1.set_ylabel("Exposure")
axis_1.set_title("Exposure paths")
for i_path in range(n_first, n_last):
axis_2.plot(evaluation_time_grid, discounted_exposure[i_path, :])
axis_2.set_ylim([-10000, 70000])
axis_2.set_xlabel("Years")
axis_2.set_ylabel("Discounted Exposure")
axis_2.set_title("Discounted exposure paths")
def plot_expected_exposure_paths(evaluation_time_grid,
expected_exposure, expected_discounted_exposure):
_, (axis_1, axis_2) = plt.subplots(2, 1, figsize=(8, 10)) # , sharey=True)
axis_1.plot(evaluation_time_grid, expected_exposure)
axis_1.set_xlabel("Time in years")
axis_1.set_ylabel("Exposure")
axis_1.set_title("Expected exposure")
axis_2.plot(evaluation_time_grid, expected_discounted_exposure)
axis_2.set_xlabel("Time in years")
axis_2.set_ylabel("Discounted Exposure")
axis_2.set_title("Expected discounted exposure")
def plot_expected_discounted_exposure(evaluation_time_grid,
expected_discounted_exposure):
# plt.figure(figsize=(7, 5), dpi=300)
plt.figure()
plt.plot(evaluation_time_grid, expected_discounted_exposure)
plt.ylim([-2000, 10000])
plt.xlabel("Years")
plt.ylabel("Expected discounted exposure")
plt.title("Expected discounted exposure")
def plot_potential_future_exposure(evaluation_time_grid,
potential_future_exposure):
# plt.figure(figsize=(7, 5), dpi=300)
plt.figure()
plt.plot(evaluation_time_grid, potential_future_exposure)
plt.xlabel("Years")
plt.ylabel("Potential future exposure")
plt.ylim([-2000, 35000])
plt.title("Potential future exposure")
def plot_default_curve(times, default_curve):
_, ((axis_1, axis_2), (axis_3, axis_4)) = plt.subplots(2, 2, figsize=(10, 10))
default_probability = get_default_probability(times, default_curve)
axis_1.plot(times, default_probability)
axis_1.set_xlabel("Years")
axis_1.set_ylabel("Probability")
axis_1.set_title("Default probability")
survival_probability = get_survival_probability(times, default_curve)
axis_2.plot(times, survival_probability)
axis_2.set_xlabel("Years")
axis_2.set_ylabel("Probability")
axis_2.set_title("Survival probability")
default_density = get_default_density(times, default_curve)
axis_3.plot(times, default_density)
axis_3.set_xlabel("Years")
axis_3.set_ylabel("Density")
axis_3.set_title("Default density")
hazard_rate = get_hazard_rate(times, default_curve)
axis_4.plot(times, hazard_rate)
axis_4.set_xlabel("Years")
axis_4.set_ylabel("Rate")
axis_4.set_title("Hazard rate")
def main():
# Set evaluation date...
# todays_date = ql.Date(7, 4, 2015)
todays_date = ql.Date(13, 8, 2015)
# ql.Settings.instance().setEvaluationDate(todays_date)
set_evaluation_date(todays_date)
# Market data...
rate = ql.SimpleQuote(0.03)
flat_forward, flat_forward_handle, flat_forward_relinkable_handle = \
create_flat_forward(todays_date, rate)
Euribor6M = ql.Euribor6M(flat_forward_relinkable_handle)
# Create simple swap portfolio...
list_of_start_dates = [
todays_date + ql.Period("2d"),
todays_date + ql.Period("2d")
]
list_of_maturity_dates = [ql.Period(years) for years in ["5Y", "4Y"]]
list_of_nominal_amounts = [1E6, 5E5]
list_of_float_indices = [Euribor6M, Euribor6M]
list_of_fixed_rates = [0.03, 0.03]
list_of_swap_types = [ql.VanillaSwap.Payer, ql.VanillaSwap.Receiver]
simple_portfolio = make_simple_portfolio(
list_of_start_dates, list_of_maturity_dates,
list_of_nominal_amounts,
list_of_float_indices,
list_of_fixed_rates,
list_of_swap_types
)
portfolio_npv = calculate_portfolio_npv(flat_forward_relinkable_handle, simple_portfolio)
# Instantiate the Gsr model...
volatility_step_dates = [todays_date + 100]
volatilities = [
ql.QuoteHandle(ql.SimpleQuote(0.0075)),
ql.QuoteHandle(ql.SimpleQuote(0.0075))
]
mean_reversion = [ql.QuoteHandle(ql.SimpleQuote(0.02))]
gsr_model = generate_gsr_model(flat_forward_handle,
volatility_step_dates, volatilities,
mean_reversion,
forward_measure_time=16.0)
# Create evaluation grid and simulate paths (using the Gsr model)...
evaluation_dates_grid, evaluation_time_grid = \
define_evaluation_grid(todays_date, simple_portfolio)
inv_cumulative_gaussian_rsg = create_random_number_generator(evaluation_time_grid)
number_of_paths = 1500
tenors = np.array([0.0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x, zero_bonds = generate_paths(
number_of_paths, evaluation_time_grid, tenors, inv_cumulative_gaussian_rsg, gsr_model
)
# Plot paths...
# for i in range(number_of_paths):
# plt.plot(evaluation_time_grid, x[i, :])
# Create the discounted NPV matrix...
npv_matrix = create_npv_matrix(
todays_date,
number_of_paths,
evaluation_dates_grid,
simple_portfolio,
flat_forward,
flat_forward_relinkable_handle,
zero_bonds,
Euribor6M
)
discount_factors = generate_discount_factors(flat_forward_handle, evaluation_time_grid)
discounted_npv_cube = calculate_discounted_npv_matrix(npv_matrix, discount_factors)
# Calculate the portfolio NPV for the netting set...
portfolio_npv = calculate_netted_npv_matrix(npv_matrix)
discounted_portfolio_npv = calculate_netted_npv_matrix(discounted_npv_cube)
# Plot the first NPV paths...
n_first, n_last = 0, 30
plot_npv_paths(n_first, n_last,
evaluation_time_grid,
portfolio_npv, discounted_portfolio_npv)
# Calculate the exposure and discounted exposure...
exposure = calculate_exposure(portfolio_npv)
discounted_exposure = calculate_exposure(discounted_portfolio_npv)
# Plot the first exposure paths...
n_first, n_last = 0, 30
plot_exposure_paths(n_first, n_last,
evaluation_time_grid,
exposure, discounted_exposure)
# Calculate the "expected" and the "expected discounted" exposure...
expected_exposure = calculate_expected_exposure(portfolio_npv, number_of_paths)
expected_discounted_exposure = calculate_expected_exposure(discounted_portfolio_npv, number_of_paths)
# Plot the "expected" and the "expected discounted" exposure paths...
plot_expected_exposure_paths(evaluation_time_grid,
expected_exposure, expected_discounted_exposure)
plot_expected_discounted_exposure(evaluation_time_grid,
expected_discounted_exposure)
# Calculate the PFE (corresponding to the default 95% quantile)...
potential_future_exposure = \
calculate_potential_future_exposure(exposure, number_of_paths)
plot_potential_future_exposure(evaluation_time_grid,
potential_future_exposure)
# calculate the maximum PFE...
max_potential_future_exposure = np.max(potential_future_exposure)
# Default curve
default_dates = [todays_date + ql.Period(i_year, ql.Years) for i_year in range(11)]
hazard_rates = [0.02 * i_year for i_year in range(11)]
default_curve = create_default_curve(default_dates, hazard_rates)
# Plot default curves (default and survival probabilities, default densities, and hazard rates)...
default_times = np.linspace(0, 30, 100)
plot_default_curve(default_times, default_curve)
# Calculate default probabilities...
default_probabilities = \
calculate_default_probability_grid(evaluation_time_grid, default_curve)
# Calculation of the CVA...
economic_cva = calculate_economic_cva(expected_discounted_exposure, default_probabilities, recovery_rate=0.4)
print economic_cva
# List of TODOs...
# TODO Use QuantLib to calculate CCR and CVA REA, and KVA with SA-CCR
# TODO Add doc tests to functions
# TODO Use pandas to request data and SQLite or MySQL as data repositories
if __name__ == '__main__':
main()
|
carljohanrehn/xvapy
|
py/cva_calculation.py
|
Python
|
bsd-2-clause
| 24,700
|
[
"Gaussian"
] |
2a4551897b35edba00fefe67fb9dc9ed13d377bd48bbcd87649c78d46034b9ab
|
#
# Copyright 2019-2020 Johannes Hoermann (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Compute ion concentrations with general Poisson-Nernst-Planck (PNP) equations.
Copyright 2019 IMTEK Simulation
University of Freiburg
Authors:
Johannes Hoermann <johannes.hoermann@imtek-uni-freiburg.de>
"""
import logging
import time
import numpy as np
import scipy.constants as sc
import scipy.optimize
logger = logging.getLogger(__name__)
# Express the non-linear part of the transport equation (more precisely, of the
# flux) via Bernoulli functions
#
# $$ B(x) = \frac{x}{\exp(x)-1} $$
#
# In order not to run into numerical trouble close to 0, we prefer the Taylor
# expansion there. The literature (Selberherr, S. Analysis and Simulation of
# Semiconductor Devices, Springer 1984) recommends an even more elaborate
# piecewise definition, but we will see in the following that this approach
# suffices for the stationary problem at hand.
def B(x):
"""Bernoulli function."""
return np.where(
np.abs(x) < 1e-9,
1 - x/2 + x**2/12 - x**4/720, # Taylor
x / (np.exp(x) - 1))
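# Quick sanity values for B(x) (illustrative, rounded): the Taylor branch yields
# B(0) = 1 exactly, while the closed form gives B(1) = 1/(e - 1) ~= 0.5820, e.g.
#
#   assert abs(float(B(0.0)) - 1.0) < 1e-12
#   assert abs(float(B(1.0)) - 1.0/(np.e - 1.0)) < 1e-12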
# "lazy" Ansatz for approximating Jacobian
def jacobian(f, x0, dx=np.NaN):
"""Naive way to construct N x N Jacobin Fij from N-valued function
f of N-valued vector x0.
Parameters
----------
f : callable
N-valued function of N-valued variable vector
x0 : (N,) ndarray
N-valued variable vector
dx : float (default: np.nan)
Jacobian built with finite difference scheme of spacing ``dx``.
If ``np.nan``, then use machine precision.
Returns
-------
F : (N,N) ndarray
        NxN-valued 2nd order finite difference scheme approximation of the Jacobian
convention: F_ij = dfidxj, where i are array rows, j are array columns
"""
N = len(x0)
# choose step as small as possible
if np.isnan(dx).any():
res = np.finfo('float64').resolution
dx = np.abs(x0) * np.sqrt(res)
dx[dx < res] = res
if np.isscalar(dx):
dx = np.ones(N) * dx
F = np.zeros((N,N)) # Jacobian Fij
# convention: dfi_dxj
# i are rows, j are columns
for j in range(N):
dxj = np.zeros(N)
dxj[j] = dx[j]
F[:,j] = (f(x0 + dxj) - f(x0 - dxj)) / (2.0*dxj[j])
return F
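# Minimal usage sketch for jacobian() (illustrative): for f(x) = [x0**2, x0*x1]
# the analytic Jacobian at x0 = [1, 2] is [[2, 0], [2, 1]], which the central
# difference estimate should reproduce up to the step size, e.g.
#
#   J = jacobian(lambda x: np.array([x[0]**2, x[0]*x[1]]), np.array([1.0, 2.0]))
#   assert np.allclose(J, np.array([[2.0, 0.0], [2.0, 1.0]]), atol=1e-6)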
class PoissonNernstPlanckSystem:
"""Describes and solves a 1D Poisson-Nernst-Planck system"""
# properties "offer" the solution in physical units:
@property
def grid(self):
return self.X*self.l_unit
@property
def potential(self):
return self.uij*self.u_unit
@property
def concentration(self):
return np.where(self.nij > np.finfo('float64').resolution,
self.nij*self.c_unit, 0.0)
@property
def charge_density(self):
return np.sum(self.F * self.concentration.T * self.z,axis=1)
@property
def x1_scaled(self):
return self.x0_scaled + self.L_scaled
#TODO: replace "didactic" Newton solver from IMTEK Simulation course with
# some standard package
def newton(self,f,xij,**kwargs):
"""Newton solver expects system f and initial value xij
Parameters
----------
f : callable
N-valued function of N-valued vector
xij : (N,) ndarray
N-valued initial value vector
Returns
-------
xij : (N,) ndarray
N-valued solution vector
"""
self.xij = []
self.converged = True
# assume convergence, set to 'false' if maxit exceeded later
self.logger.debug('Newton solver, grid points N = {:d}'.format(self.N))
self.logger.debug('Newton solver, tolerance e = {:> 8.4g}'.format(self.e))
self.logger.debug('Newton solver, maximum number of iterations M = {:d}'.format(self.maxit))
i = 0
delta_rel = 2*self.e
self.logger.info("Convergence criterion: norm(dx) < {:4.2e}".format(self.e))
self.convergenceStepAbsolute = np.zeros(self.maxit)
self.convergenceStepRelative = np.zeros(self.maxit)
self.convergenceResidualAbsolute = np.zeros(self.maxit)
dxij = np.zeros(self.N)
while delta_rel > self.e and i < self.maxit:
self.logger.debug('*** Newton solver iteration {:d} ***'.format(i))
# avoid cluttering log
self.logger.disabled = True
J = jacobian(f, xij)
self.logger.disabled = False
rank = np.linalg.matrix_rank(J)
self.logger.debug(' Jacobian ({}) rank {:d}'.format(J.shape, rank))
if rank < self.N:
self.logger.warn("Singular jacobian of rank"
+ "{:d} < {:d} at step {:d}".format(
rank, self.N, i ))
break
F = f(xij)
invJ = np.linalg.inv(J)
dxij = np.dot( invJ, F )
delta = np.linalg.norm(dxij)
delta_rel = delta / np.linalg.norm(xij)
xij -= dxij
self.xij.append(xij)
normF = np.linalg.norm(F)
self.logger.debug(' convergence norm(dxij), absolute {:> 8.4g}'.format(delta))
            self.logger.debug(' convergence norm(dxij), relative {:> 8.4g}'.format(delta_rel))
self.logger.debug(' residual norm(F), absolute {:> 8.4g}'.format(normF))
self.convergenceStepAbsolute[i] = delta
self.convergenceStepRelative[i] = delta_rel
self.convergenceResidualAbsolute[i] = normF
self.logger.info("Step {:4d}: norm(dx)/norm(x) = {:4.2e}, norm(dx) = {:4.2e}, norm(F) = {:4.2e}".format(
i, delta_rel, delta, normF) )
i += 1
if i == self.maxit:
self.logger.warn("Maximum number of iterations reached")
self.converged = False
self.logger.info("Ended after {:d} steps.".format(i))
self.convergenceStepAbsolute = self.convergenceStepAbsolute[:i]
self.convergenceStepRelative = self.convergenceStepRelative[:i]
self.convergenceResidualAbsolute = self.convergenceResidualAbsolute[:i]
return xij
def solver_callback(self, xij, *_):
"""Callback function that can be used by optimizers of scipy.optimize.
The second argument "*_" makes sure that it still works when the
optimizer calls the callback function with more than one argument. See
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""
if self.callback_count == 0:
logger.info(
"{:>12s} {:>12s} {:>12s} {:>12s} {:>12s} {:>12s}".format(
"#callback","residual norm","abs dx norm", "rel dx norm",
"timing, step", "timing, tot.") )
self.xij = [ self.xi0 ]
self.converged = True # TODO remove (?)
self.convergenceStepAbsolute = []
self.convergenceStepRelative = []
self.convergenceResidualAbsolute = []
dxij = np.zeros(self.N)
self.xij.append(xij)
dxij = xij - self.xij[self.callback_count]
delta = np.linalg.norm(dxij)
delta_rel = delta / np.linalg.norm(xij)
fj = self.G(xij)
norm_fj = np.linalg.norm(fj)
self.convergenceStepAbsolute.append(delta)
self.convergenceStepRelative.append(delta_rel)
self.convergenceResidualAbsolute.append(norm_fj)
t1 = time.perf_counter()
dt = t1 - self.tj
dT = t1 - self.t0
self.tj = t1
logger.info(
"{:12d} {:12.5e} {:12.5e} {:12.5e} {:12.5e} {:12.5e}".format(
self.callback_count, norm_fj, delta , delta_rel, dt, dT))
self.callback_count += 1
return
def discretize(self):
"""Sets up discretization scheme and initial value"""
# indices
self.Ni = self.N+1
I = np.arange(self.Ni)
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'discretization segments N', self.N, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'grid points N', self.Ni, lwidth=self.label_width))
# discretization
self.dx = self.L_scaled / self.N # spatial step
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'dx', self.dx, lwidth=self.label_width))
# positions (scaled)
self.X = self.x0_scaled + I*self.dx
        # Boundary & initial values
# internally:
# n: dimensionless concentrations
# u: dimensionless potential
# i: spatial index
# j: temporal index
# k: species index
# initial concentrations equal to bulk concentrations
# Kronecker product, M rows (ion species), Ni cols (grid points),
if self.ni0 is None:
self.ni0 = np.kron( self.c_scaled, np.ones((self.Ni,1)) ).T
if self.zi0 is None:
self.zi0 = np.kron( self.z, np.ones((self.Ni,1)) ).T # does not change
# self.initial_values()
def initial_values(self):
"""
        Solves decoupled linear system to get initial potential distribution.
"""
zini0 = self.zi0*self.ni0 # z*n
# shape: ion species (rows), grid points (cols), sum over ion species (along rows)
rhoi0 = 0.5*zini0.sum(axis=0)
# system matrix of spatial poisson equation
Au = np.zeros((self.Ni,self.Ni))
bu = np.zeros(self.Ni)
Au[0,0] = 1
Au[-1,-1] = 1
for i in range(1,self.N):
Au[i,[i-1,i,i+1]] = [1.0, -2.0, 1.0] # 1D Laplace operator, 2nd order
bu = rhoi0*self.dx**2 # => Poisson equation
bu[0] = self.u0
bu[-1] = self.u1
# get initial potential distribution by solving Poisson equation
self.ui0 = np.dot( np.linalg.inv(Au), bu) # A u - b = 0 <=> u = A^-1 b
return self.ui0
# evokes Newton solver
def solve(self):
"""Evokes solver
Returns
-------
uij : (Ni,) ndarray
potential at Ni grid points
nij : (M,Nij) ndarray
concentrations of M species at Ni grid points
lamj: (L,) ndarray
value of L Lagrange multipliers (not implemented, empty)
"""
# if not yet done, set up initial values
if self.ui0 is None:
self.initial_values()
if len(self.g) > 0:
self.xi0 = np.concatenate([self.ui0, self.ni0.flatten(), np.zeros(len(self.g))])
else:
self.xi0 = np.concatenate([self.ui0, self.ni0.flatten()])
self.callback_count = 0
self.t0 = time.perf_counter()
        self.tj = self.t0 # previous callback timer value
# neat lecture on scipy optimizers
# http://scipy-lectures.org/advanced/mathematical_optimization/
if isinstance(self.solver, str) and self.solver in [
'hybr','lm','broyden1','broyden2','anderson','linearmixing',
'diagbroyden','excitingmixing','krylov','df-sane']:
res = scipy.optimize.root(self.G,self.xi0,
method=self.solver,callback=self.solver_callback,
options = self.options)
self.xij1 = res.x
if not res.success:
logger.warn(res.message)
elif isinstance( self.solver, str):
f = lambda x: np.linalg.norm(self.G(x))
res = scipy.optimize.minimize(f,self.xi0.copy(),
method=self.solver,callback=self.solver_callback,
options=self.options)
self.xij1 = res.x
if not res.success:
logger.warn(res.message)
else:
self.xij1 = self.solver(self.G,self.xi0.copy(),
callback=self.solver_callback, options=self.options)
# store results:
self.uij = self.xij1[:self.Ni] # potential
self.nij = self.xij1[self.Ni:(self.M+1)*self.Ni].reshape(self.M, self.Ni) # concentrations
self.lamj = self.xij1[(self.M+1)*self.Ni:] # Lagrange multipliers
return self.uij, self.nij, self.lamj
# standard sets of boundary conditions:
def useStandardInterfaceBC(self):
"""Interface at left hand side and open bulk at right hand side"""
self.boundary_conditions = []
# Potential Dirichlet BC
self.u0 = self.delta_u_scaled
self.u1 = 0
self.logger.info('Left hand side Dirichlet boundary condition: u0 = {:> 8.4g}'.format(self.u0))
self.logger.info('Right hand side Dirichlet boundary condition: u1 = {:> 8.4g}'.format(self.u1))
self.boundary_conditions.extend([
lambda x: self.leftPotentialDirichletBC(x,self.u0),
lambda x: self.rightPotentialDirichletBC(x,self.u1) ])
# self.rightPotentialBC = lambda x: self.rightPotentialDirichletBC(x,self.u1)
#self.rightConcentrationBC = []
for k in range(self.M):
self.logger.info('Ion species {:02d} left hand side concentration Flux boundary condition: j0 = {:> 8.4g}'.format(k,0))
self.logger.info('Ion species {:02d} right hand side concentration Dirichlet boundary condition: c1 = {:> 8.4g}'.format(k,self.c_scaled[k]))
self.boundary_conditions.extend( [
lambda x, k=k: self.leftControlledVolumeSchemeFluxBC(x,k),
lambda x, k=k: self.rightDirichletBC(x,k,self.c_scaled[k]) ] )
#self.rightConcentrationBC.append(
# lambda x, k=k: self.rightDirichletBC(x,k,self.c_scaled[k]) )
# counter-intuitive behavior of lambda in loop:
# https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture
# workaround: default parameter k=k
def useStandardCellBC(self):
"""Interfaces at left hand side and right hand side"""
self.boundary_conditions = []
# Potential Dirichlet BC
self.u0 = self.delta_u_scaled / 2.0
self.u1 = - self.delta_u_scaled / 2.
self.logger.info('{:>{lwidth}s} u0 = {:< 8.4g}'.format(
'Left hand side Dirichlet boundary condition', self.u0, lwidth=self.label_width))
self.logger.info('{:>{lwidth}s} u1 = {:< 8.4g}'.format(
'Right hand side Dirichlet boundary condition', self.u1, lwidth=self.label_width))
self.boundary_conditions.extend([
lambda x: self.leftPotentialDirichletBC(x,self.u0),
lambda x: self.rightPotentialDirichletBC(x,self.u1) ])
N0 = self.L_scaled*self.c_scaled # total amount of species in cell
for k in range(self.M):
self.logger.info('{:>{lwidth}s} j0 = {:<8.4g}'.format(
'Ion species {:02d} left hand side concentration Flux boundary condition'.format(k),
0.0, lwidth=self.label_width))
self.logger.info('{:>{lwidth}s} N0 = {:<8.4g}'.format(
'Ion species {:02d} number conservation constraint'.format(k),
N0[k], lwidth=self.label_width))
self.boundary_conditions.extend( [
lambda x, k=k: self.leftControlledVolumeSchemeFluxBC(x,k),
lambda x, k=k, N0=N0[k]: self.numberConservationConstraint(x,k,N0) ] )
def useSternLayerCellBC(self, implicit=False):
"""Interfaces at left hand side and right hand side,
Stern layer either by prescribing linear potential regime between cell
boundary and outer Helmholtz plane (OHP), or by applying Robin BC;
zero flux BC on all ion species.
Parameters
----------
implicit : bool, optional
If true, then true Robin BC are applied. Attention:
            if desired, the domain must be cropped manually by twice the Stern
layer thickness lambda_S. Otherwise, enforces
constant potential gradient across Stern layer region of thickness
lambda_S. (default:False)
"""
self.boundary_conditions = []
# Potential Dirichlet BC
self.u0 = self.delta_u_scaled / 2.0
self.u1 = - self.delta_u_scaled / 2.0
if implicit: # implicitly treat Stern layer via Robin BC
self.logger.info('Implicitly treating Stern layer via Robin BC')
self.logger.info('{:>{lwidth}s} u0 + lambda_S*dudx = {:< 8.4g}'.format(
'Left hand side Robin boundary condition', self.u0, lwidth=self.label_width))
self.logger.info('{:>{lwidth}s} u1 + lambda_S*dudx = {:< 8.4g}'.format(
'Right hand side Robin boundary condition', self.u1, lwidth=self.label_width))
self.boundary_conditions.extend([
lambda x: self.leftPotentialRobinBC(x,self.lambda_S_scaled,self.u0),
lambda x: self.rightPotentialRobinBC(x,self.lambda_S_scaled,self.u1) ])
else: # explicitly treat Stern layer via linear regime
self.logger.info('Explicitly treating Stern layer as uniformly charged regions')
# set left and right hand side outer Helmholtz plane
self.lhs_ohp = self.x0_scaled + self.lambda_S_scaled
self.rhs_ohp = self.x0_scaled + self.L_scaled - self.lambda_S_scaled
self.logger.info('{:>{lwidth}s} u0 = {:< 8.4g}'.format(
'Left hand side Dirichlet boundary condition', self.u0, lwidth=self.label_width))
self.logger.info('{:>{lwidth}s} u1 = {:< 8.4g}'.format(
'Right hand side Dirichlet boundary condition', self.u1, lwidth=self.label_width))
self.boundary_conditions.extend([
lambda x: self.leftPotentialDirichletBC(x,self.u0),
lambda x: self.rightPotentialDirichletBC(x,self.u1) ])
N0 = self.L_scaled*self.c_scaled # total amount of species in cell
for k in range(self.M):
self.logger.info('{:>{lwidth}s} j0 = {:<8.4g}'.format(
'Ion species {:02d} left hand side concentration Flux boundary condition'.format(k),
0.0, lwidth=self.label_width))
self.logger.info('{:>{lwidth}s} N0 = {:<8.4g}'.format(
'Ion species {:02d} number conservation constraint'.format(k),
N0[k], lwidth=self.label_width))
self.boundary_conditions.extend( [
lambda x, k=k: self.leftControlledVolumeSchemeFluxBC(x,k),
lambda x, k=k, N0=N0[k]: self.numberConservationConstraint(x,k,N0) ] )
# TODO: meaningful test for Dirichlet BC
def useStandardDirichletBC(self):
"""Dirichlet BC for all variables at all boundaries"""
self.boundary_conditions = []
self.u0 = self.delta_u_scaled
self.u1 = 0
self.logger.info('Left hand side potential Dirichlet boundary condition: u0 = {:> 8.4g}'.format(self.u0))
self.logger.info('Right hand side potential Dirichlet boundary condition: u1 = {:> 8.4g}'.format(self.u1))
# set up boundary conditions
self.boundary_conditions.extend( [
lambda x: self.leftPotentialDirichletBC(x,self.u0),
lambda x: self.rightPotentialDirichletBC(x,self.u1) ] )
for k in range(self.M):
self.logger.info('Ion species {:02d} left hand side concentration Dirichlet boundary condition: c0 = {:> 8.4g}'.format(k,self.c_scaled[k]))
self.logger.info('Ion species {:02d} right hand side concentration Dirichlet boundary condition: c1 = {:> 8.4g}'.format(k,self.c_scaled[k]))
self.boundary_conditions.extend( [
lambda x, k=k: self.leftDirichletBC(x,k,self.c_scaled[k]),
lambda x, k=k: self.rightDirichletBC(x,k,self.c_scaled[k]) ] )
# boundary conditions and constraints building blocks:
def leftFiniteDifferenceSchemeFluxBC(self,x,k,j0=0):
"""
Parameters
----------
x : (Ni,) ndarray
N-valued variable vector
k : int
ion species (-1 for potential)
j0 : float
flux of ion species `k` at left hand boundary
Returns
-------
float: boundary condition residual
"""
uij = x[:self.Ni]
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
# 2nd order right hand side finite difference scheme:
# df0dx = 1 / (2*dx) * (-3 f0 + 4 f1 - f2 ) + O(dx^2)
# - dndx - z n dudx = j0
dndx = -3.0*nijk[0] + 4.0*nijk[1] - nijk[2]
dudx = -3.0*uij[0] + 4.0*uij[1] - uij[2]
bcval = - dndx - self.zi0[k,0]*nijk[0]*dudx - 2.0*self.dx*j0
self.logger.debug(
'Flux BC F[0] = - dndx - z n dudx - 2*dx*j0 = {:> 8.4g}'.format(bcval))
self.logger.debug(
' = - ({:.2f}) - ({:.0f})*{:.2f}*({:.2f}) - 2*{:.2f}*({:.2f})'.format(
dndx, self.zi0[k,0], nijk[0], dudx, self.dx, j0))
return bcval
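    # Quick consistency check for the one-sided stencil used above (illustrative,
    # not part of the original routine): for f(x) = x on grid points 0, dx, 2*dx
    # the scheme gives (-3*0 + 4*dx - 2*dx) / (2*dx) = 1 = f'(0), so linear
    # profiles are reproduced exactly, consistent with the O(dx^2) error term.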
def rightFiniteDifferenceSchemeFluxBC(self,x,k,j0=0):
"""
See ```leftFiniteDifferenceSchemeFluxBC```
"""
uij = x[:self.Ni]
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
# 2nd order left hand side finite difference scheme:
# df0dx = 1 / (2*dx) * (-3 f0 + 4 f1 - f2 ) + O(dx^2)
# - dndx - z n dudx = j0
dndx = 3.0*nijk[-1] - 4.0*nijk[-2] + nijk[-3]
dudx = 3.0*uij[-1] - 4.0*uij[-2] + uij[-3]
bcval = - dndx - self.zi0[k,-1]*nijk[-1]*dudx - 2.0*self.dx*j0
self.logger.debug(
'FD flux BC F[-1] = - dndx - z n dudx - 2*dx*j0 = {:> 8.4g}'.format(bcval))
self.logger.debug(
' = - {:.2f} - {:.0f}*{:.2f}*{:.2f} - 2*{:.2f}*{:.2f}'.format(
dndx, self.zi0[k,-1], nijk[-1], dudx, self.dx, j0))
return bcval
def leftControlledVolumeSchemeFluxBC(self,x,k,j0=0):
"""
Compute left hand side flux boundary condition residual in accord with
controlled volume scheme.
Parameters
----------
x : (Ni,) ndarray
N-valued variable vector
k : int
ion species (-1 for potential)
j0 : float
flux of ion species `k` at left hand boundary
Returns
-------
float: boundary condition residual
"""
uij = x[:self.Ni]
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
# flux by controlled volume scheme:
bcval = ( + B(self.z[k]*(uij[0]-uij[1]))*nijk[1]
- B(self.z[k]*(uij[1]-uij[0]))*nijk[0] - self.dx*j0 )
self.logger.debug(
'CV flux BC F[0] = n1*B(z(u0-u1)) - n0*B(z(u1-u0)) - j0*dx = {:> 8.4g}'.format(bcval))
return bcval
def rightControlledVolumeSchemeFluxBC(self,x,k,j0=0):
"""
Compute right hand side flux boundary condition residual in accord with
controlled volume scheme. See ``leftControlledVolumeSchemeFluxBC``
"""
uij = x[:self.Ni]
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
# flux by controlled volume scheme:
bcval = ( + B(self.z[k]*(uij[-2]-uij[-1]))*nijk[-1]
- B(self.z[k]*(uij[-1]-uij[-2]))*nijk[-2] - self.dx*j0 )
self.logger.debug(
'CV flux BC F[-1] = n[-1]*B(z(u[-2]-u[-1])) - n[-2]*B(z(u[-1]-u[-2])) - j0*dx = {:> 8.4g}'.format(bcval))
return bcval
def leftPotentialDirichletBC(self,x,u0=0):
return self.leftDirichletBC(x,-1,u0)
def leftDirichletBC(self,x,k,x0=0):
"""Construct Dirichlet BC at left boundary"""
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
return nijk[0] - x0
def rightPotentialDirichletBC(self,x,x0=0):
return self.rightDirichletBC(x,-1,x0)
def rightDirichletBC(self,x,k,x0=0):
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
return nijk[-1] - x0
def leftPotentialRobinBC(self,x,lam,u0=0):
return self.leftRobinBC(x,-1,lam,u0)
def leftRobinBC(self,x,k,lam,x0=0):
"""
Compute left hand side Robin (u + lam*dudx = u0 ) BC at in accord with
2nd order finite difference scheme.
Parameters
----------
x : (Ni,) ndarray
N-valued variable vector
k : int
ion species (-1 for potential)
lam: float
BC coefficient, corresponds to Stern layer thickness
if applied to potential variable in PNP problem. Here, this steric
layer is assumed to constitute a region of uniform charge density
and thus linear potential drop across the interface.
x0 : float
right hand side value of BC, corresponds to potential beyond Stern
            layer if applied to potential variable in PNP system.
Returns
-------
float: boundary condition residual
"""
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
return nijk[0] + lam/(2*self.dx)* ( 3.0*nijk[0] - 4.0*nijk[1] + nijk[2] ) - x0
def rightPotentialRobinBC(self,x,lam,u0=0):
return self.rightRobinBC(x,-1,lam,u0)
def rightRobinBC(self,x,k,lam,x0=0):
"""Construct Robin (u + lam*dudx = u0 ) BC at right boundary."""
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
return nijk[-1] + lam/(2*self.dx) * ( 3.0*nijk[-1] - 4.0*nijk[-2] + nijk[-3] ) - x0
def numberConservationConstraint(self,x,k,N0):
"""N0: total amount of species, k: ion species"""
nijk = x[(k+1)*self.Ni:(k+2)*self.Ni]
## TODO: this integration scheme assumes constant concentrations within
## an interval. Adapt to controlled volume scheme!
# rescale to fit interval
N = np.sum(nijk*self.dx) * self.N / self.Ni
constraint_val = N - N0
self.logger.debug(
'Number conservation constraint F(x) = N - N0 = {:.4g} - {:.4g} = {:.4g}'.format(
N, N0, constraint_val ) )
return constraint_val
# TODO: remove or standardize
# def leftNeumannBC(self,x,j0):
# """Construct finite difference Neumann BC (flux BC) at left boundary"""
# # right hand side first derivative of second order error
# # df0dx = 1 / (2*dx) * (-3 f0 + 4 f1 - f2 ) + O(dx^2) = j0
# bcval = -3.0*x[0] + 4.0*x[1] - x[2] - 2.0*self.dx*j0
# self.logger.debug(
# 'Neumann BC F[0] = -3*x[0] + 4*x[1] - x[2] = {:> 8.4g}'.format(bcval))
# return bcval
#
# def rightNeumannBC(self,x,j0):
# """Construct finite difference Neumann BC (flux BC) at right boundray"""
# # left hand side first derivative of second order error
# # dfndx = 1 / (2*dx) * (+3 fn - 4 fn-1 + fn-2 ) + O(dx^2) = 0
# bcval = 3.0*x[-1] - 4.0*x[-2] + x[-3] - 2.0*self.dx*j0
# self.logger.debug(
# 'Neumann BC F[-1] = -3*x[-1] + 4*x[-2] - nijk[-3] = {:> 8.4g}'.format(bcval))
# return bcval
# standard Poisson equation residual for potential
def poisson_pde(self,x):
"""Returns Poisson equation resiudal by applying 2nd order FD scheme"""
uij1 = x[:self.Ni]
self.logger.debug(
'potential range [u_min, u_max] = [ {:>.4g}, {:>.4g} ]'.format(
np.min(uij1),np.max(uij1)))
nij1 = x[self.Ni:(self.M+1)*self.Ni]
nijk1 = nij1.reshape( self.M, self.Ni )
for k in range(self.M):
self.logger.debug(
'ion species {:02d} concentration range [c_min, c_max] = [ {:>.4g}, {:>.4g} ]'.format(
k,np.min(nijk1[k,:]),np.max(nijk1[k,:])))
# M rows (ion species), N_i cols (grid points)
zi0nijk1 = self.zi0*nijk1 # z_ik*n_ijk
for k in range(self.M):
self.logger.debug(
'ion species {:02d} charge range [z*c_min, z*c_max] = [ {:>.4g}, {:>.4g} ]'.format(
k,np.min(zi0nijk1[k,:]), np.max(zi0nijk1[k,:])))
# charge density sum_k=1^M (z_ik*n_ijk)
rhoij1 = zi0nijk1.sum(axis=0)
self.logger.debug(
'charge density range [rho_min, rho_max] = [ {:>.4g}, {:>.4g} ]'.format(
np.min(rhoij1),np.max(rhoij1)))
# reduced Poisson equation: d2udx2 = rho
Fu = -(np.roll(uij1, -1)-2*uij1+np.roll(uij1, 1))-0.5*rhoij1*self.dx**2
# linear potential regime due to steric effects incorporated here
# TODO: incorporate "spatially finite" BC into Robin BC functions
# replace left and right hand side residuals with linear potential FD
if not np.isnan(self.lhs_ohp):
lhs_linear_regime_ndx = (self.X <= self.lhs_ohp)
lhs_ohp_ndx = np.max( np.nonzero( lhs_linear_regime_ndx ) )
self.logger.debug(
'selected {:d} grid points within lhs OHP at grid point index {:d} with x_scaled <= {:>.4g}'.format(
np.count_nonzero(lhs_linear_regime_ndx), lhs_ohp_ndx, self.lhs_ohp) )
# dudx = (u[ohp]-u[0])/lambda_S within Stern layer
Fu[lhs_linear_regime_ndx] = (
( np.roll(uij1,-1) - uij1 )[lhs_linear_regime_ndx]
* self.lambda_S_scaled - (uij1[lhs_ohp_ndx]-uij1[0])*self.dx )
if not np.isnan(self.rhs_ohp):
rhs_linear_regime_ndx = (self.X >= self.rhs_ohp)
rhs_ohp_ndx = np.min( np.nonzero( rhs_linear_regime_ndx ) )
self.logger.debug(
                'selected {:d} grid points within rhs OHP at grid point index {:d} with x_scaled >= {:>.4g}'.format(
np.count_nonzero(rhs_linear_regime_ndx), rhs_ohp_ndx, self.rhs_ohp) )
# dudx = (u[ohp]-u[0])/lambda_S within Stern layer
Fu[rhs_linear_regime_ndx] = (
( uij1 - np.roll(uij1,1) )[rhs_linear_regime_ndx]
* self.lambda_S_scaled - (uij1[-1]-uij1[rhs_ohp_ndx])*self.dx )
Fu[0] = self.boundary_conditions[0](x)
Fu[-1] = self.boundary_conditions[1](x)
self.logger.debug('Potential BC residual Fu[0] = {:> 8.4g}'.format(Fu[0]))
self.logger.debug('Potential BC residual Fu[-1] = {:> 8.4g}'.format(Fu[-1]))
return Fu
def nernst_planck_pde(self,x):
"""Returns Nernst-Planck equation resiudal by applying controlled
volume scheme"""
uij1 = x[:self.Ni]
self.logger.debug(
'potential range [u_min, u_max] = [ {:>.4g}, {:>.4g} ]'.format(
np.min(uij1),np.max(uij1)))
nij1 = x[self.Ni:(self.M+1)*self.Ni]
nijk1 = nij1.reshape( self.M, self.Ni )
for k in range(self.M):
self.logger.debug(
'ion species {:02d} concentration range [c_min, c_max] = [ {:>.4g}, {:>.4g} ]'.format(
k,np.min(nijk1[k,:]),np.max(nijk1[k,:]) ) )
Fn = np.zeros([self.M, self.Ni])
# loop over k = 1..M reduced Nernst-Planck equations:
# - d2nkdx2 - ddx (zk nk dudx ) = 0
for k in range(self.M):
            # controlled volume implementation: constant flux across domain
Fn[k,:] = (
+ B(self.zi0[k,:]*(uij1 - np.roll(uij1,-1))) * np.roll(nijk1[k,:],-1)
- B(self.zi0[k,:]*(np.roll(uij1,-1) - uij1)) * nijk1[k,:]
- B(self.zi0[k,:]*(np.roll(uij1,+1) - uij1)) * nijk1[k,:]
+ B(self.zi0[k,:]*(uij1 - np.roll(uij1,+1))) * np.roll(nijk1[k,:],+1) )
# controlled volume implementation: flux j = 0 in every grid point
#
# Fn[k,:] = (
# B(self.zi0[k,:]*(uij1 - np.roll(uij1,-1)))*np.roll(nijk1[k,:],-1)
# - B(self.zi0[k,:]*(np.roll(uij1,-1) - uij1))*nijk1[k,:] )
# linear potential regime due to steric effects incorporated here
# TODO: incorporate "spatially finite" BC into Robin BC functions
# replace left and right hand side residuals with linear potential FD
# left and right hand side outer Helmholtz plane
# lhs_ohp = self.x0_scaled + self.lambda_S_scaled
# rhs_ohp = self.x0_scaled + self.L_scaled - self.lambda_S_scaled
#
# lhs_linear_regime_ndx = (self.X <= lhs_ohp)
# rhs_linear_regime_ndx = (self.X >= rhs_ohp)
#
# lhs_ohp_ndx = np.max( np.nonzero( lhs_linear_regime_ndx ) )
# rhs_ohp_ndx = np.min( np.nonzero( rhs_linear_regime_ndx ) )
#
# self.logger.debug(
# 'selected {:d} grid points within lhs OHP at grid point index {:d} with x_scaled <= {:>.4g}'.format(
# np.count_nonzero(lhs_linear_regime_ndx), lhs_ohp_ndx, lhs_ohp) )
# self.logger.debug(
# 'selected {:d} grid points within lhs OHP at grid point index {:d} with x_scaled >= {:>.4g}'.format(
# np.count_nonzero(rhs_linear_regime_ndx), rhs_ohp_ndx, rhs_ohp) )
#
# # zero concentration gradient in Stern layer
# Fn[k,lhs_linear_regime_ndx] = (
# ( np.roll(nijk1[k,:],-1)-np.roll(nijk1[k,:],1))[lhs_linear_regime_ndx])
# Fn[k,rhs_linear_regime_ndx] = (
# ( np.roll(nijk1[k,:],-1)-np.roll(nijk1[k,:],1))[rhs_linear_regime_ndx])
Fn[k,0] = self.boundary_conditions[2*k+2](x)
Fn[k,-1] = self.boundary_conditions[2*k+3](x)
self.logger.debug(
'ion species {k:02d} BC residual Fn[{k:d},0] = {:> 8.4g}'.format(
Fn[k,0],k=k))
self.logger.debug(
'ion species {k:02d} BC residual Fn[{k:d},-1] = {:> 8.4g}'.format(
Fn[k,-1],k=k))
return Fn
# non-linear system, "controlled volume" method
    # Selberherr, S. Analysis and Simulation of Semiconductor Devices, Springer 1984
def G(self, x):
"""Non-linear system
Discretization of Poisson-Nernst-Planck system with M ion species.
Implements "controlled volume" method as found in
        Selberherr, Analysis and Simulation of Semiconductor Devices, Springer 1984
Parameters
----------
x : ((M+1)*Ni,) ndarray
            system variables. 1D array of (M+1)*Ni values, where M is the number of
            ion species, Ni the number of spatial discretization points. First Ni
entries are expected to contain potential, following M*Ni points
contain ion concentrations.
Returns
--------
residual: ((M+1)*Ni,) ndarray
"""
# reduced Poisson equation: d2udx2 = rho
Fu = self.potential_pde(x)
Fn = self.concentration_pde(x)
# Apply constraints if set (not implemented properly, do not use):
if len(self.g) > 0:
Flam = np.array([g(x) for g in self.g])
F = np.concatenate([Fu,Fn.flatten(),Flam])
else:
F = np.concatenate([Fu,Fn.flatten()])
return F
@property
def I(self): # ionic strength
"""Compute the system's ionic strength from charges and concentrations.
Returns
-------
I : float
ionic strength ( 1/2 * sum(z_i^2*c_i) )
[concentration unit, i.e. mol m^-3]
"""
return 0.5*np.sum( np.square(self.z) * self.c )
@property
def lambda_D(self):
"""Compute the system's Debye length.
Returns
-------
lambda_D : float
Debye length, sqrt( epsR*eps*R*T/(2*F^2*I) ) [length unit, i.e. m]
"""
return np.sqrt(
self.relative_permittivity*self.vacuum_permittivity*self.R*self.T/(
2.0*self.F**2*self.I ) )
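    # Rough orientation (illustrative, not used by the solver): for the default
    # 0.1 mol/m^3 (0.1 mM) 1:1 electrolyte at T = 298.15 K with eps_R = 79, the
    # ionic strength is I = 0.1 mol/m^3 and lambda_D evaluates to roughly 30 nm.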
# default 0.1 mM (i.e. mol/m^3) NaCl aqueous solution
def init(self,
c = np.array([0.1,0.1]),
z = np.array([1,-1]),
L = 100e-9, # 100 nm
lambda_S=0, # Stern layer (compact layer) thickness
x0 = 0, # zero position
T = 298.15,
delta_u = 0.05, # potential difference [V]
relative_permittivity = 79,
vacuum_permittivity = sc.epsilon_0,
R = sc.value('molar gas constant'),
F = sc.value('Faraday constant'),
N = 200, # number of grid segments, number of grid points Ni = N + 1
             e = 1e-10, # absolute tolerance, TODO: switch to standardized measure
maxit = 20, # maximum number of Newton iterations
solver = None,
options = None,
potential0 = None,
concentration0 = None ):
"""Initializes a 1D Poisson-Nernst-Planck system description.
Expects quantities in SI units per default.
Parameters
----------
c : (M,) ndarray, optional
bulk concentrations of each ionic species [mol/m^3]
(default: [ 0.1, 0.1 ])
z : (M,) ndarray, optional
charge of each ionic species [1] (default: [ +1, -1 ])
x0 : float, optional
left hand side reference position (default: 0)
L : float, optional
1D domain size [m] (default: 100e-9)
lambda_S: float, optional
Stern layer thickness in case of Robin BC [m] (default: 0)
T : float, optional
temperature of the solution [K] (default: 298.15)
delta_u : float, optional
potential drop across 1D cell [V] (default: 0.05)
relative_permittivity: float, optional
relative permittivity of the ionic solution [1] (default: 79)
vacuum_permittivity: float, optional
vacuum permittivity [F m^-1] (default: 8.854187817620389e-12 )
R : float, optional
molar gas constant [J mol^-1 K^-1] (default: 8.3144598)
F : float, optional
Faraday constant [C mol^-1] (default: 96485.33289)
N : int, optional
number of discretization grid segments (default: 200)
e : float, optional
absolute tolerance for Newton solver convergence (default: 1e-10)
maxit : int, optional
maximum number of Newton iterations (default: 20)
solver: func( funx(x), x0), optional
solver to use (default: None, will use own simple Newton solver)
potential0: (N+1,) ndarray, optional (default: None)
potential initial values
concentration0: (M,N+1) ndarray, optional (default: None)
concentration initial values
"""
self.logger = logging.getLogger(__name__)
assert len(c) == len(z), "Provide concentration AND charge for ALL ion species!"
# TODO: integrate with constructor initialization parameters above
# default solver settings
self.converged = False # solver's convergence flag
self.N = N # discretization segments
self.e = e # Newton solver default tolerance
self.maxit = maxit # Newton solver maximum iterations
# default output settings
# self.output = False # let Newton solver output convergence plots...
# self.outfreq = 1 # ...at every nth iteration
        self.label_width = 40 # character width of quantity labels in log
# standard governing equations
self.potential_pde = self.poisson_pde
self.concentration_pde = self.nernst_planck_pde
# empty BC
self.boundary_conditions = []
# empty constraints
        self.g = [] # list of constraint functions, not fully implemented / tested
# system parameters
self.M = len(c) # number of ion species
self.c = c # concentrations
self.z = z # number charges
self.T = T # temperature
self.L = L # 1d domain size
self.lambda_S = lambda_S # Stern layer thickness
self.x0 = x0 # reference position
self.delta_u = delta_u # potential difference
self.relative_permittivity = relative_permittivity
self.vacuum_permittivity = vacuum_permittivity
# R = N_A * k_B
# (universal gas constant = Avogadro constant * Boltzmann constant)
self.R = R
self.F = F
self.f = F / (R*T) # for convenience
# print all quantities to log
for i, (c, z) in enumerate(zip(self.c, self.z)):
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
"ion species {:02d} concentration c".format(i), c, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
"ion species {:02d} number charge z".format(i), z, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'temperature T', self.T, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'domain size L', self.L, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'compact layer thickness lambda_S', self.lambda_S, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'reference position x0', self.x0, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'potential difference delta_u', self.delta_u, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'relative permittivity eps_R', self.relative_permittivity, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'vacuum permittivity eps_0', self.vacuum_permittivity, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'universal gas constant R', self.R, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'Faraday constant F', self.F, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'f = F / (RT)', self.f, lwidth=self.label_width))
# scaled units for dimensionless formulation
# length unit chosen as Debye length lambda
self.l_unit = self.lambda_D
# concentration unit is ionic strength
self.c_unit = self.I
# no time unit for now, only steady state
# self.t_unit = self.l_unit**2 / self.Dn # fixes Dn_scaled = 1
self.u_unit = self.R * self.T / self.F # thermal voltage
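# Added note: the units chosen here follow the standard nondimensionalization:
# the Debye length lambda_D = sqrt(eps_r * eps_0 * R * T / (2 * F^2 * I))
# with ionic strength I = 1/2 * sum_i z_i^2 * c_i serves as spatial unit, the
# ionic strength as concentration unit, and the thermal voltage
# u_thermal = R * T / F (about 25.7 mV at 298.15 K) as potential unit.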
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'spatial unit [l]', self.l_unit, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'concentration unit [c]', self.c_unit, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'potential unit [u]', self.u_unit, lwidth=self.label_width))
# domain
self.L_scaled = self.L / self.l_unit
# compact layer
self.lambda_S_scaled = self.lambda_S / self.l_unit
# reference position
self.x0_scaled = self.x0 / self.l_unit
# bulk concentrations
self.c_scaled = self.c / self.c_unit
# potential difference
self.delta_u_scaled = self.delta_u / self.u_unit
# print scaled quantities to log
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'reduced domain size L*', self.L_scaled, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'reduced compact layer thickness lambda_S*', self.lambda_S_scaled, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'reduced reference position x0*', self.x0_scaled, lwidth=self.label_width))
for i, c_scaled in enumerate(self.c_scaled):
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
"ion species {:02d} reduced concentration c*".format(i),
c_scaled, lwidth=self.label_width))
self.logger.info('{:<{lwidth}s} {:> 8.4g}'.format(
'reduced potential delta_u*', self.delta_u_scaled, lwidth=self.label_width))
# per default, no outer Helmholtz plane
self.lhs_ohp = np.nan
self.rhs_ohp = np.nan
# self.xi0 = None
# initialize initial value arrays
if potential0 is not None:
self.ui0 = potential0 / self.u_unit
else:
self.ui0 = None
if concentration0 is not None:
self.ni0 = concentration0 / self.c_unit
else:
self.ni0 = None
self.zi0 = None
def __init__(self, *args, **kwargs):
"""Constructor, see init doc string for arguments.
Additional Parameters
---------------------
solver: str or func (default: None)
solver to use. If str, then selected from scipy optimizers.
options: dict, optional (default: None)
options object for scipy solver
"""
self.init(*args, **kwargs)
if 'solver' in kwargs:
self.solver = kwargs['solver']
else:
self.solver = self.newton
if 'options' in kwargs:
self.options = kwargs['options']
else:
self.options = None
self.discretize()
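# --- Illustrative usage sketch (added; not part of the original module) ---
# The constructor call mirrors the parameters documented above; the class name
# and the solve-related calls are assumptions about the rest of the API and are
# therefore kept commented out:
#
# pnp = PoissonNernstPlanckSystem(c=[0.1, 0.1], z=[+1, -1], L=100e-9, delta_u=0.05)
# # pnp.useStandardInterfaceBC() # hypothetical helper setting boundary conditions
# # uij, nij, lamj = pnp.solve() # hypothetical call returning potential, concentrations, multipliers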
|
libAtoms/matscipy
|
matscipy/electrochemistry/poisson_nernst_planck_solver.py
|
Python
|
lgpl-2.1
| 45,577
|
[
"Avogadro",
"Matscipy"
] |
6522ac2719013a112ded2d668658dd29f8a0e25ce9300e6b0e924cad58ad5053
|
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct
comparison to DynaSim's clock-driven architecture. The synaptic connections
are "high-density", with a 90% probability of connection.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0004/pbsout/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0004.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order
to work with version 2 of the Brian simulator (aka Brian2), and also modified
to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
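# Added note: 'cpp_standalone' makes Brian2 generate, compile and run the whole
# simulation as a standalone C++ program; the extra compiler flags above are
# passed to that build, which is what this speed benchmark measures.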
# Parameters
cells = 4
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# Synaptic strengths
gAMPA = (0.1*msiemens*cmetre**-2)* area
gGABAA = (0.06*msiemens*cmetre**-2)* area
# Synaptic time constants
tauAMPA = 2
tauGABAA = 5
# Synaptic reversal potentials
EAMPA = 1*mV
EGABAA = -80*mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
gAMPA/cells*sAMPAtotal*(v-EAMPA)-
gGABAA/cells*sGABAAtotal*(v-EGABAA)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
sAMPAtotal : 1
sGABAAtotal : 1
''')
# Construct intrinsic cells
P = NeuronGroup(cells, model=eqs, method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Construct synaptic network
sAMPA=Synapses(Pe,P,
model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven)
sAMPAtotal_post = s : 1 (summed)
''')
sAMPA.connect(p=0.90)
sGABAA_RETC=Synapses(Pi,P,
model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven)
sGABAAtotal_post = s : 1 (summed)
''')
sGABAA_RETC.connect(p=0.90)
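# Added note: the '(clock-driven)' flag forces Brian2 to integrate the synaptic
# state variable s at every time step rather than only updating it on presynaptic
# spikes (event-driven), matching DynaSim's clock-driven architecture mentioned above.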
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# # If you want to plot:
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# # If you want to save data:
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
Brian2/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0004.py
|
Python
|
gpl-3.0
| 3,909
|
[
"Brian"
] |
a9f9043114e2d3abaa772bea6940cc1967d0225e7686aceb2f066ecbd1a9dede
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.awidgets import ACheckBox
from agui.backends.pyside.widgets import Widget
class CheckBox(Widget, ACheckBox):
type = 'QCheckBox'
def __init__(self, item = None):
ACheckBox.__init__(self, item)
Widget.__init__(self, item)
self.item.stateChanged.connect(self.emit_changed)
@ACheckBox.checked.getter
def checked(self):
self._checked = self.item.isChecked()
return self._checked
@checked.setter
def checked(self, value):
self.item.setChecked(value)
self._checked = value
|
bhdouglass/agui
|
agui/backends/pyside/widgets/checkbox.py
|
Python
|
gpl-3.0
| 1,324
|
[
"Brian"
] |
8ba662fbbbcac5f70d4206e6aa3d6eba753c6d210b4bb4d0aaefa7b7fb4fe429
|
# INPUT FILE for adcp_transect_preprocessor
# Set options in this file in order to process ADCP transects using the transect preprocessor.
# For further information on setting these options, see the ADCP Python Documentation.
#
# IMPORTANT: This options file uses Python 2.X code conventions, meaning:
# 1) there may not be any tab characters in the file;
# 2) option lines may have no leading spaces;
# 3) strings (text in quotes) should be preceded by an 'r' for maximum compatibility.
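# Example of the expected option syntax (illustrative only; 'example_directory'
# is not an actual option of this file):
# example_directory = r'C:\data\my_transects' # raw string, no tabs, no leading spaces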
# ADCP Data File location(s)
# -----------------------------------------------------------------------------
#working_directory = r'Z:\temp\adcp_anaylsis_stations\WGB20090721'
#working_directory = r'Z:\temp\adcp_anaylsis_stations\WGA20090722' # or None for current directory
working_directory = r'Y:\temp\adcp_anaylsis_stations\RIO20100309' # or None for current directory
#working_directory = r'/Volumes/Aux/temp/adcp_anaylsis_stations/RIO20100309' # or None for current directory
#working_directory = r'Z:\temp\adcp_anaylsis_stations\TMS20090513'
#working_directory = r'Z:\temp\adcp_anaylsis_stations\JPT20080618'
#working_directory = r'Z:\temp\adcp_anaylsis_stations\MRU022510' # can't headcorrect, not enough bins
#working_directory = r'Z:\temp\adcp_anaylsis_stations\MRU012810'
#working_directory = r'Z:\temp\adcp_anaylsis_stations\MRU060408' # no nav
#working_directory = r'Z:\temp\adcp_anaylsis_stations\MRU112707' # no nav
#working_directory = r'Z:\temp\adcp_anaylsis_stations\WCI042011' # done
#working_directory = r'Z:\temp\adcp_anaylsis_stations\WCI102009' # done
#working_directory = r'Z:\temp\adcp_anaylsis_stations\WCI050608'
#file_list = ['GEO4thRelease641r.000',
# 'GEO4thRelease642r.000']
file_list = None
# Processing Options
# -----------------------------------------------------------------------------
xy_projection = r'EPSG:26910' # The text-based EPSG code describing the map projection (in Northern CA, UTM Zone 10N = 'EPSG:26910')
do_head_correct = False # Switch for using/not using heading correction due to magnetic compass declination and errors. {True or False}
head_correct_spanning = False # perform heading correction on all data files binned together {True or False}
mag_declination = 14.7 # magnetic compass declination - this value will be used to correct compass heading if head_correcting is not used {degrees E of true North, or None}
u_min_bt=0.3 # minimum bottom track velocity for head_correct {typically 0-0.3 [m/s] or None}
hdg_bin_size=5 # bin size of heading correction {typically 5,10 [degrees]}
hdg_bin_min_samples=10 # minimum number of sample headings in a heading bin for consideration in heading correction {typically 10-50, more is safer}
sidelobe_drop=0.1 # fraction of vertical profile to drop due to sidelobe/bottom interaction {typically 0.5-0.15 [fraction]}
std_drop=3.0 # standard deviation of velocity, above which samples are dropped from analysis {0.0=no dropping, 2.0-3.0 typically [number of standard deviations]}
std_interp=True # perform interpolation of holes in velocity profiles left by high standard deviation removal {typically True with std_drop > 0.0}
smooth_kernel=3 # smooth velocity data using a square kernel box-filter, with side dimension = smooth_kernel {typically 3 [grid cells]}
extrap_boundaries=False # extrapolate velocity profiles upward toward surface, and downward to the sounder-detected bottom {True or False}
average_ens = 1 # average adjacent (in time) velocity profiles {typically 0-15 [number of adjacent velocity profiles(ensembles)]}
regrid_horiz_m = None # horizontal grid resolution used when regridding results {reasonable fraction of transect width, or None for default(2m) [m]}
regrid_vert_m = None # vertical grid resolution used when regridding results {reasonable fraction of transect depth, or None for default(0.1) [m]}
adcp_depth = 0.244 # depth of the adcp face under the surface {[m] or None}
p1lat = 38.0527 # latitude of origin of optional transect plot line [degrees E] or None
p1lon = -121.6943 # longitude of origin of optional transect plot line [degrees N] or None
p2lat = 38.0505 # latitude of end of optional transect plot line [degrees E] or None
p2lon = -121.6900 # longitude of end of optional transect plot line [degrees N] or None
# Data Output Options
# -----------------------------------------------------------------------------
save_raw_data_to_netcdf = True # Switch to output raw data to netCDF-CF format. {True or False}
save_preprocessed_data_to_netcdf = False # Switch to output results to netCDF-CF format. {True or False}
use_netcdf_data_compression = True # Switch to use NetCDF 4 data compression to save disk space in data and results files. {True or False}
# Debug options
debug_stop_after_n_transects = 3 # False, or number to limit return to
|
esatel/ADCPy
|
adcpy/trn_pre_input_RIO.py
|
Python
|
mit
| 4,988
|
[
"NetCDF"
] |
f0b4dc3113885cbae0a6558db38bdeada9b2c45507eb97cdad7e25bdfad7ce2c
|
#!flask/bin/python
# Author: Johan Beekhuizen, Deltares
# Author: Joan Sala Calero, Deltares
# This work is based on the Flask Upload Tool by Ngo Duy Khanh (https://github.com/ngoduykhanh/flask-file-uploader)
# which in turn is based on the jQuery-File-Upload (https://github.com/blueimp/jQuery-File-Upload/)
import os
import simplejson
from flask import Flask, request, render_template, session, redirect, url_for, flash, send_from_directory
from flask_bootstrap import Bootstrap
from werkzeug.utils import secure_filename
from lib.upload_file import uploadfile
import logging
from logging.handlers import RotatingFileHandler
import json
import zipfile
import time
import functions
import threddsclient
import requests
from requests.auth import HTTPBasicAuth
import urllib
import re
from unicodedata import normalize
import traceback
import xml.etree.ElementTree as ET
from geoserver.catalog import Catalog
# Specific from app
from DOI import DOI
from settings import settings
# used for 'slugify': creating a valid url
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
app = Flask(__name__)
my_dir = os.path.dirname(__file__)
app.config.update(settings)
bootstrap = Bootstrap(app)
# set up logging
logFile = os.path.join(my_dir, 'datauploadtool.log')
file_handler = RotatingFileHandler(logFile, 'a', 1 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('Data Upload Tool startup')
@app.errorhandler(500)
def internal_server_error(error):
app.logger.error('Server Error: %s', (error))
app.logger.error(traceback.format_exc())
return render_template('500.html'), 500
@app.errorhandler(Exception)
def unhandled_exception(e):
app.logger.error('Unhandled Exception: %s', (e))
app.logger.error(traceback.format_exc())
return render_template('500.html'), 500
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_THREDDS_EXTENSIONS']
def slugify(text, delim=u'_'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
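# Added example: slugify(u'My Dataset (2017)!') returns u'my_dataset_2017'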
def gen_file_name(fullpath, filename):
"""
If file exist already, rename it and return a new name
"""
i = 1
while os.path.exists(os.path.join(fullpath, filename)):
name, extension = os.path.splitext(filename)
filename = '%s_%s%s' % (name, str(i), extension)
i = i + 1
return filename
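# Added example: if 'report.csv' already exists in fullpath, this returns
# 'report_1.csv' (or 'report_2.csv' if that also exists, and so on).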
def checkConnection(url, errorMessage):
"""
This function checks if a server is responsive
:param url: the url of the connection to check
:param errorMessage: the errormessage to log when the connection fails
:return:
"""
try:
requests.get(url, timeout=1) # try for max 1 second
except:
flash(errorMessage)
app.logger.error(errorMessage)
return False
return True
@app.route("/zip", methods=['POST'])
def zip():
"""
Zip all the selected files in the list of uploaded files
:return:
"""
jsonString = request.data
jsonDict = json.loads(jsonString)
filesDict = jsonDict['files']
zipFilename = jsonDict['zipfilename']
datasetFoldername = session['DATASETFOLDERNAME']
# create list of the file names from dict
fileList = []
for key in filesDict.keys():
fileList.append(filesDict[key].split('/')[-1])
if len(fileList) > 0:
# do not zip an already zipped file
if len(fileList) == 1:
filename, fileExtension = os.path.splitext(fileList[0])
if fileExtension == '.zip':
flash("Cannot zip a .zip file.")
return simplejson.dumps({"Error": "Cannot zip a .zip file."})
datasetDir = '/'.join([app.config['BASE_UPLOAD_FOLDER'], datasetFoldername])
# Open a zip file
zipPath = os.path.join(datasetDir, "{}.zip".format(zipFilename))
# check if the file already exists; if not, create zipfile
if os.path.isfile(zipPath):
flash("File already exists, please give a different file name.")
return simplejson.dumps({"Error": "File already exists, please give a different file name."})
else:
zf = zipfile.ZipFile(zipPath, 'w')
# write all selected files to the zip file
for file in fileList:
filePath = '/'.join([datasetDir, file])
zf.write(filePath, file)
zf.close()
# delete all the original files
for file in fileList:
filePath = '/'.join([datasetDir, file])
os.remove(filePath)
return simplejson.dumps({"files": filesDict})
else:
flash("No files selected. Please select the files to zip using the checkboxes on the right.")
return simplejson.dumps({"Error": "No file selected"})
@app.route("/submitfiles", methods=['GET', 'POST'])
def submitFiles():
"""
Send the information of the uploaded files to the Open Data Registration Tool as an encoded JSON string in a GET-request
The info is stored in a list of representations, according to the Open Data Registration Tool API:
https://github.com/switchonproject/sip-html5-resource-registration/wiki
"""
threddsAvailable = True
geoserverAvailable = True
# Check if Thredds server is online
threddsAvailable = checkConnection(app.config['THREDDS_SERVER'],
"Failed to connect to the THREDDS server at " + app.config['THREDDS_SERVER'] + \
". NetCDF files will not be accessible using web services, only by HTTP download.")
# Check if GeoServer is online
geoserverAvailable = checkConnection(app.config['GEOSERVER'],
"Failed to connect to the geoserver at " + app.config['GEOSERVER'] + \
". Shapefiles will not be mapped with WMS and can not be downloaded by WFS.")
datasetname = session['DATASETNAME']
datasetFoldername = session['DATASETFOLDERNAME']
generateDOI = session['GENERATEDOI']
if request.form['submitButton'] == 'previous':
return redirect('/?datasetname=' + datasetFoldername)
if request.form['submitButton'] == 'next':
datasetDir = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername)
files = [f for f in os.listdir(datasetDir) if
os.path.isfile(os.path.join(datasetDir, f)) and f not in app.config['IGNORED_FILES']]
if len(files) > 0:
representation = {}
result = []
urlRoot = request.url_root.rstrip('/') # get the url root without the trailing '/' (for string concatenation)
# Store the root url of the dataset as the primary representation if there are more than 1 file
if len(files) > 1:
representation['name'] = datasetname
representation['description'] = "File download"
representation['type'] = "original data"
representation['contentlocation'] = '/'.join([urlRoot, 'data', datasetFoldername])
representation['contenttype'] = "application/octet-stream"
representation['function'] = "information"
representation['protocol'] = "WWW:LINK-1.0-http--link"
result.append(representation)
# if there is only one file, store the direct link to this file
if len(files) == 1:
f = files[0] # fix: f was previously undefined here (it only leaked out of the list comprehension above)
filename, fileExtension = os.path.splitext(f)
# region Check if it is a zipped shapefile
# if it is, ignore it (unless geoserver is unavailable), otherwise the zip file is added twice
zippedShapefile = False
if fileExtension == '.zip' and geoserverAvailable:
zipFilePath = os.path.join(datasetDir, f)
zipFile = zipfile.ZipFile(zipFilePath, 'r')
filesInZip = zipFile.namelist()
zipFile.close()
for fileInZip in filesInZip:
fileInZipExtension = os.path.splitext(fileInZip)[1]
if fileInZipExtension == '.shp':
zippedShapefile = True
#endregion
if fileExtension != '.nc' and zippedShapefile == False:
representation['name'] = datasetname
representation['description'] = "File download"
representation['type'] = "original data"
# TODO: improve file recognition
if fileExtension == ".zip":
representation['contenttype'] = "application/zip"
else:
representation['contenttype'] = "application/octet-stream"
representation['contentlocation'] = '/'.join([urlRoot, 'data', datasetFoldername, f])
representation['function'] = "download"
representation['protocol'] = "WWW:DOWNLOAD-1.0-http--download"
result.append(representation)
#region THREDDS
if threddsAvailable:
if app.config['DEVELOP']:
threddsCatalog = '/'.join((app.config['THREDDS_SERVER'], 'netcdftest', 'catalog.xml'))
else:
threddsCatalog = '/'.join((app.config['THREDDS_SERVER'], datasetFoldername, 'catalog.xml'))
try:
opendapUrls = threddsclient.opendap_urls(threddsCatalog)
for opendapUrl in opendapUrls:
filepath, fileExtension = os.path.splitext(opendapUrl)
filename = opendapUrl.split('/')[-1]
# check if the file is a netCDF file; if yes, store OPeNDAP service url and html download url
if fileExtension == '.nc':
representation = {}
representation['name'] = filename
representation['description'] = "Netcdf file OPeNDAP service"
representation['contentlocation'] = opendapUrl
representation['contenttype'] = "application/x-netcdf"
representation['type'] = "original data"
representation['function'] = "service"
representation['protocol'] = 'OPeNDAP:OPeNDAP'
result.append(representation)
representation = {}
representation['name'] = filename
representation['description'] = "HTML interface OPeNDAP service"
representation['contentlocation'] = opendapUrl + ".html"
representation['contenttype'] = "application/x-netcdf"
representation['type'] = "original data"
representation['function'] = "download"
representation['protocol'] = 'WWW:DOWNLOAD-1.0-http--download'
result.append(representation)
representation = {}
representation['name'] = filename
representation['description'] = "WMS service"
representation['contentlocation'] = opendapUrl.replace('dodsC', 'wms') + "?service=WMS&version=1.3.0&request=GetCapabilities"
representation['contenttype'] = "application/xml"
representation['type'] = "original data"
representation['function'] = "service"
representation['protocol'] = 'OGC:WMS-1.1.1-http-get-capabilities'
result.append(representation)
except:
app.logger.info("URL: " + threddsCatalog + " is not a THREDDS catalog")
#endregion
# region GEOSERVER: loop through all files to check for shapefiles
if geoserverAvailable:
for file in files:
layerName = ''
filename, fileExtension = os.path.splitext(file)
if fileExtension == '.zip':
zipFilePath = os.path.join(datasetDir, file)
zipFile = zipfile.ZipFile(zipFilePath, 'r')
filesInZip = zipFile.namelist()
for fileInZip in filesInZip:
fileInZipName = os.path.split(fileInZip)[1]
fileInZipNoExtName, fileInZipExtension = os.path.splitext(fileInZipName)
if fileInZipExtension == '.shp':
# Layer name is the file without extension
layerName = fileInZipNoExtName
# Publish .zipped shapefile on geoserver, no subdirectories
zipFile.extractall(datasetDir)
for root, dirs, files in os.walk(datasetDir):
for name in files:
os.rename(os.path.join(root, name), os.path.join(datasetDir,name))
# create workspace
r = requests.post(url= app.config['GEOSERVER'] + "/rest/workspaces",
headers={'Content-type': 'text/xml'},
data="<workspace><name>" + datasetFoldername + "</name></workspace>",
auth=HTTPBasicAuth(app.config['GEOSERVER_ADMIN'], app.config['GEOSERVER_PASS']))
if r.status_code > 299: # status code of 201 is success; all else is failure
app.logger.error("Error in creating geoserver workspace for " + datasetFoldername + \
"; Status code: " + str(r.status_code) + ", Content: " + r.content)
flash("Error in creating workspace on geoserver.")
return redirect(url_for('uploadData'))
# for testing purposes.. uploaded file is on local machine and can only publish data that is on the data mount of web app
if app.config['DEVELOP']:
shapeFile = "file://D:/sala/Downloads/sld_cookbook_polygon/sld_cookbook_polygon.shp"
else:
shapeFile = settings['GEOSERVER_DATA_DIR'] + "/" + datasetFoldername + "/" + fileInZipName
# Publish shapefile on the geoserver; the datastore is automatically created and has the same name as the shapefile + ds
r = requests.put(url=app.config['GEOSERVER'] + "/rest/workspaces/" + datasetFoldername + "/datastores/" + datasetFoldername + "_ds/external.shp",
headers={'Content-type': 'text/plain'},
data='file://'+shapeFile,
auth=HTTPBasicAuth(app.config['GEOSERVER_ADMIN'], app.config['GEOSERVER_PASS']))
if r.status_code > 299:
app.logger.error("Error in publishing shapefile " + datasetFoldername + " on geoserver; Status code: " \
+ str(r.status_code) + ", Content: " + r.content)
flash("Error in publishing shapefile on geoserver.")
return redirect(url_for('uploadData'))
representation = {}
representation['name'] = layerName
representation['description'] = "WMS service"
representation['contentlocation'] = app.config['GEOSERVER'] + "/" + datasetFoldername + "/" + \
"wms?service=WMS&version=1.1.0&request=GetCapabilities"
representation['contenttype'] = "application/xml"
representation['type'] = "original data"
representation['function'] = "service"
representation['protocol'] = 'OGC:WMS-1.1.1-http-get-capabilities'
result.append(representation)
representation = {}
representation['name'] = layerName
representation['description'] = "WMS service"
representation['contentlocation'] = app.config['GEOSERVER'] + "/" + datasetFoldername + "/" + \
"wms?service=WMS&version=1.1.0&request=GetCapabilities"
representation['contenttype'] = "application/xml"
representation['type'] = "aggregated data"
representation['function'] = "service"
representation['protocol'] = 'OGC:WMS-1.1.1-http-get-capabilities'
#region Get spatial extent from getcapabilities document
try:
root = ET.fromstring(requests.get(representation['contentlocation']).content)
latlonElem = root.find('Capability/Layer/Layer/LatLonBoundingBox')
latlonDict = latlonElem.attrib
minx = latlonDict['minx']
miny = latlonDict['miny']
maxx = latlonDict['maxx']
maxy = latlonDict['maxy']
# WKT representation: POLYGON((minx miny, maxx miny, maxx maxy, minx maxy, minx miny))
WKTString = 'POLYGON(({0} {1}, {2} {1}, {2} {3}, {0} {3}, {0} {1}))'.format(minx, miny, maxx, maxy)
representation['wktboundingbox'] = WKTString
except:
app.logger.error("Error in deriving WKT bounding box from WMS getcapabilities document")
#endregion
result.append(representation)
representation = {}
representation['name'] = fileInZipNoExtName
representation['description'] = "WFS service"
representation['contentlocation'] = app.config['GEOSERVER'] + "/" + datasetFoldername + "/" + "ows?service=WFS&version=1.0.0&request=GetCapabilities"
representation['contenttype'] = "application/xml"
representation['type'] = "original data"
representation['function'] = "service"
representation['protocol'] = "OGC:WFS-1.0.0-http-get-capabilities"
result.append(representation)
representation = {}
representation['name'] = file
representation['description'] = "Zipped shapefile"
representation['contentlocation'] = '/'.join([urlRoot, 'data', datasetFoldername, file])
representation['contenttype'] = "application/zip"
representation['type'] = "original data"
representation['function'] = "download"
representation['protocol'] = "WWW:DOWNLOAD-1.0-http--download"
representation['uploadmessage'] = "deriveSpatialIndex:shp"
result.append(representation)
# Optional sld file (preconditions, shp uploaded, workspace created)
for fileInZip in filesInZip:
fileInZipName = os.path.split(fileInZip)[1]
fileInZipNoExtName, fileInZipExtension = os.path.splitext(fileInZipName)
if fileInZipExtension == '.sld':
# for testing purposes.. uploaded file is on local machine and can only publish data that is on the data mount of web app
if app.config['DEVELOP']:
sldFile = "D:/sala/Downloads/sld_cookbook_polygon/sld_cookbook_polygon.sld"
else:
sldFile = settings['GEOSERVER_DATA_DIR'] + "/" + datasetFoldername + "/" + fileInZipName
# Connect to geoserver catalogue
cat = Catalog(app.config['GEOSERVER'] + "/rest", app.config['GEOSERVER_ADMIN'], password=app.config['GEOSERVER_PASS'])
# Add or Overwrite
with open(sldFile) as f:
style=cat.create_style(fileInZipNoExtName, f.read(), overwrite=True)
# Link it to the layer
layer = cat.get_layer(layerName)
layer._set_default_style(fileInZipNoExtName)
cat.save(layer)
# close zip file after looping through all files in the zip file
zipFile.close()
#endregion
# region
if generateDOI:
d = DOI(files, datasetDir, datasetname, logger=app.logger)
deposition_id = d.runUpload()
# endregion
resultString = json.dumps(result)
text = urllib.quote_plus(resultString.encode('utf-8'))
if generateDOI: url = app.config['METADATA_URL'] + text + '&deposition=' + deposition_id
else: url = app.config['METADATA_URL'] + text
# store the representation
app.logger.info("Representations of the dataset: " + resultString)
return redirect(url)
else:
flash("Please upload at least one file")
return redirect(url_for('uploadData'))
# accessed from the 'selectServer' page
@app.route("/uploaddata", methods=['GET', 'POST'])
def uploadData():
datasetname = session['DATASETNAME']
datasetFoldername = session['DATASETFOLDERNAME']
return render_template('upload.html', datasetname=datasetname, datasetFoldername=datasetFoldername)
@app.route("/upload", methods=['GET', 'POST'])
def upload():
'''
The upload function is called as an AJAX request from within the Upload.html page in order to avoid refreshing the whole page
when uploading new data.
'''
if request.method == 'POST':
file = request.files['file']
datasetFoldername = session['DATASETFOLDERNAME'] # get the name of the dataset (and folder)
fullpath = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername)
if file:
filename = secure_filename(file.filename)
filename = gen_file_name(fullpath, filename)
try:
uploaded_file_path = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername, filename)
file.save(uploaded_file_path)
size = os.path.getsize(uploaded_file_path) # get file size after saving
except:
errorMessage = 'Error saving file: ' + filename + ' to working copy'
app.logger.error(errorMessage)
return simplejson.dumps({"Error: ": errorMessage})
app.logger.info('File: ' + filename + ' saved successfully in working copy')
time.sleep(0.2)
result = uploadfile(name=filename, datasetFoldername=datasetFoldername, size=size)
return simplejson.dumps({"files": [result.get_file()]})
if request.method == 'GET':
# get all file in ./data directory
datasetFoldername = session['DATASETFOLDERNAME']
datasetDir = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername)
# GET INFORMATION OF ALL CURRENT FILES IN DIRECTORY
files = [f for f in os.listdir(datasetDir) if
os.path.isfile(os.path.join(datasetDir, f)) and f not in app.config['IGNORED_FILES']]
file_display = []
for file in files:
size = os.path.getsize(os.path.join(datasetDir, file))
file_saved = uploadfile(name=file, datasetFoldername=datasetFoldername, size=size)
file_display.append(file_saved.get_file())
return simplejson.dumps({"files": file_display})
## START APP --> http://127.0.0.1:5000/?datasetname=testJ1&generateDOI=true
@app.route('/', methods=['GET'])
def createDatasetFolder():
# Get a DOI via zenodo (optional)
generateDOI = request.args.get('generateDOI')
if generateDOI == None:
generateDOI = False
else:
if generateDOI == 'true': generateDOI = True
else: generateDOI = False
# Datasetname (mandatory)
datasetname = request.args.get('datasetname')
if datasetname == None:
return "Please send a GET request with a parameter datasetname"
else:
datasetFoldername = datasetname # the dataset folder name must be unique and must be valid for use in a URL
# create a valid datasetFoldername for use in an URL
datasetFoldername = slugify(unicode(datasetFoldername))
# create the dataset folder in the folder of the servertype; if name already taken, increment foldername
fullpath = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername)
n = 1
origDatasetFoldername = datasetFoldername
while os.path.exists(fullpath):
datasetFoldername = origDatasetFoldername + str(n)
fullpath = os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername)
n += 1
os.makedirs(fullpath)
app.logger.info('Dataset will be stored in: ' + fullpath)
# set cookies (used for page refresh)
session['DATASETNAME'] = datasetname
session['DATASETFOLDERNAME'] = datasetFoldername
session['GENERATEDOI'] = generateDOI
return redirect(url_for('uploadData'))
@app.route("/data/<datasetFoldername>/")
def downloadDataset(datasetFoldername):
result = {}
result['datasetFoldername'] = datasetFoldername
datasetDir = os.path.join(os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername))
fileInfoList = []
files = [f for f in os.listdir(datasetDir) if
os.path.isfile(os.path.join(datasetDir, f)) and f not in app.config['IGNORED_FILES']]
for f in files:
fileInfo = {}
fileInfo['size'] = os.path.getsize(os.path.join(datasetDir, f))
fileInfo['sizeText'] = functions.formatFileSize(fileInfo['size'])
fileInfo['url'] = os.path.join(request.base_url, f)
fileInfo['name'] = f
fileInfoList.append(fileInfo)
result['files'] = fileInfoList
return render_template('download.html', result=result)
@app.route("/data/<path:path>", methods=['GET'])
def downloadFile(path):
return send_from_directory(os.path.join(app.config['BASE_UPLOAD_FOLDER']), filename=path)
@app.route("/downloadallzip/<path:path>", methods=['GET'])
def downloadallzip(path):
return send_from_directory(os.path.join(app.config['BASE_UPLOAD_FOLDER']), filename=path)
@app.route("/downloadall", methods=['POST'])
def downloadAll():
"""
Zips all files of the dataset and redirects the client to this .zip file to start the download
:return:
"""
datasetFoldername = request.form['datasetFoldername']
zipFilename = "{}.zip".format(datasetFoldername)
zipRootFolder = os.path.join(app.config['BASE_UPLOAD_FOLDER'], app.config['ZIP_DOWNLOAD_ALL_FOLDER'])
if not os.path.exists(zipRootFolder):
os.makedirs(zipRootFolder)
app.logger.info("Created zip root folder at: " + zipRootFolder)
zipFilepath = os.path.join(app.config['BASE_UPLOAD_FOLDER'], app.config['ZIP_DOWNLOAD_ALL_FOLDER'], zipFilename)
# Test if zip file already exists; if yes, remove this .zip file
if os.path.exists(zipFilepath):
os.remove(zipFilepath)
datasetDir = os.path.join(os.path.join(app.config['BASE_UPLOAD_FOLDER'], datasetFoldername))
files = [f for f in os.listdir(datasetDir) if
os.path.isfile(os.path.join(datasetDir, f)) and f not in app.config['IGNORED_FILES']]
# Open a zip file
zf = zipfile.ZipFile(zipFilepath, 'w')
for f in files:
filename = os.path.join(datasetDir, f)
arcName = f
zf.write(filename, arcName)
zf.close()
downloadPath = '/'.join(["downloadallzip", app.config['ZIP_DOWNLOAD_ALL_FOLDER'], zipFilename])
return redirect(downloadPath)
if __name__ == '__main__':
if app.config['DEVELOP']:
app.run(debug=True) # DEVELOPMENT
else:
app.run(host='0.0.0.0') # SERVER
|
switchonproject/sip-html5-data-upload
|
app.py
|
Python
|
lgpl-3.0
| 30,051
|
[
"NetCDF"
] |
e6169db42bbd6c044c46fded7d44f5e288a873a1dbd3f0701c57efea84107744
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Interface with VTK.
This module provides an interface to some functions of the Python
Visualization Toolkit (VTK).
Documentation for VTK can be found on http://www.vtk.org/
This module provides the basic interface to convert data structures between
vtk and pyFormex.
"""
from vtk.util.numpy_support import numpy_to_vtk as n2v
from vtk.util.numpy_support import vtk_to_numpy as v2n
from vtk.util.numpy_support import create_vtk_array as cva
from vtk.util.numpy_support import get_numpy_array_type as gnat
from vtk.util.numpy_support import get_vtk_array_type as gvat
from numpy import *
from mesh import Mesh
from coords import Coords
from plugins.trisurface import TriSurface
import os
def cleanVPD(vpd):
"""Clean the vtkPolydata
Clean the vtkPolydata, adjusting connectivity, removing duplicate elements
and coords, renumbering the connectivity. This is often needed after
setting the vtkPolydata, to make the vtkPolydata fit for use with other
operations. Be aware that this operation will change the order and
numbering of the original data.
Parameters:
- `vpd`: a vtkPolydata
Returns the cleaned vtkPolydata.
"""
from vtk import vtkCleanPolyData
cleaner=vtkCleanPolyData()
cleaner.SetInput(vpd)
cleaner.Update()
return cleaner.GetOutput()
def convert2VPD(M,clean=False):
"""Convert pyFormex data to vtkPolyData.
Convert a pyFormex Mesh or Coords object into vtkPolyData.
This is limited to vertices, lines, and polygons.
Lines should already be ordered (with connectedLineElems for instance).
Parameters:
- `M`: a Mesh or Coords type. If M is a Coords type it will be saved as
VERTS. Else the element plexitude determines whether VERTS, LINES or POLYS are created.
- `clean`: if True, the resulting vtkdata will be cleaned by calling
cleanVPD.
Returns a vtkPolyData.
"""
from vtk import vtkPolyData,vtkPoints,vtkIdTypeArray,vtkCellArray
print('STARTING CONVERSION FOR DATA OF TYPE %s '%type(M))
if type(M) == Coords:
M = Mesh(M,arange(M.ncoords()))
Nelems = M.nelems() # Number of elements
Ncxel = M.nplex() # Number of nodes per element
# create a vtkPolyData variable
vpd=vtkPolyData()
# creating vtk coords
pts = vtkPoints()
ntype=gnat(pts.GetDataType())
coordsv = n2v(asarray(M.coords,order='C',dtype=ntype),deep=1) #.copy() # deepcopy array conversion for C-like array of vtk, it is necessary to avoid memory data loss
pts.SetNumberOfPoints(M.ncoords())
pts.SetData(coordsv)
vpd.SetPoints(pts)
# create vtk connectivity
elms = vtkIdTypeArray()
ntype=gnat(vtkIdTypeArray().GetDataType())
elmsv = concatenate([Ncxel*ones(Nelems).reshape(-1,1),M.elems],axis=1)
elmsv = n2v(asarray(elmsv,order='C',dtype=ntype),deep=1) #.copy() # deepcopy array conversion for C-like array of vtk, it is necessary to avoid memory data loss
elms.DeepCopy(elmsv)
# set vtk Cell data
datav = vtkCellArray()
datav.SetCells(Nelems,elms)
if Ncxel == 1:
try:
print("setting VERTS for data with %s maximum number of point for cell "%Ncxel)
vpd.SetVerts(datav)
except:
raise ValueError,"Error in saving VERTS"
elif Ncxel == 2:
try:
print ("setting LINES for data with %s maximum number of point for cell "%Ncxel)
vpd.SetLines(datav)
except:
raise ValueError,"Error in saving LINES"
else:
try:
print ("setting POLYS for data with %s maximum number of point for cell "%Ncxel)
vpd.SetPolys(datav)
except:
raise ValueError,"Error in saving POLYS"
vpd.Update()
if clean:
vpd=cleanVPD(vpd)
return vpd
def convertVPD2Triangles(vpd):
"""Convert a vtkPolyData to a vtk triangular surface.
Convert a vtkPolyData to a vtk triangular surface. This is convenient
when vtkPolyData are non-triangular polygons.
Parameters:
- `vpd`: a vtkPolyData
Returns the vtkPolyData with all polygons converted to triangles.
"""
from vtk import vtkTriangleFilter
triangles = vtkTriangleFilter()
triangles.SetInput(vpd)
triangles.Update()
return triangles.GetOutput()
def convertFromVPD(vpd):
"""Convert a vtkPolyData into pyFormex objects.
Convert a vtkPolyData into pyFormex objects.
Parameters:
- `vpd`: a vtkPolyData
Returns a tuple with points, polygons, lines, vertices numpy arrays.
Returns None for the missing data.
"""
pts=polys=lines=verts=None
# getting points coords
if vpd.GetPoints().GetData().GetNumberOfTuples():
ntype=gnat(vpd.GetPoints().GetDataType())
pts = asarray(v2n(vpd.GetPoints().GetData()),dtype=ntype)
print('Saved points coordinates array')
# getting Polygons
if vpd.GetPolys().GetData().GetNumberOfTuples():
ntype=gnat(vpd.GetPolys().GetData().GetDataType())
Nplex = vpd.GetPolys().GetMaxCellSize()
polys = asarray(v2n(vpd.GetPolys().GetData()),dtype=ntype).reshape(-1,Nplex+1)[:,1:]
print('Saved polys connectivity array')
# getting Lines
if vpd.GetLines().GetData().GetNumberOfTuples():
ntype=gnat(vpd.GetLines().GetData().GetDataType())
Nplex = vpd.GetLines().GetMaxCellSize()
lines = asarray(v2n(vpd.GetLines().GetData()),dtype=ntype).reshape(-1,Nplex+1)[:,1:]
print('Saved lines connectivity array')
# getting Vertices
if vpd.GetVerts().GetData().GetNumberOfTuples():
ntype=gnat(vpd.GetVerts().GetData().GetDataType())
Nplex = vpd.GetVerts().GetMaxCellSize()
verts = asarray(v2n(vpd.GetVerts().GetData()),dtype=ntype).reshape(-1,Nplex+1)[:,1:]
print('Saved verts connectivity array')
return pts, polys, lines, verts
def vtkPointInsideObject(S,P,tol=0.):
"""vtk function to test which of the points P are inside surface S"""
from vtk import vtkSelectEnclosedPoints
vpp = convert2VPD(P)
vps =convert2VPD(S,clean=False)
enclosed_pts = vtkSelectEnclosedPoints()
enclosed_pts.SetInput(vpp)
enclosed_pts.SetTolerance(tol)
enclosed_pts.SetSurface(vps)
enclosed_pts.SetCheckSurface(1)
enclosed_pts.Update()
inside_arr = enclosed_pts.GetOutput().GetPointData().GetArray('SelectedPoints')
enclosed_pts.ReleaseDataFlagOn()
enclosed_pts.Complete()
del enclosed_pts
return asarray(v2n(inside_arr),'bool')
def vtkIntersectWithSegment(surf,lines,tol=0.0):
"""
Computes the intersection of surf with lines.
Returns a list of intersection point lists and a list of the surf element numbers where each intersection lies.
The position in the list corresponds to the line number. If there is no intersection, the corresponding
lists are empty.
Parameters:
surf : can be Formex, Mesh or TriSurface
lines : a mesh of segments
"""
from vtk import vtkOBBTree, vtkPoints, vtkIdList
surf = convert2VPD(surf,clean=False)
loc = vtkOBBTree()
loc.SetDataSet(surf)
loc.SetTolerance(tol)
loc.BuildLocator()
loc.Update()
cellids = [[],]*lines.nelems()
pts = [[],]*lines.nelems()
for i in range(lines.nelems()):
ptstmp = vtkPoints()
cellidstmp = vtkIdList()
loc.IntersectWithLine(lines.coords[lines.elems][i][1],lines.coords[lines.elems][i][0],ptstmp, cellidstmp)
if cellidstmp.GetNumberOfIds():
cellids[i] = [cellidstmp.GetId(j) for j in range(cellidstmp.GetNumberOfIds())]
pts[i] = Coords(v2n(ptstmp.GetData()).squeeze())
loc.FreeSearchStructure()
del loc
return pts,cellids
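# --- Illustrative usage sketch (added; round trip between pyFormex and VTK
# using the functions defined in this module) ---
# vpd = convert2VPD(M, clean=True) # Mesh/Coords -> vtkPolyData
# pts, polys, lines, verts = convertFromVPD(vpd) # vtkPolyData -> numpy arrays
# inside = vtkPointInsideObject(S, P) # boolean mask: which points P lie inside surface S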
# End
|
dladd/pyFormex
|
pyformex/plugins/vtk_itf.py
|
Python
|
gpl-3.0
| 8,856
|
[
"VTK"
] |
40d740651898145b1cbb7a63b65504b6a290e051f9f1056101917575625da8b0
|
#! /usr/bin/env python
'''
jsSHA HMAC Test Result Generator
Version 1.0 Copyright Brian Turek 2009
Distributed under the BSD License
See http://jssha.sourceforge.net/ for more information
'''
import hashlib
import hmac
def main():
'''
main()
Calculates the HMAC of the test vectors given in FIPS-198a for full
length HMACs. Uses double the key sizes for SHA-384 and SHA-512 as
they have double the block size
'''
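# Added note: SHA-1/224/256 operate on 64-byte blocks while SHA-384/512
# operate on 128-byte blocks, which is why the keys are doubled further
# below before the SHA-384 and SHA-512 tests.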
# shortKey tests for handling of key lengths less than the block size
shortTxt = b'Sample #2'
shortKey = bytes.fromhex('30313233 34353637 38393a3b 3c3d3e3f 40414243')
# medKey tests for handling of keys lengths equal to the block size
medTxt = b'Sample #1'
medKey = bytes.fromhex('00010203 04050607 08090a0b 0c0d0e0f 10111213' +
'14151617 18191a1b 1c1d1e1f 20212223 24252627 28292a2b 2c2d2e2f'+
'30313233 34353637 38393a3b 3c3d3e3f')
# largeKey tests for handling of keys lengths greater than the block size
largeTxt = b'Sample #3'
largeKey = bytes.fromhex('50515253 54555657 58595a5b 5c5d5e5f 60616263'+
'64656667 68696a6b 6c6d6e6f 70717273 74757677 78797a7b 7c7d7e7f' +
'80818283 84858687 88898a8b 8c8d8e8f 90919293 94959697 98999a9b' +
'9c9d9e9f a0a1a2a3 a4a5a6a7 a8a9aaab acadaeaf b0b1b2b3')
# Perform the SHA-1 Tests
print('\nSHA-1 Short Key Result:')
print(hmac.new(shortKey, shortTxt, hashlib.sha1).hexdigest())
print('\nSHA-1 Medium Key Result:')
print(hmac.new(medKey, medTxt, hashlib.sha1).hexdigest())
print('\nSHA-1 Large Key Result:')
print(hmac.new(largeKey, largeTxt, hashlib.sha1).hexdigest())
# Perform the SHA-224 Tests
print('\nSHA-224 Short Key Result:')
print(hmac.new(shortKey, shortTxt, hashlib.sha224).hexdigest())
print('\nSHA-224 Medium Key Result:')
print(hmac.new(medKey, medTxt, hashlib.sha224).hexdigest())
print('\nSHA-224 Large Key Result:')
print(hmac.new(largeKey, largeTxt, hashlib.sha224).hexdigest())
# Perform the SHA-256 Tests
print('\nSHA-256 Short Key Result:')
print(hmac.new(shortKey, shortTxt, hashlib.sha256).hexdigest())
print('\nSHA-256 Medium Key Result:')
print(hmac.new(medKey, medTxt, hashlib.sha256).hexdigest())
print('\nSHA-256 Large Key Result:')
print(hmac.new(largeKey, largeTxt, hashlib.sha256).hexdigest())
# Since SHA-384 and SHA-512 take double the block size, double the key
# length so the tests act against the same functions as above
shortKey = shortKey * 2
medKey = medKey * 2
largeKey = largeKey * 2
# Perform the SHA-384 Tests
print('\nSHA-384 Short Key Result:')
print(hmac.new(shortKey, shortTxt, hashlib.sha384).hexdigest())
print('\nSHA-384 Medium Key Result:')
print(hmac.new(medKey, medTxt, hashlib.sha384).hexdigest())
print('\nSHA-384 Large Key Result:')
print(hmac.new(largeKey, largeTxt, hashlib.sha384).hexdigest())
# Perform the SHA-512 Tests
print('\nSHA-512 Short Key Result:')
print(hmac.new(shortKey, shortTxt, hashlib.sha512).hexdigest())
print('\nSHA-512 Medium Key Result:')
print(hmac.new(medKey, medTxt, hashlib.sha512).hexdigest())
print('\nSHA-512 Large Key Result:')
print(hmac.new(largeKey, largeTxt, hashlib.sha512).hexdigest())
if ('__main__' == __name__):
main()
|
a2n/jsSHA
|
test/HMACGen.py
|
Python
|
bsd-3-clause
| 3,154
|
[
"Brian"
] |
a0ce03c2656cdd76d46ac8172f0927f70f8cbb13d0a7d3140b900043366da692
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
# part of the code below was taken from `openpathsampling` see
# <http://www.openpathsampling.org> or
# <http://github.com/openpathsampling/openpathsampling
# for details and license
from base import StorableMixin, create_to_dict
from syncvar import SyncVariable, ObjectSyncVariable, JSONDataSyncVariable
from cache import WeakKeyCache, WeakLRUCache, WeakValueCache, MaxCache, \
NoCache, Cache, LRUCache
from dictify import ObjectJSON, UUIDObjectJSON
from mongodb import MongoDBStorage
from object import ObjectStore
from proxy import DelayedLoader, lazy_loading_attributes, LoaderProxy
from file import FileStore, DataDict
|
thempel/adaptivemd
|
adaptivemd/mongodb/__init__.py
|
Python
|
lgpl-2.1
| 1,650
|
[
"MDTraj"
] |
4b00e741669a3ee0157efebf9501fb0e3e8c16b613d63965e66ccae8adeca093
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements a NetCDF database differ, assuming xarray structure.
"""
import sys
import os
import numpy as np
import xarray as xr
from Tester import Differ
from UnorderedCSVDiffer import UnorderedCSVDiffer, UnorderedCSV
# get access to math tools from RAVEN
try:
from utils import mathUtils
except ImportError:
new = os.path.realpath(os.path.join(os.path.realpath(__file__), '..', '..',
'..', '..', 'framework'))
sys.path.append(new)
from utils import mathUtils
whoAmI = False # enable to show test dir and out files
debug = False # enable to increase printing
class NetCDFDiffer(UnorderedCSVDiffer):
"""
Used for comparing two NetCDF databases
"""
def diff(self):
"""
Run the comparison.
@ In, None
@ Out, same, bool, if True then files are the same
@ Out, messages, str, messages to print on fail
"""
# read in files
for testFilename, goldFilename in zip(self._out_files, self._gold_files):
# local "same" and message list
same = True
msg = []
# load test file
try:
testDS = xr.load_dataset(testFilename)
# if file doesn't exist, that's a problem
except IOError:
msg.append('Test file "{}" does not exist!'.format(testFilename))
same = False
# load gold file
try:
goldDS = xr.load_dataset(goldFilename)
goldCsv = None
# if file doesn't exist, that's a problem
except IOError:
msg.append('Gold file "{}" does not exist!'.format(goldFilename))
same = False
# if either file did not exist, clean up and go to next outfile
if not same:
self.finalizeMessage(same, msg, testFilename)
continue
# at this point, we've loaded both files, so compare them.
## compare data contents
# TODO zero threshold
if self._check_absolute_values:
kwargs = {'atol': self._rel_err}
else:
kwargs = {'rtol': self._rel_err}
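# Added note: rtol/atol follow the usual numpy.allclose convention, i.e. values
# compare equal when |test - gold| <= atol + rtol * |gold|.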
try:
xr.testing.assert_allclose(testDS, goldDS, **kwargs)
except AssertionError as e:
same = False
msg.append('Dataset diff detected ("left" is test, "right" is gold):\n{}'.format(str(e)))
self.finalizeMessage(same, msg, testFilename)
return self._same, self._message
class NetCDF(UnorderedCSV):
"""
This is the class to use for handling the parameters block.
"""
def check_output(self):
"""
Checks that the output matches the gold.
returns (same, message) where same is true if the
test passes, or false if the test fails. message should
give a human readable explanation of the differences.
@ In, None
@ Out, (same, message), same is true if the tests passes.
"""
csvFiles = self._get_test_files()
goldFiles = self._get_gold_files()
diff = NetCDFDiffer(csvFiles,
goldFiles,
relativeError=self._rel_err,
zeroThreshold=self._zero_threshold,
ignoreSign=self._ignore_sign,
absoluteCheck=self._check_absolute_value)
return diff.diff()
|
idaholab/raven
|
scripts/TestHarness/testers/NetCDFDiffer.py
|
Python
|
apache-2.0
| 3,769
|
[
"NetCDF"
] |
f513c8a864d7b74fae660b79e13d91e19ba47cb495c2c14809471a5d90fa250d
|
"""Astronomical and physics constants.
This module complements constants defined in `astropy.constants`,
with gravitational parameters and radii.
Note that `GM_jupiter` and `GM_neptune` both refer to the gravitational parameter of the whole planetary system.
Unless otherwise specified, gravitational and mass parameters were obtained from:
* Luzum, Brian et al. “The IAU 2009 System of Astronomical Constants: The Report of the IAU Working Group on Numerical
Standards for Fundamental Astronomy.” Celestial Mechanics and Dynamical Astronomy 110.4 (2011): 293–304.
Crossref. Web. `DOI: 10.1007/s10569-011-9352-4`_
radii were obtained from:
* Archinal, B. A. et al. “Report of the IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009.”
Celestial Mechanics and Dynamical Astronomy 109.2 (2010): 101–135. Crossref. Web. `DOI: 10.1007/s10569-010-9320-4`_
.. _`DOI: 10.1007/s10569-011-9352-4`: http://dx.doi.org/10.1007/s10569-011-9352-4
.. _`DOI: 10.1007/s10569-010-9320-4`: http://dx.doi.org/10.1007/s10569-010-9320-4
J2 for the Sun was obtained from:
* https://hal.archives-ouvertes.fr/hal-00433235/document (New values of gravitational moments J2 and J4 deduced
from helioseismology, Redouane Mecheri et al)
"""
from astropy import time
from astropy.constants import Constant
from astropy.constants.iau2015 import (
M_earth as _M_earth,
M_jup as _M_jupiter,
M_sun as _M_sun,
)
__all__ = [
"J2000",
"J2000_TDB",
"J2000_TT",
"GM_sun",
"GM_earth",
"GM_mercury",
"GM_venus",
"GM_mars",
"GM_jupiter",
"GM_saturn",
"GM_uranus",
"GM_neptune",
"GM_pluto",
"GM_moon",
"M_earth",
"M_jupiter",
"M_sun",
"R_mean_earth",
"R_mean_mercury",
"R_mean_venus",
"R_mean_mars",
"R_mean_jupiter",
"R_mean_saturn",
"R_mean_uranus",
"R_mean_neptune",
"R_mean_pluto",
"R_mean_moon",
"R_earth",
"R_mercury",
"R_venus",
"R_mars",
"R_jupiter",
"R_saturn",
"R_sun",
"R_uranus",
"R_neptune",
"R_pluto",
"R_moon",
"R_polar_earth",
"R_polar_mercury",
"R_polar_venus",
"R_polar_mars",
"R_polar_jupiter",
"R_polar_saturn",
"R_polar_uranus",
"R_polar_neptune",
"R_polar_pluto",
"R_polar_moon",
"J2_sun",
"J2_earth",
"J3_earth",
"J2_mars",
"J3_mars",
"J2_venus",
"J3_venus",
"H0_earth",
"rho0_earth",
"Wdivc_sun",
]
# HACK: sphinx-autoapi variable definition
M_earth = _M_earth
M_jupiter = _M_jupiter
M_sun = _M_sun
# See for example USNO Circular 179
J2000_TT = time.Time("J2000", scale="tt")
J2000_TDB = time.Time("J2000", scale="tdb")
J2000 = J2000_TT
GM_sun = Constant(
"GM_sun",
"Heliocentric gravitational constant",
1.32712442099e20,
"m3 / (s2)",
0.0000000001e20,
"IAU 2009 system of astronomical constants",
system="si",
)
GM_earth = Constant(
"GM_earth",
"Geocentric gravitational constant",
3.986004418e14,
"m3 / (s2)",
0.000000008e14,
"IAU 2009 system of astronomical constants",
system="si",
)
# Anderson, John D. et al. “The Mass, Gravity Field, and Ephemeris of Mercury.” Icarus 71.3 (1987): 337–349.
# Crossref. Web. DOI: 10.1016/0019-1035(87)90033-9
GM_mercury = Constant(
"GM_mercury",
"Mercury gravitational constant",
2.203209e13,
"m3 / (s2)",
0.91,
"IAU 2009 system of astronomical constants",
system="si",
)
# Konopliv, A.S., W.B. Banerdt, and W.L. Sjogren. “Venus Gravity: 180th Degree and Order Model.”
# Icarus 139.1 (1999): 3–18. Crossref. Web. DOI: 10.1006/icar.1999.6086
GM_venus = Constant(
"GM_venus",
"Venus gravitational constant",
3.24858592e14,
"m3 / (s2)",
0.006,
"IAU 2009 system of astronomical constants",
system="si",
)
# Konopliv, Alex S. et al. “A Global Solution for the Mars Static and Seasonal Gravity, Mars Orientation, Phobos and
# Deimos Masses, and Mars Ephemeris.” Icarus 182.1 (2006): 23–50.
# Crossref. Web. DOI: 10.1016/j.icarus.2005.12.025
GM_mars = Constant(
"GM_mars",
"Mars gravitational constant",
4.282837440e13,
"m3 / (s2)",
0.00028,
"IAU 2009 system of astronomical constants",
system="si",
)
# Jacobson, R. A. et al. “A comprehensive orbit reconstruction for the galileo prime mission in the JS200 system.”
# The Journal of the Astronautical Sciences 48.4 (2000): 495–516.
# Crossref. Web.
GM_jupiter = Constant(
"GM_jupiter",
"Jovian system gravitational constant",
1.2671276253e17,
"m3 / (s2)",
2.00,
"IAU 2009 system of astronomical constants",
system="si",
)
# Jacobson, R. A. et al. “The Gravity Field of the Saturnian System from Satellite Observations and Spacecraft
# Tracking Data.” The Astronomical Journal 132.6 (2006): 2520–2526.
# Crossref. Web. DOI: 10.1086/508812
GM_saturn = Constant(
"GM_saturn",
"Saturn gravitational constant",
3.79312077e16,
"m3 / (s2)",
1.1,
"IAU 2009 system of astronomical constants",
system="si",
)
# Jacobson, R. A. et al. “The Masses of Uranus and Its Major Satellites from Voyager Tracking Data and Earth-Based
# Uranian Satellite Data.” The Astronomical Journal 103 (1992): 2068.
# Crossref. Web. DOI: 10.1086/116211
GM_uranus = Constant(
"GM_uranus",
"Uranus gravitational constant",
5.7939393e15,
"m3 / (s2)",
13.0,
"IAU 2009 system of astronomical constants",
system="si",
)
# Jacobson, R. A. “THE ORBITS OF THE NEPTUNIAN SATELLITES AND THE ORIENTATION OF THE POLE OF NEPTUNE.”
# The Astronomical Journal 137.5 (2009): 4322–4329. Crossref. Web. DOI:
# 10.1088/0004-6256/137/5/4322
GM_neptune = Constant(
"GM_neptune",
"Neptunian system gravitational constant",
6.836527100580397e15,
"m3 / (s2)",
10.0,
"IAU 2009 system of astronomical constants",
system="si",
)
# Tholen, David J. et al. “MASSES OF NIX AND HYDRA.” The Astronomical Journal 135.3 (2008): 777–784. Crossref. Web.
# DOI: 10.1088/0004-6256/135/3/777
GM_pluto = Constant(
"GM_pluto",
"Pluto gravitational constant",
8.703e11,
"m3 / (s2)",
3.7,
"IAU 2009 system of astronomical constants",
system="si",
)
# Lemoine, Frank G. et al. “High-Degree Gravity Models from GRAIL Primary Mission Data.”
# Journal of Geophysical Research: Planets 118.8 (2013): 1676–1698.
# Crossref. Web. DOI: 10.1002/jgre.20118
GM_moon = Constant(
"GM_moon",
"Moon gravitational constant",
4.90279981e12,
"m3 / (s2)",
0.00000774,
"Journal of Geophysical Research: Planets 118.8 (2013)",
system="si",
)
# Archinal, B. A., Acton, C. H., A’Hearn, M. F., Conrad, A., Consolmagno,
# G. J., Duxbury, T., … Williams, I. P. (2018). Report of the IAU Working
# Group on Cartographic Coordinates and Rotational Elements: 2015. Celestial
# Mechanics and Dynamical Astronomy, 130(3). doi:10.1007/s10569-017-9805-5
R_mean_earth = Constant(
"R_mean_earth",
"Earth mean radius",
6.3710084e6,
"m",
0.1,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_mercury = Constant(
"R_mean_mercury",
"Mercury mean radius",
2.4394e6,
"m",
100,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_venus = Constant(
"R_mean_venus",
"Venus mean radius",
6.0518e6,
"m",
1000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_mars = Constant(
"R_mean_mars",
"Mars mean radius",
3.38950e6,
"m",
2000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_jupiter = Constant(
"R_mean_jupiter",
"Jupiter mean radius",
6.9911e7,
"m",
6000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009",
system="si",
)
R_mean_saturn = Constant(
"R_mean_saturn",
"Saturn mean radius",
5.8232e7,
"m",
6000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_uranus = Constant(
"R_mean_uranus",
"Uranus mean radius",
2.5362e7,
"m",
7000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_neptune = Constant(
"R_mean_neptune",
"Neptune mean radius",
2.4622e7,
"m",
19000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_pluto = Constant(
"R_mean_pluto",
"Pluto mean radius",
1.188e6,
"m",
1600,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mean_moon = Constant(
"R_mean_moon",
"Moon mean radius",
1.7374e6,
"m",
0,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_sun = Constant(
"R_sun",
"Sun equatorial radius",
6.95700e8,
"m",
0,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_earth = Constant(
"R_earth",
"Earth equatorial radius",
6.3781366e6,
"m",
0.1,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mercury = Constant(
"R_mercury",
"Mercury equatorial radius",
2.44053e6,
"m",
40,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_venus = Constant(
"R_venus",
"Venus equatorial radius",
6.0518e6,
"m",
1000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_mars = Constant(
"R_mars",
"Mars equatorial radius",
3.39619e6,
"m",
100,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_jupiter = Constant(
"R_jupiter",
"Jupiter equatorial radius",
7.1492e7,
"m",
4000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009",
system="si",
)
R_saturn = Constant(
"R_saturn",
"Saturn equatorial radius",
6.0268e7,
"m",
4000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_uranus = Constant(
"R_uranus",
"Uranus equatorial radius",
2.5559e7,
"m",
4000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_neptune = Constant(
"R_neptune",
"Neptune equatorial radius",
2.4764e7,
"m",
15000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_pluto = Constant(
"R_pluto",
"Pluto equatorial radius",
1.1883e6,
"m",
1600,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_moon = Constant(
"R_moon",
"Moon equatorial radius",
1.7374e6,
"m",
0,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_earth = Constant(
"R_polar_earth",
"Earth polar radius",
6.3567519e6,
"m",
0.1,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_mercury = Constant(
"R_polar_mercury",
"Mercury polar radius",
2.43826e6,
"m",
40,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_venus = Constant(
"R_polar_venus",
"Venus polar radius",
6.0518e6,
"m",
1000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_mars = Constant(
"R_polar_mars",
"Mars polar radius",
3.376220e6,
"m",
100,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_jupiter = Constant(
"R_polar_jupiter",
"Jupiter polar radius",
6.6854e7,
"m",
10000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009",
system="si",
)
R_polar_saturn = Constant(
"R_polar_saturn",
"Saturn polar radius",
5.4364e7,
"m",
10000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_uranus = Constant(
"R_polar_uranus",
"Uranus polar radius",
2.4973e7,
"m",
20000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_neptune = Constant(
"R_polar_neptune",
"Neptune polar radius",
2.4341e7,
"m",
30000,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_pluto = Constant(
"R_polar_pluto",
"Pluto polar radius",
1.1883e6,
"m",
1600,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
R_polar_moon = Constant(
"R_polar_moon",
"Moon polar radius",
1.7374e6,
"m",
0,
"IAU Working Group on Cartographic Coordinates and Rotational Elements: 2015",
system="si",
)
J2_sun = Constant(
"J2_sun",
"Sun J2 oblateness coefficient",
2.20e-7,
"",
0.01e-7,
"HAL archives",
system="si",
)
J2_earth = Constant(
"J2_earth",
"Earth J2 oblateness coefficient",
0.00108263,
"",
1,
"HAL archives",
system="si",
)
J3_earth = Constant(
"J3_earth",
"Earth J3 asymmetry between the northern and southern hemispheres",
-2.5326613168e-6,
"",
1,
"HAL archives",
system="si",
)
J2_mars = Constant(
"J2_mars",
"Mars J2 oblateness coefficient",
0.0019555,
"",
1,
"HAL archives",
system="si",
)
J3_mars = Constant(
"J3_mars",
"Mars J3 asymmetry between the northern and southern hemispheres",
3.1450e-5,
"",
1,
"HAL archives",
system="si",
)
J2_venus = Constant(
"J2_venus",
"Venus J2 oblateness coefficient",
4.4044e-6,
"",
1,
"HAL archives",
system="si",
)
J3_venus = Constant(
"J3_venus",
"Venus J3 asymmetry between the northern and southern hemispheres",
-2.1082e-6,
"",
1,
"HAL archives",
system="si",
)
H0_earth = Constant(
"H0_earth",
"Earth H0 atmospheric scale height",
8_500,
"m",
1,
"de Pater and Lissauer 2010",
system="si",
)
rho0_earth = Constant(
"rho0_earth",
"Earth rho0 atmospheric density prefactor",
1.3,
"kg / (m3)",
1,
"de Pater and Lissauer 2010",
system="si",
)
Wdivc_sun = Constant(
"Wdivc_sun",
"total radiation power of Sun divided by the speed of light",
1.0203759306204136e14,
"kg km / (s2)",
1,
"Howard Curtis",
system="si",
)
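# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how these Constant objects compose with astropy
# units; the 400 km altitude is an arbitrary example value, not something the
# module defines.
if __name__ == "__main__":
    import numpy as np
    from astropy import units as u
    altitude = 400 * u.km                  # assumed example altitude
    r = R_earth + altitude.to(u.m)         # orbital radius from Earth's centre
    v_circ = np.sqrt(GM_earth / r)         # circular orbital speed, sqrt(mu / r)
    print(v_circ.to(u.km / u.s))           # roughly 7.7 km/s at this altitude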
|
poliastro/poliastro
|
src/poliastro/constants/general.py
|
Python
|
mit
| 15,064
|
[
"Brian"
] |
34e000b009c2ac2853440f55ce1a3f5d025af42ab3f5576abdb3d240ce80659a
|
# -*- coding: utf-8 -*-
r"""
Linear elasticity with given displacements.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
This example models a cylinder that is fixed at one end while the second end
has a specified displacement of 0.01 in the x direction (this boundary
condition is named ``'Displaced'``). There is also a specified displacement of
0.005 in the z direction for points in the region labeled
``'SomewhereTop'``. This boundary condition is named
``'PerturbedSurface'``. The region ``'SomewhereTop'`` is specified as those
vertices for which::
(z > 0.017) & (x > 0.03) & (x < 0.07)
The displacement field (three DOFs/node) in the ``'Omega'`` region is
approximated using P1 (four-node tetrahedral) finite elements. The material is
linear elastic and its properties are specified as Lamé parameters
:math:`\lambda` and :math:`\mu` (see
http://en.wikipedia.org/wiki/Lam%C3%A9_parameters)
The output is the displacement for each vertex, saved by default to
cylinder.vtk. View the results using::
$ ./postproc.py cylinder.vtk --wireframe -b --only-names=u -d'u,plot_displacements,rel_scaling=1'
"""
from __future__ import absolute_import
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Right' : ('vertices in (x > 0.099)', 'facet'),
'SomewhereTop' : ('vertices in (z > 0.017) & (x > 0.03) & (x < 0.07)',
'vertex'),
}
materials = {
'solid' : ({'D': stiffness_from_lame(dim=3, lam=1e1, mu=1e0)},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
integrals = {
'i' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'Fixed' : ('Left', {'u.all' : 0.0}),
'Displaced' : ('Right', {'u.0' : 0.01, 'u.[1,2]' : 0.0}),
'PerturbedSurface' : ('SomewhereTop', {'u.2' : 0.005}),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.D, v, u) = 0""",
}
solvers = {
'ls': ('ls.auto_direct', {}),
'newton': ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
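# --- Hedged illustration (not part of the original example) ---
# stiffness_from_lame(dim=3, lam, mu) is expected to return the 6x6 elastic
# stiffness matrix in Voigt notation corresponding to the D_ijkl tensor in the
# docstring: lam + 2*mu on the normal-strain diagonal, mu on the shear
# diagonal, and lam off-diagonal. A quick check, assuming sfepy is importable:
#
#   from sfepy.mechanics.matcoefs import stiffness_from_lame
#   D = stiffness_from_lame(dim=3, lam=1e1, mu=1e0)
#   print(D.shape)    # expected (6, 6)
#   print(D[0, 0])    # expected lam + 2*mu = 12.0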
|
vlukes/sfepy
|
examples/linear_elasticity/linear_elastic.py
|
Python
|
bsd-3-clause
| 2,483
|
[
"VTK"
] |
fd9f4525221349fca65435cc8c0821ae73f02559be64910237e70fc9497b6266
|
"""
flatten_obj.py - Flatten multi-state pymol objects into a single state.
<https://pymolwiki.org/index.php/Flatten_obj>
This is particularly useful for dealing with biological assemblies, which are
loaded as multi-state objects when fetched using `fetch PDBID, type=pdb1`. It
can also be used as a quick way to combine multiple objects without causing
collisions between chain identifiers.
The command re-letters chains to avoid collisions. Older versions of PyMOL
restrict the chain id to a single character, so the script will fail for
assemblies with >62 chains. With more recent versions, this problem is solved
with multi-character chain IDs. Several options are available for how
re-lettering should occur.
Author: Spencer Bliven <spencer.bliven@gmail.com>
Date: October 30, 2015
Version: 1.0
License: Public Domain
"""
from pymol import cmd, stored
import re
try:
from collections import OrderedDict
_orderedDict = True
except ImportError:
_orderedDict = False
# PyMOL 1.7.4 introduces support for multi-letter chains, so we can afford to
# use a smaller alphabet. In earlier versions, use lower-case letters if needed
# (requires running `set ignore_case, 0`)
_long_chains = cmd.get_version()[1] >= 1.74
_default_base = 36 if _long_chains else 62
class OutOfChainsError(Exception):
def __init__(self,msg):
self.msg=msg
def __str__(self):
return str(self.msg)
class ChainSet(object):
"""
Base class for various methods to rename chains
Contains _chains, which maps from the renamed chain to a tuple with the
original (object,state,chain). All dict-like accessors work on ChainSets,
e.g.
chain_set["A"] -> ("obj",1,"A")
"""
def __init__(self):
        # Use an OrderedDict in Python >= 2.7 for better printing
if _orderedDict:
self._chains = OrderedDict()
else:
self._chains = dict()
def map_chain(self, obj, state, origChain ):
"""
map_chain(string obj,int state, string chain]]) -> string
Maps a chain letter to a unique chainID. Results are unique within each
instance, and can be used as keys on this chain set.
"""
raise NotImplementedError("Base class")
# delegate most methods to _chains
def __getattr__(self,at):
if at in "pop popitem update setdefault".split():
raise AttributeError("type object '%s' has no attribute '%s'"%(type(self),at))
return getattr(self._chains,at)
def __cmp__(self,other): return self._chains.__cmp__(other)
def __eq__(self,other): return self._chains.__eq__(other)
def __ge__(self,other): return self._chains.__ge__(other)
def __gt__(self,other): return self._chains.__gt__(other)
def __le__(self,other): return self._chains.__le__(other)
def __lt__(self,other): return self._chains.__lt__(other)
def __ne__(self,other): return self._chains.__ne__(other)
def __len__(self): return self._chains.__len__()
def __contains__(self,key): return self._chains.__contains__(key)
def __getitem__(self,key): return self._chains.__getitem__(key)
def __iter__(self): return self._chains.__iter__()
def __str__(self): return str(self._chains)
@staticmethod
def _int_to_chain(i,base=_default_base):
"""
_int_to_chain(int,int) -> str
Converts a positive integer to a chain ID. Chain IDs include uppercase
characters, numbers, and optionally lowercase letters.
i = a positive integer to convert
base = the alphabet size to include. Typically 36 or 62.
"""
if i < 0:
raise ValueError("positive integers only")
if base < 0 or 62 < base:
raise ValueError("Invalid base")
quot = int(i)//base
rem = i%base
if rem < 26:
letter = chr( ord("A") + rem)
elif rem < 36:
letter = str( rem-26)
else:
letter = chr( ord("a") + rem - 36)
if quot == 0:
return letter
else:
return ChainSet._int_to_chain(quot-1,base) + letter
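    # Hedged worked examples (not from the original script), following the
    # mapping above:
    #   _int_to_chain(0)  -> "A"     _int_to_chain(25) -> "Z"
    #   _int_to_chain(26) -> "0"     _int_to_chain(35) -> "9"
    #   _int_to_chain(36) -> "AA" with base 36, or "a" with base 62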
class DefaultChainSet(ChainSet):
"""
Avoids relettering chains if possible. If a chain has been used, uses the
next available chain letter. Note that this can potentially lead to
cascading renames, e.g. if chains are sorted alphabetically rather than by
object.
Used for rename = 0.
"""
def __init__(self):
super(DefaultChainSet,self).__init__()
self._next_chain = 0
def map_chain(self, obj, state, origChain ):
# Keep _next_chain up-to-date
while ChainSet._int_to_chain(self._next_chain) in self:
self._next_chain += 1
# Map this chain
if origChain in self:
# Rename
next_chain = ChainSet._int_to_chain(self._next_chain)
self._next_chain += 1
else:
next_chain = origChain
self._chains[next_chain] = (obj,state,origChain)
return next_chain
class SequentialChainSet(ChainSet):
"""
Renumbers all chains starting at A, continuing through the capital letters
and numbers, and then adding additional letters through 9999 (the last
valid chain for mmCIF) and beyond.
Used for rename=1
"""
def __init__(self):
super(SequentialChainSet,self).__init__()
self._next_chain = 0
def map_chain(self, obj, state, origChain ):
next_chain = ChainSet._int_to_chain(self._next_chain)
self._chains[next_chain] = (obj,state,origChain)
self._next_chain += 1
return next_chain
class LongChainSet(ChainSet):
"""
Uses long strings for the chain names. Chains are renamed like
"%s_%s_%04d"%(original_chainid,objectname,state).
Used for rename=2
"""
def map_chain(self, obj, state, origChain ):
ch = "%s_%s_%04d"%(origChain,obj,state)
if ch in self:
raise ValueError("Duplicate chain %s"%(ch))
self._chains[ch] = (obj,state,origChain)
return ch
def flatten_obj(name="",selection="",state=0,rename=0,quiet=1,chain_map=""):
"""
DESCRIPTION
"flatten_obj" combines multiple objects or states into a single object,
renaming chains where required
USAGE
flatten_obj name, selection[, state[, rename[, quiet[, chain_map]]]]
ARGUMENTS
name = a unique name for the flattened object {default: flat}
selection = the set of objects to include in the flattening. The selection
will be expanded to include all atoms of objects. {default: all}
state = the source state to select. Use 0 or -1 to flatten all states {default: 0}
rename = The scheme to use for renaming chains: {default: 0}
(0) preserve chains IDs where possible, rename other chains
alphabetically
(1) rename all chains alphabetically
(2) rename chains using the original chain letter, object name, and state
quiet = If set to 0, print some additional information about progress and
chain renaming {default: 1}
chain_map = An attribute name for the 'stored' scratch object. If
specified, `stored.<chain_map>` will be populated with a dictionary
        mapping the new chain names to a tuple giving the originating object,
state, and chainID. {default: ""}
NOTES
Like the select command, if name is omitted then the default object name
("flat") is used as the name argument.
Chain renaming is tricky. PDB files originally limited chains to single
letter identifiers containing [A-Za-z0-9]. When this was found to be
limiting, multi-letter chains (ideally < 4 chars) were allowed. This is
supported as of PyMOL 1.7. Earlier versions do not accept rename=2, and
will raise an exception when flattening a structure with more than 62
chains.
EXAMPLES
flatten_obj flat, nmrObj
flatten_obj ( obj1 or obj2 )
SEE ALSO
split_states
"""
# arguments
# Single argument; treat as selection
if name and not selection:
selection = name
name = ""
# default name and selection
if not name:
name = "flat"
if not selection:
selection = "(all)"
state = int(state)
rename = int(rename)
quiet = int(quiet)
    # Wrap in extra parentheses for get_object_list
selection = "( %s )" % selection
if rename == 0:
chainSet = DefaultChainSet()
elif rename == 1:
chainSet = SequentialChainSet()
elif rename == 2:
chainSet = LongChainSet()
else:
raise ValueError("Unrecognized rename option (Valid: 0,1,2)")
metaprefix = "temp" #TODO unique prefix
# store original value of retain_order, which causes weird interleaving of
# structures if enabled.
retain_order = cmd.get("retain_order")
try:
cmd.set("retain_order",0)
# create new object for each state
for obj in cmd.get_object_list(selection):
if state <= 0:
# all states
prefix = "%s_%s_"%(metaprefix,obj)
cmd.split_states(obj,prefix=prefix)
else:
prefix = "%s_%s_%04d"%(metaprefix,obj,state)
cmd.create(prefix, obj, state, 1)
# renumber all states
        statere = re.compile(r"^%s_(.*)_(\d+)$" % metaprefix) # matches split object names
warn_lowercase = False
# Iterate over all objects with metaprefix
try:
for obj in cmd.get_object_list("(%s_*)"%(metaprefix) ):
m = statere.match(obj)
if m is None:
print(("Failed to match object %s" %obj))
continue
origobj = m.group(1)
statenum = int(m.group(2))
chains = cmd.get_chains(obj)
rev_chain_map = {} #old -> new, for this obj only
for chain in sorted(chains,key=lambda x:(len(x),x)):
new_chain = chainSet.map_chain(origobj,statenum,chain)
rev_chain_map[chain] = new_chain
if not quiet:
print((" %s state %d chain %s -> %s"%(origobj,statenum,chain, new_chain) ))
if not _long_chains:
if len(new_chain) > 1:
raise OutOfChainsError("No additional chains available (max 62).")
space = {'rev_chain_map':rev_chain_map}
cmd.alter(obj,"chain = rev_chain_map[chain]",space=space)
print(("Creating object from %s_*"%metaprefix))
# Recombine into a single object
cmd.create(name,"%s_*"%metaprefix)
# Set chain_map
if chain_map:
setattr(stored,chain_map,chainSet)
# Warn if lowercase chains were generated
if cmd.get("ignore_case") == "on" and any([c.upper() != c for c in list(chainSet.keys())]):
print("Warning: using lower-case chain IDs. Consider running the "
"following command:\n set ignore_case, 0" )
finally:
# Clean up
print("Cleaning up intermediates")
cmd.delete("%s_*"%metaprefix)
finally:
# restore original parameters
print("Resetting variables")
cmd.set("retain_order",retain_order)
cmd.extend('flatten_obj', flatten_obj)
# tab-completion of arguments
cmd.auto_arg[0]['flatten_obj'] = [ cmd.object_sc, 'name or selection', '']
cmd.auto_arg[1]['flatten_obj'] = [ cmd.object_sc, 'selection', '']
|
mmagnus/rna-pdb-tools
|
rna_tools/tools/PyMOL4RNA/external_flatten_object.py
|
Python
|
gpl-3.0
| 11,652
|
[
"PyMOL"
] |
a9647219f4261f22ee4cf62597ea1714ea37a4583489e45c4e7d7d7163f5f85b
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
This file is provided to enable compatibility with waf 1.5, it will be removed in waf 1.7
"""
import sys
from waflib import ConfigSet, Logs, Options, Scripting, Task, Build, Configure, Node, Runner, TaskGen, Utils, Errors, Context
# the following is to bring some compatibility with waf 1.5 "import waflib.Configure → import Configure"
sys.modules['Environment'] = ConfigSet
ConfigSet.Environment = ConfigSet.ConfigSet
sys.modules['Logs'] = Logs
sys.modules['Options'] = Options
sys.modules['Scripting'] = Scripting
sys.modules['Task'] = Task
sys.modules['Build'] = Build
sys.modules['Configure'] = Configure
sys.modules['Node'] = Node
sys.modules['Runner'] = Runner
sys.modules['TaskGen'] = TaskGen
sys.modules['Utils'] = Utils
from waflib.Tools import c_preproc
sys.modules['preproc'] = c_preproc
from waflib.Tools import c_config
sys.modules['config_c'] = c_config
ConfigSet.ConfigSet.copy = ConfigSet.ConfigSet.derive
ConfigSet.ConfigSet.set_variant = Utils.nada
Build.BuildContext.add_subdirs = Build.BuildContext.recurse
Build.BuildContext.new_task_gen = Build.BuildContext.__call__
Build.BuildContext.is_install = 0
Node.Node.relpath_gen = Node.Node.path_from
def name_to_obj(self, s, env=None):
Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"')
return self.get_tgen_by_name(s)
Build.BuildContext.name_to_obj = name_to_obj
def env_of_name(self, name):
try:
return self.all_envs[name]
except KeyError:
Logs.error('no such environment: '+name)
return None
Build.BuildContext.env_of_name = env_of_name
def set_env_name(self, name, env):
self.all_envs[name] = env
return env
Configure.ConfigurationContext.set_env_name = set_env_name
def retrieve(self, name, fromenv=None):
try:
env = self.all_envs[name]
except KeyError:
env = ConfigSet.ConfigSet()
self.prepare_env(env)
self.all_envs[name] = env
else:
if fromenv: Logs.warn("The environment %s may have been configured already" % name)
return env
Configure.ConfigurationContext.retrieve = retrieve
Configure.ConfigurationContext.sub_config = Configure.ConfigurationContext.recurse
Configure.ConfigurationContext.check_tool = Configure.ConfigurationContext.load
Configure.conftest = Configure.conf
Configure.ConfigurationError = Errors.ConfigurationError
Options.OptionsContext.sub_options = Options.OptionsContext.recurse
Options.OptionsContext.tool_options = Context.Context.load
Options.Handler = Options.OptionsContext
Task.simple_task_type = Task.task_type_from_func = Task.task_factory
Task.TaskBase.classes = Task.classes
def setitem(self, key, value):
if key.startswith('CCFLAGS'):
key = key[1:]
self.table[key] = value
ConfigSet.ConfigSet.__setitem__ = setitem
@TaskGen.feature('d')
@TaskGen.before('apply_incpaths')
def old_importpaths(self):
if getattr(self, 'importpaths', []):
self.includes = self.importpaths
from waflib import Context
eld = Context.load_tool
def load_tool(*k, **kw):
ret = eld(*k, **kw)
if 'set_options' in ret.__dict__:
Logs.warn('compat: rename "set_options" to options')
ret.options = ret.set_options
if 'detect' in ret.__dict__:
Logs.warn('compat: rename "detect" to "configure"')
ret.configure = ret.detect
return ret
Context.load_tool = load_tool
rev = Context.load_module
def load_module(path):
ret = rev(path)
if 'set_options' in ret.__dict__:
Logs.warn('compat: rename "set_options" to "options" (%r)' % path)
ret.options = ret.set_options
if 'srcdir' in ret.__dict__:
Logs.warn('compat: rename "srcdir" to "top" (%r)' % path)
ret.top = ret.srcdir
if 'blddir' in ret.__dict__:
Logs.warn('compat: rename "blddir" to "out" (%r)' % path)
ret.out = ret.blddir
return ret
Context.load_module = load_module
old_post = TaskGen.task_gen.post
def post(self):
self.features = self.to_list(self.features)
if 'cc' in self.features:
Logs.warn('compat: the feature cc does not exist anymore (use "c")')
self.features.remove('cc')
self.features.append('c')
if 'cstaticlib' in self.features:
Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")')
self.features.remove('cstaticlib')
self.features.append(('cxx' in self.features) and 'cxxstlib' or 'cstlib')
if getattr(self, 'ccflags', None):
Logs.warn('compat: "ccflags" was renamed to "cflags"')
self.cflags = self.ccflags
return old_post(self)
TaskGen.task_gen.post = post
def waf_version(*k, **kw):
Logs.warn('wrong version (waf_version was removed in waf 1.6)')
Utils.waf_version = waf_version
import os
@TaskGen.feature('c', 'cxx', 'd')
@TaskGen.before('apply_incpaths', 'propagate_uselib_vars')
@TaskGen.after('apply_link', 'process_source')
def apply_uselib_local(self):
"""
process the uselib_local attribute
execute after apply_link because of the execution order set on 'link_task'
"""
env = self.env
from waflib.Tools.ccroot import stlink_task
# 1. the case of the libs defined in the project (visit ancestors first)
# the ancestors external libraries (uselib) will be prepended
self.uselib = self.to_list(getattr(self, 'uselib', []))
self.includes = self.to_list(getattr(self, 'includes', []))
names = self.to_list(getattr(self, 'uselib_local', []))
get = self.bld.get_tgen_by_name
seen = set([])
tmp = Utils.deque(names) # consume a copy of the list of names
if tmp:
Logs.warn('compat: "uselib_local" is deprecated, replace by "use"')
while tmp:
lib_name = tmp.popleft()
# visit dependencies only once
if lib_name in seen:
continue
y = get(lib_name)
y.post()
seen.add(lib_name)
# object has ancestors to process (shared libraries): add them to the end of the list
if getattr(y, 'uselib_local', None):
for x in self.to_list(getattr(y, 'uselib_local', [])):
obj = get(x)
obj.post()
if getattr(obj, 'link_task', None):
if not isinstance(obj.link_task, stlink_task):
tmp.append(x)
# link task and flags
if getattr(y, 'link_task', None):
link_name = y.target[y.target.rfind(os.sep) + 1:]
if isinstance(y.link_task, stlink_task):
env.append_value('STLIB', [link_name])
else:
# some linkers can link against programs
env.append_value('LIB', [link_name])
# the order
self.link_task.set_run_after(y.link_task)
# for the recompilation
self.link_task.dep_nodes += y.link_task.outputs
# add the link path too
tmp_path = y.link_task.outputs[0].parent.bldpath()
if not tmp_path in env['LIBPATH']:
env.prepend_value('LIBPATH', [tmp_path])
# add ancestors uselib too - but only propagate those that have no staticlib defined
for v in self.to_list(getattr(y, 'uselib', [])):
if not env['STLIB_' + v]:
if not v in self.uselib:
self.uselib.insert(0, v)
# if the library task generator provides 'export_includes', add to the include path
# the export_includes must be a list of paths relative to the other library
if getattr(y, 'export_includes', None):
self.includes.extend(y.to_incnodes(y.export_includes))
@TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib')
@TaskGen.after('apply_link')
def apply_objdeps(self):
"add the .o files produced by some other object files in the same manner as uselib_local"
names = getattr(self, 'add_objects', [])
if not names:
return
names = self.to_list(names)
get = self.bld.get_tgen_by_name
seen = []
while names:
x = names[0]
# visit dependencies only once
if x in seen:
names = names[1:]
continue
# object does not exist ?
y = get(x)
# object has ancestors to process first ? update the list of names
if getattr(y, 'add_objects', None):
added = 0
lst = y.to_list(y.add_objects)
lst.reverse()
for u in lst:
if u in seen: continue
added = 1
names = [u]+names
if added: continue # list of names modified, loop
# safe to process the current object
y.post()
seen.append(x)
for t in getattr(y, 'compiled_tasks', []):
self.link_task.inputs.extend(t.outputs)
@TaskGen.after('apply_link')
def process_obj_files(self):
if not hasattr(self, 'obj_files'):
return
for x in self.obj_files:
node = self.path.find_resource(x)
self.link_task.inputs.append(node)
@TaskGen.taskgen_method
def add_obj_file(self, file):
"""Small example on how to link object files as if they were source
obj = bld.create_obj('cc')
obj.add_obj_file('foo.o')"""
if not hasattr(self, 'obj_files'): self.obj_files = []
if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files')
self.obj_files.append(file)
old_define = Configure.ConfigurationContext.__dict__['define']
@Configure.conf
def define(self, key, val, quote=True):
old_define(self, key, val, quote)
if key.startswith('HAVE_'):
self.env[key] = 1
old_undefine = Configure.ConfigurationContext.__dict__['undefine']
@Configure.conf
def undefine(self, key):
old_undefine(self, key)
if key.startswith('HAVE_'):
self.env[key] = 0
# some people might want to use export_incdirs, but it was renamed
def set_incdirs(self, val):
Logs.warn('compat: change "export_incdirs" by "export_includes"')
self.export_includes = val
TaskGen.task_gen.export_incdirs = property(None, set_incdirs)
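# --- Hedged illustration (not part of the original shim) ---
# With this module loaded, a waf 1.5 style wscript along the lines of the
# sketch below is expected to keep working under waf 1.6: load_module() and
# load_tool() above rename the deprecated entry points and emit compat
# warnings, and new_task_gen/uselib_local are mapped onto their 1.6
# equivalents. All names in the sketch are illustrative.
#
#   srcdir = '.'              # renamed to `top` by load_module()
#   blddir = 'build'          # renamed to `out`
#   def set_options(opt):     # renamed to options()
#       opt.tool_options('compiler_cc')
#   def configure(conf):
#       conf.check_tool('compiler_cc')      # mapped to conf.load()
#   def build(bld):
#       bld.new_task_gen(features='c cprogram', source='main.c',
#                        target='app', uselib_local='mylib')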
|
Theragon/kupfer
|
waflib/extras/compat15.py
|
Python
|
gpl-3.0
| 9,248
|
[
"VisIt"
] |
dfce0e467fa6834fecee7dc70ab46cbed99a98e9bb699389c391011ae6c97691
|
import multiprocessing
import tempfile
import time
import numpy
import vtk
import i18n
import converters
# import imagedata_utils as iu
from scipy import ndimage
# TODO: Code duplicated from file {imagedata_utils.py}.
def ResampleImage3D(imagedata, value):
"""
Resample vtkImageData matrix.
"""
spacing = imagedata.GetSpacing()
extent = imagedata.GetExtent()
size = imagedata.GetDimensions()
width = float(size[0])
height = float(size[1]/value)
resolution = (height/(extent[1]-extent[0])+1)*spacing[1]
resample = vtk.vtkImageResample()
resample.SetInput(imagedata)
resample.SetAxisMagnificationFactor(0, resolution)
resample.SetAxisMagnificationFactor(1, resolution)
return resample.GetOutput()
class SurfaceProcess(multiprocessing.Process):
def __init__(self, pipe, filename, shape, dtype, mask_filename,
mask_shape, mask_dtype, spacing, mode, min_value, max_value,
decimate_reduction, smooth_relaxation_factor,
smooth_iterations, language, flip_image, q_in, q_out,
from_binary, algorithm, imagedata_resolution):
multiprocessing.Process.__init__(self)
self.pipe = pipe
self.spacing = spacing
self.filename = filename
self.mode = mode
self.min_value = min_value
self.max_value = max_value
self.decimate_reduction = decimate_reduction
self.smooth_relaxation_factor = smooth_relaxation_factor
self.smooth_iterations = smooth_iterations
self.language = language
self.flip_image = flip_image
self.q_in = q_in
self.q_out = q_out
self.dtype = dtype
self.shape = shape
self.from_binary = from_binary
self.algorithm = algorithm
self.imagedata_resolution = imagedata_resolution
self.mask_filename = mask_filename
self.mask_shape = mask_shape
self.mask_dtype = mask_dtype
def run(self):
if self.from_binary:
self.mask = numpy.memmap(self.mask_filename, mode='r',
dtype=self.mask_dtype,
shape=self.mask_shape)
else:
self.image = numpy.memmap(self.filename, mode='r', dtype=self.dtype,
shape=self.shape)
self.mask = numpy.memmap(self.mask_filename, mode='r',
dtype=self.mask_dtype,
shape=self.mask_shape)
while 1:
roi = self.q_in.get()
if roi is None:
break
self.CreateSurface(roi)
def SendProgress(self, obj, msg):
prog = obj.GetProgress()
self.pipe.send([prog, msg])
def CreateSurface(self, roi):
if self.from_binary:
a_mask = numpy.array(self.mask[roi.start + 1: roi.stop + 1,
1:, 1:])
image = converters.to_vtk(a_mask, self.spacing, roi.start,
"AXIAL")
del a_mask
else:
a_image = numpy.array(self.image[roi])
if self.algorithm == u'InVesalius 3.b2':
a_mask = numpy.array(self.mask[roi.start + 1: roi.stop + 1,
1:, 1:])
a_image[a_mask == 1] = a_image.min() - 1
a_image[a_mask == 254] = (self.min_value + self.max_value) / 2.0
image = converters.to_vtk(a_image, self.spacing, roi.start,
"AXIAL")
gauss = vtk.vtkImageGaussianSmooth()
gauss.SetInput(image)
gauss.SetRadiusFactor(0.3)
gauss.ReleaseDataFlagOn()
gauss.Update()
del image
image = gauss.GetOutput()
del gauss
del a_mask
else:
image = converters.to_vtk(a_image, self.spacing, roi.start,
"AXIAL")
del a_image
if self.imagedata_resolution:
# image = iu.ResampleImage3D(image, self.imagedata_resolution)
image = ResampleImage3D(image, self.imagedata_resolution)
flip = vtk.vtkImageFlip()
flip.SetInput(image)
flip.SetFilteredAxis(1)
flip.FlipAboutOriginOn()
flip.ReleaseDataFlagOn()
flip.Update()
del image
image = flip.GetOutput()
del flip
#filename = tempfile.mktemp(suffix='_%s.vti' % (self.pid))
#writer = vtk.vtkXMLImageDataWriter()
#writer.SetInput(mask_vtk)
#writer.SetFileName(filename)
#writer.Write()
#print "Writing piece", roi, "to", filename
# Create vtkPolyData from vtkImageData
#print "Generating Polydata"
#if self.mode == "CONTOUR":
#print "Contour"
contour = vtk.vtkContourFilter()
contour.SetInput(image)
#contour.SetInput(flip.GetOutput())
if self.from_binary:
contour.SetValue(0, 127) # initial threshold
else:
contour.SetValue(0, self.min_value) # initial threshold
contour.SetValue(1, self.max_value) # final threshold
contour.ComputeScalarsOn()
contour.ComputeGradientsOn()
contour.ComputeNormalsOn()
contour.ReleaseDataFlagOn()
contour.Update()
#contour.AddObserver("ProgressEvent", lambda obj,evt:
# self.SendProgress(obj, _("Generating 3D surface...")))
polydata = contour.GetOutput()
del image
del contour
#else: #mode == "GRAYSCALE":
#mcubes = vtk.vtkMarchingCubes()
#mcubes.SetInput(flip.GetOutput())
#mcubes.SetValue(0, self.min_value)
#mcubes.SetValue(1, self.max_value)
#mcubes.ComputeScalarsOff()
#mcubes.ComputeGradientsOff()
#mcubes.ComputeNormalsOff()
#mcubes.AddObserver("ProgressEvent", lambda obj,evt:
#self.SendProgress(obj, _("Generating 3D surface...")))
#polydata = mcubes.GetOutput()
#triangle = vtk.vtkTriangleFilter()
#triangle.SetInput(polydata)
#triangle.AddObserver("ProgressEvent", lambda obj,evt:
#self.SendProgress(obj, _("Generating 3D surface...")))
#triangle.Update()
#polydata = triangle.GetOutput()
#if self.decimate_reduction:
##print "Decimating"
#decimation = vtk.vtkDecimatePro()
#decimation.SetInput(polydata)
#decimation.SetTargetReduction(0.3)
#decimation.AddObserver("ProgressEvent", lambda obj,evt:
#self.SendProgress(obj, _("Generating 3D surface...")))
##decimation.PreserveTopologyOn()
#decimation.SplittingOff()
#decimation.BoundaryVertexDeletionOff()
#polydata = decimation.GetOutput()
self.pipe.send(None)
filename = tempfile.mktemp(suffix='_%s.vtp' % (self.pid))
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInput(polydata)
writer.SetFileName(filename)
writer.Write()
print "Writing piece", roi, "to", filename
del polydata
del writer
self.q_out.put(filename)
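# --- Hedged usage sketch (not part of the original module) ---
# InVesalius drives this worker from its surface-creation code; a rough
# driver under the same assumptions (Python 2, VTK 5 style SetInput(),
# numpy memmaps already written to disk, illustrative parameter values)
# could look like:
#
#   parent_conn, child_conn = multiprocessing.Pipe()
#   q_in, q_out = multiprocessing.Queue(), multiprocessing.Queue()
#   p = SurfaceProcess(child_conn, img_file, img_shape, 'int16',
#                      mask_file, mask_shape, 'uint8', spacing, "CONTOUR",
#                      200, 3033, 0.0, 0.5, 5, "en", False, q_in, q_out,
#                      True, u'Default', 0)
#   p.start()
#   for roi in rois:      # e.g. slice(0, 50), slice(50, 100), ...
#       q_in.put(roi)
#   q_in.put(None)        # sentinel understood by run() to stop the loop
#   # each finished piece arrives on q_out as a .vtp filename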
|
givanaldo/invesalius3
|
invesalius/data/surface_process.py
|
Python
|
gpl-2.0
| 7,484
|
[
"VTK"
] |
3875793f6e62003f2ffb5cb2ebd96b46e1f7ead3401082fdbe6b7963e9f3fd4e
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 446 $
# $Date: 2009-01-22 22:20:21 -0500 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/environment/EnvironmentLoaderScript.py $
import sys
import os
import rlglue.network.Network as Network
from ClientEnvironment import ClientEnvironment
import EnvironmentLoader as EnvironmentLoader
def main():
usage = "PYTHONPATH=<Path to RLGlue> python -c 'import rlglue.environment.EnvironmentLoaderScript' <Environment>";
envVars = "The following environment variables are used by the environment to control its function:\n" + \
"RLGLUE_HOST : If set the agent will use this ip or hostname to connect to rather than " + Network.kLocalHost + "\n" + \
"RLGLUE_PORT : If set the agent will use this port to connect on rather than " + str(Network.kDefaultPort) + "\n"
if (len(sys.argv) < 2):
print usage
print envVars
sys.exit(1)
EnvironmentLoader.loadEnvironmentLikeScript()
main()
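# Hedged usage note (not part of the original script): a typical invocation,
# assuming an environment class importable as SampleEnvironment and an
# rl_glue core already listening on the default host and port, would be
#
#   PYTHONPATH=/path/to/RLGlue python -c \
#       'import rlglue.environment.EnvironmentLoaderScript' SampleEnvironment
#
# with RLGLUE_HOST / RLGLUE_PORT exported beforehand to point at a
# non-default rl_glue server.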
|
shiwalimohan/RLInfiniteMario
|
system/codecs/Python/src/rlglue/environment/EnvironmentLoaderScript.py
|
Python
|
gpl-2.0
| 1,627
|
[
"Brian"
] |
ae82043da81be1c0fdcdf9668c67c1742eabf0dfdc5403cce7c27d58a12c1c62
|
from director.shallowCopy import shallowCopy
import director.vtkAll as vtk
from vtk.util import numpy_support
import numpy as np
def numpyToPolyData(pts, pointData=None, createVertexCells=True):
pd = vtk.vtkPolyData()
pd.SetPoints(getVtkPointsFromNumpy(pts.copy()))
if pointData is not None:
for key, value in pointData.iteritems():
addNumpyToVtk(pd, value.copy(), key)
if createVertexCells:
f = vtk.vtkVertexGlyphFilter()
f.SetInputData(pd)
f.Update()
pd = shallowCopy(f.GetOutput())
return pd
def numpyToImageData(img, flip=True, vtktype=vtk.VTK_UNSIGNED_CHAR):
if flip:
img = np.flipud(img)
height, width, numChannels = img.shape
image = vtk.vtkImageData()
image.SetDimensions(width, height, 1)
image.AllocateScalars(vtktype, numChannels)
scalars = getNumpyFromVtk(image, 'ImageScalars')
if numChannels > 1:
scalars[:] = img.reshape(width*height, numChannels)[:]
else:
scalars[:] = img.reshape(width*height)[:]
return image
def getNumpyFromVtk(dataObj, arrayName='Points', arrayType='points'):
assert arrayType in ('points', 'cells')
if arrayName == 'Points':
vtkArray = dataObj.GetPoints().GetData()
elif arrayType == 'points':
vtkArray = dataObj.GetPointData().GetArray(arrayName)
else:
vtkArray = dataObj.GetCellData().GetArray(arrayName)
if not vtkArray:
raise KeyError('Array not found')
return numpy_support.vtk_to_numpy(vtkArray)
def getVtkPointsFromNumpy(numpyArray):
points = vtk.vtkPoints()
points.SetData(getVtkFromNumpy(numpyArray))
return points
def getVtkPolyDataFromNumpyPoints(points):
return numpyToPolyData(points)
def getVtkFromNumpy(numpyArray):
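    # The DeleteEvent observer attached below only exists to close over
    # numpyArray: numpy_to_vtk shares the numpy buffer rather than copying it,
    # so the closure keeps the source array alive for as long as the returned
    # vtkArray is.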
def MakeCallback(numpyArray):
def Closure(caller, event):
closureArray = numpyArray
return Closure
vtkArray = numpy_support.numpy_to_vtk(numpyArray)
vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
return vtkArray
def addNumpyToVtk(dataObj, numpyArray, arrayName, arrayType='points'):
assert arrayType in ('points', 'cells')
vtkArray = getVtkFromNumpy(numpyArray)
vtkArray.SetName(arrayName)
if arrayType == 'points':
assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
dataObj.GetPointData().AddArray(vtkArray)
else:
assert dataObj.GetNumberOfCells() == numpyArray.shape[0]
dataObj.GetCellData().AddArray(vtkArray)
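# --- Hedged usage sketch (not part of the original module) ---
# Round-tripping a point cloud through vtkPolyData; the array name
# 'intensity' is an arbitrary example.
#
#   pts = np.random.rand(100, 3)
#   pd = numpyToPolyData(pts, pointData={'intensity': np.random.rand(100)})
#   xyz = getNumpyFromVtk(pd, 'Points')        # back to an Nx3 float array
#   vals = getNumpyFromVtk(pd, 'intensity')    # per-point scalars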
|
patmarion/director
|
src/python/director/vtkNumpy.py
|
Python
|
bsd-3-clause
| 2,528
|
[
"VTK"
] |
64e531246623a6f321ba006a0898199633fe895c7d417fe1c2a52f905fbcb1f6
|
import sys
import unittest
sys.path.append('./code')
from models import GSM, Distribution
from tools import logmeanexp, mapp
from numpy import zeros, all, abs, array, square, log, pi, sum, mean, inf, exp
from numpy import histogram, max, sqrt
from numpy.random import randn, rand
from scipy import integrate
from scipy.stats import laplace
mapp.max_processes = 1
Distribution.VERBOSITY = 0
class Tests(unittest.TestCase):
def test_energy_gradient(self):
"""
Tests whether the energy gradient is similar to a numerical gradient.
"""
step_size = 1E-5
model = GSM(3, num_scales=7)
model.initialize('laplace')
# samples and true gradient
X = model.sample(100)
G = model.energy_gradient(X)
# numerical gradient
N = zeros(G.shape)
for i in range(N.shape[0]):
d = zeros(X.shape)
d[i] = step_size
N[i] = (model.energy(X + d) - model.energy(X - d)) / (2. * step_size)
# test consistency of energy and gradient
self.assertTrue(all(abs(G - N) < 1E-5))
def test_loglikelihood(self):
"""
Tests whether 1-dimensional GSMs are normalized. Tests the log-likelihood
of several instantiations of the GSM.
"""
# check whether the log-likelihood of 1D GSMs is normalized
for num_scales in [1, 2, 3, 4, 5]:
model = GSM(1, num_scales=num_scales)
# implied probability density of model
pdf = lambda x: exp(model.loglikelihood(array(x).reshape(1, -1)))
# compute normalization constant and upper bound on error
partf, err = integrate.quad(pdf, -inf, inf)
self.assertTrue(partf - err <= 1.)
self.assertTrue(partf + err >= 1.)
# test the log-likelihood of a couple of GSMs
for dim in [1, 2, 3, 4, 5]:
for num_scales in [1, 2, 3, 4, 5]:
# create Gaussian scale mixture
model = GSM(dim, num_scales=num_scales)
scales = model.scales.reshape(-1, 1)
# create random data
data = randn(model.dim, 100)
# evaluate likelihood
ll = logmeanexp(
-0.5 * sum(square(data), 0) / square(scales)
- model.dim * log(scales)
- model.dim / 2. * log(2. * pi), 0)
self.assertTrue(all(abs(ll - model.loglikelihood(data)) < 1E-6))
# random scales
scales = rand(num_scales, 1) + 0.5
model.scales[:] = scales.flatten()
# sample data from model
data = model.sample(100)
# evaluate likelihood
ll = logmeanexp(
-0.5 * sum(square(data), 0) / square(scales)
- model.dim * log(scales)
- model.dim / 2. * log(2. * pi), 0)
self.assertTrue(all(abs(ll - model.loglikelihood(data)) < 1E-6))
def test_train(self):
"""
Tests whether training can recover parameters.
"""
for dim in [1, 2, 3]:
gsm1 = GSM(dim, 2)
gsm1.scales = array([0.5, 4.])
data = gsm1.sample(20000)
gsm2 = GSM(dim, 2)
gsm2.gamma = 0.
gsm2.train(data, max_iter=100)
self.assertTrue(any(abs(gsm1.scales[0] - gsm2.scales) < 1E-1))
self.assertTrue(any(abs(gsm1.scales[1] - gsm2.scales) < 1E-1))
def test_sample(self):
"""
Compares model density with histogram obtained from samples.
"""
model = GSM(1, 3)
model.scales = array([1., 3., 8.])
data = model.sample(50000)
try:
hist, x = histogram(data, 100, density=True)
        except TypeError:
            # fall back to the deprecated 'normed' keyword with older versions of numpy
            hist, x = histogram(data, 100, normed=True)
x = (x[1:] + x[:-1]) / 2.
pdf = exp(model.loglikelihood(x.reshape(1, -1)))
self.assertTrue(all(abs(pdf - hist) < 1E-1))
if __name__ == '__main__':
unittest.main()
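# Hedged note (not part of the original tests): the closed form checked in
# test_loglikelihood above is the Gaussian scale mixture density
#   p(x) = (1/K) * sum_k N(x; 0, s_k^2 I)
#        = (1/K) * sum_k exp(-||x||^2 / (2 s_k^2)) / ((2*pi)^(d/2) * s_k^d),
# evaluated in log space via logmeanexp for numerical stability.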
|
lucastheis/isa
|
code/models/tests/gsm_test.py
|
Python
|
mit
| 3,467
|
[
"Gaussian"
] |
e35dc55af92afc19504d7087f90fc8f7fb1d2d5e81ab2a02c2755eaf7544af54
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Selected CI
Simple usage::
>>> from pyscf import gto, scf, ao2mo, fci
>>> mol = gto.M(atom='C 0 0 0; C 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
>>> h2 = ao2mo.kernel(mol, mf.mo_coeff)
>>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
@lib.with_doc(direct_spin1.contract_2e.__doc__)
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
link_index = _all_linkstr_index(ci_strs, norb, nelec)
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
eri = ao2mo.restore(1, eri, norb)
eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
idx,idy = numpy.tril_indices(norb, -1)
idx = idx * norb + idy
eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
fcivec = ci_coeff.reshape(na,nb)
# (bb|bb)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
ci1T = numpy.zeros((nb,na))
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ci1T.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
ci1 = lib.transpose(ci1T, out=fcivecT)
else:
ci1 = numpy.zeros_like(fcivec)
# (aa|aa)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # Adding h_ps below because the contract_2e function computes the
# contraction "E_{pq}E_{rs} V_{pqrs} |CI>" (~ p^+ q r^+ s |CI>) while
# the actual contraction for (aa|aa) and (bb|bb) part is
# "p^+ r^+ s q V_{pqrs} |CI>". To make (aa|aa) and (bb|bb) code reproduce
# "p^+ q r^+ s |CI>", we employ the identity
# p^+ q r^+ s = p^+ r^+ s q + delta(qr) p^+ s
# the second term is the source of h_ps
h_ps = numpy.einsum('pqqs->ps', eri)
eri1 = eri * 2
for k in range(norb):
eri1[:,:,k,k] += h_ps/nelec[0]
eri1[k,k,:,:] += h_ps/nelec[1]
eri1 = ao2mo.restore(4, eri1, norb)
# (bb|aa)
libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
cd_indexa.ctypes.data_as(ctypes.c_void_p),
cd_indexb.ctypes.data_as(ctypes.c_void_p))
return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
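# Hedged note (not part of the original module): contract_2e above returns the
# sigma vector H2e|CI> restricted to the selected determinant space defined by
# ci_strs; in the kernels below the one-electron part is first folded into the
# two-electron integrals with direct_spin1.absorb_h1e, so this single
# contraction is what the Davidson solver uses as its hop() operation.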
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
strs = numpy.asarray(strs, dtype=numpy.int64)
nstrs = len(strs)
nvir = norb - nelec
strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
libfci.SCIselect_strs.restype = ctypes.c_int
nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
eri_pq_max.ctypes.data_as(ctypes.c_void_p),
civec_max.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(myci.select_cutoff),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
strs_add = sorted(set(strs_add[:nadd]) - set(strs))
return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
if isinstance(civec_strs, (tuple, list)):
nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
ci_coeff = lib.asarray(civec_strs)
else:
ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
na = len(strsa)
nb = len(strsb)
ci0 = ci_coeff.reshape(-1,na,nb)
civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
civec_a_max = civec_a_max[ci_aidx]
civec_b_max = civec_b_max[ci_bidx]
strsa = strsa[ci_aidx]
strsb = strsb[ci_bidx]
eri = ao2mo.restore(1, eri, norb)
eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
strsa = numpy.append(strsa, strsa_add)
strsb = numpy.append(strsb, strsb_add)
aidx = numpy.argsort(strsa)
bidx = numpy.argsort(strsb)
ci_strs = (strsa[aidx], strsb[bidx])
aidx = numpy.where(aidx < len(ci_aidx))[0]
bidx = numpy.where(bidx < len(ci_bidx))[0]
ma = len(strsa)
mb = len(strsb)
cs = []
for i in range(ci0.shape[0]):
ci1 = numpy.zeros((ma,mb))
tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
lib.takebak_2d(ci1, tmp, aidx, bidx)
cs.append(_as_SCIvector(ci1, ci_strs))
if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
cs = cs[0]
return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
'''Given intermediates, the link table to generate input strs
'''
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nstrs),
ctypes.c_int(nelec),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
return cre_des_linkstr(strs, norb, nelec, True)
def des_des_linkstr(strs, norb, nelec, tril=False):
'''Given intermediates, the link table to generate input strs
'''
if nelec < 2:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
ninter = len(inter1)
inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
inter1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec-1),
ctypes.c_int(ninter))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 2
link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def des_des_linkstr_tril(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
return des_des_linkstr(strs, norb, nelec, True)
def gen_des_linkstr(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
if nelec < 1:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 1
link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def gen_cre_linkstr(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
if nelec == norb:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
libfci.SCIcre_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
hdiag = numpy.empty(na*nb)
h1e = numpy.asarray(h1e, order='C')
eri = ao2mo.restore(1, eri, norb)
jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
occslsta = cistring._strs2occslst(ci_strs[0], norb)
occslstb = cistring._strs2occslst(ci_strs[1], norb)
libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
occslsta.ctypes.data_as(ctypes.c_void_p),
occslstb.ctypes.data_as(ctypes.c_void_p))
return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
if isinstance(ci0, _SCIvector):
if ci0.size == na*nb:
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.reshape(-1)
precond = lambda x, e, *args: x/(hdiag-e+1e-4)
#e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
# TODO: initial guess from CISD
if isinstance(ci0, _SCIvector):
if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
numpy.asarray([int('1'*nelec[1], 2)]))
ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
log.warn('''
Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
HOMO->LUMO excitations are included in the initial guess.
NOTE: This may introduce excited states of different symmetry.\n''')
corea = '1' * (nelec[0]-1)
coreb = '1' * (nelec[1]-1)
ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
ci_strs = ci0._strs
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.ravel()
precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)
namax = cistring.num_strings(norb, nelec[0])
nbmax = cistring.num_strings(norb, nelec[1])
e_last = 0
float_tol = myci.start_tol
tol_decay_rate = myci.tol_decay_rate
conv = False
for icycle in range(norb):
ci_strs = ci0[0]._strs
float_tol = max(float_tol*tol_decay_rate, tol*1e2)
log.debug('cycle %d ci.shape %s float_tol %g',
icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
#e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)
e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
de, e_last = min(e)-e_last, min(e)
log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)
else:
ci0 = [_as_SCIvector(ci0, ci_strs)]
de, e_last = e-e_last, e
log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)
if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
conv = True
break
last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
na = len(ci0[0]._strs[0])
nb = len(ci0[0]._strs[1])
if ((.99 < na/last_ci0_size[0] < 1.01) and
(.99 < nb/last_ci0_size[1] < 1.01)):
conv = True
break
ci_strs = ci0[0]._strs
log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
if nroots > 1:
for i, ei in enumerate(e+ecore):
log.info('Selected CI state %d E = %.15g', i, ei)
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
log.info('Selected CI E = %.15g', e+ecore)
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
level_shift, tol, lindep, max_cycle,
max_space, nroots, davidson_only,
pspace_size, select_cutoff=select_cutoff,
ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
**kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
r'''Spin separated 1-particle density matrices.
The return values include two density matrices: (alpha,alpha), (beta,beta)
dm1[p,q] = <q^\dagger p>
The convention is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
def make_rdm1(civec_strs, norb, nelec, link_index=None):
r'''Spin-traced 1-particle density matrix.
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention is based on McWeeney's book, Eq (5.4.20)
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
rdm1a, rdm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
# dm[p,q,r,s] = <|p^+ q r^+ s|>
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
r'''Spin separated 2-particle density matrices.
The return values include three density matrices:
(alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)
2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
fcivec = ci_coeff.reshape(na,nb)
# (bb|aa) and (aa|bb)
dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
norb, nelec, (cd_indexa,cd_indexb), 0)[1]
# (aa|aa)
dm2aa = numpy.zeros([norb]*4)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2aa.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
# (bb|bb)
dm2bb = numpy.zeros([norb]*4)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2bb.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
r'''Spin-traced two-particle density matrix.
2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
\langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
\langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
\langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.
'''
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
dm2aa += dm2bb
dm2aa += dm2ab
dm2aa += dm2ab.transpose(2,3,0,1)
return dm2aa
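# A hedged usage sketch, not part of the original module: evaluate the
# electronic energy from the density matrices above, using the contraction
# conventions documented in make_rdm1/make_rdm2 (h1e and eri follow the same
# conventions as elsewhere in this file; civec_strs is assumed to be a tagged
# _SCIvector returned by the selected-CI kernel).
def _example_energy_from_rdms(h1e, eri, civec_strs, norb, nelec):
    '''Electronic energy (without the constant ecore) from the 1- and 2-RDMs.'''
    dm1 = make_rdm1(civec_strs, norb, nelec)
    dm2 = make_rdm2(civec_strs, norb, nelec)
    eri_full = ao2mo.restore(1, eri, norb)                # (norb,norb,norb,norb), chemists' notation
    e1 = numpy.einsum('pq,qp', h1e, dm1)                  # one-electron part
    e2 = numpy.einsum('pqrs,pqrs', eri_full, dm2) * .5    # two-electron part
    return e1 + e2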
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
r'''Spin separated transition 1-particle density matrices.
See also function :func:`make_rdm1s`
1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
'''
cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
assert(all(ci_strs[0] == ci_strs1[0]) and
all(ci_strs[1] == ci_strs1[1]))
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
r'''Spin traced transition 1-particle density matrices.
See also function :func:`make_rdm1`
1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
+ \langle q_\beta^\dagger p_\beta \rangle`
'''
rdm1a, rdm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
return rdm1a + rdm1b
def spin_square(civec_strs, norb, nelec):
'''Spin square for RHF-FCI CI wfn only (obtained from a spin-degenerate
Hamiltonian)'''
ci1 = contract_ss(civec_strs, norb, nelec)
ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)
s = numpy.sqrt(ss+.25) - .5
multip = s*2+1
return ss, multip
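# A hedged numerical illustration, not part of the original module: the same
# arithmetic as in spin_square above, s = sqrt(<S^2> + 1/4) - 1/2 and
# multiplicity 2s+1.  For instance ss=2.0 gives (1.0, 3.0), i.e. a triplet,
# and ss=0.75 gives (0.5, 2.0), i.e. a doublet.
def _example_multiplicity_from_ss(ss):
    s = numpy.sqrt(ss + .25) - .5
    return s, s*2 + 1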
def contract_ss(civec_strs, norb, nelec):
r''' S^2 |\Psi\rangle
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
strsa, strsb = ci_strs
neleca, nelecb = nelec
ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))
def gen_map(fstr_index, strs, nelec, des=True):
a_index = fstr_index(strs, norb, nelec)
amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
if des:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,1]] = tab[:,2:]
else:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,0]] = tab[:,2:]
return amap
if neleca > 0:
ades = gen_map(gen_des_linkstr, strsa, neleca)
else:
ades = None
if nelecb > 0:
bdes = gen_map(gen_des_linkstr, strsb, nelecb)
else:
bdes = None
if neleca < norb:
acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
else:
acre = None
if nelecb < norb:
bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
else:
bcre = None
def trans(ci1, aindex, bindex):
if aindex is None or bindex is None:
return None
ma = len(aindex)
mb = len(bindex)
t1 = numpy.zeros((ma,mb))
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(ci_coeff, addra, addrb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: t1[addra.reshape(-1,1),addrb] += citmp
lib.takebak_2d(t1, citmp, maska, maskb)
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(t1, maska, maskb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: ci1[maska.reshape(-1,1), maskb] += citmp
lib.takebak_2d(ci1, citmp, addra, addrb)
ci1 = numpy.zeros_like(ci_coeff)
trans(ci1, ades, bcre) # S+*S-
trans(ci1, acre, bdes) # S-*S+
ci1 *= .5
ci1 += (neleca-nelecb)**2*.25*ci_coeff
return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
ci0 = numpy.zeros((na,nb))
lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)
return ci0
def from_fci(fcivec, ci_strs, norb, nelec):
fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
fcivec = fcivec.reshape(na,nb)
civec = lib.take_2d(fcivec, addrsa, addrsb)
return _as_SCIvector(civec, ci_strs)
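# A hedged usage sketch, not part of the original module: to_fci/from_fci map
# between the compact selected-CI representation and the full FCI vector.  The
# round trip reproduces the selected coefficients, since determinants that were
# never selected are simply zero in the full vector.  `civec` is assumed to be
# an _SCIvector from a SelectedCI calculation with matching norb/nelec.
def _example_to_from_fci_roundtrip(civec, norb, nelec):
    fcivec = to_fci(civec, norb, nelec)                      # scatter into the full FCI space
    civec2 = from_fci(fcivec, civec._strs, norb, nelec)      # gather the selected part back
    return abs(numpy.asarray(civec) - numpy.asarray(civec2)).max()   # numerically zero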
class SelectedCI(direct_spin1.FCISolver):
ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)
def __init__(self, mol=None):
direct_spin1.FCISolver.__init__(self, mol)
##################################################
# don't modify the following attributes, they are not input options
#self.converged = False
#self.ci = None
self._strs = None
keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',
'start_tol', 'tol_decay_rate'))
self._keys = self._keys.union(keys)
def dump_flags(self, verbose=None):
direct_spin1.FCISolver.dump_flags(self, verbose)
logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)
logger.info(self, 'select_cutoff %g', self.select_cutoff)
def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):
# The argument civec_strs is a plain CI vector in FCISolver.contract_2e.
# Save and patch self._strs so that this contract_2e function stays
# compatible with FCISolver.contract_2e.
if getattr(civec_strs, '_strs', None) is not None:
self._strs = civec_strs._strs
else:
assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
civec_strs = _as_SCIvector(civec_strs, self._strs)
return contract_2e(eri, civec_strs, norb, nelec, link_index)
def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
na = len(ci_strs[0])
nb = len(ci_strs[1])
ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)
return [_as_SCIvector(x, ci_strs) for x in ci0]
def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
return make_hdiag(h1e, eri, ci_strs, norb, nelec)
enlarge_space = enlarge_space
kernel = kernel_float_space
kernel_fixed_space = kernel_fixed_space
# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
# tol=None, lindep=None, max_cycle=None,
# max_memory=None, verbose=None, **kwargs):
# ci_strs = getattr(ci0, '_strs', self._strs)
# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
# ci0, link_index, tol, lindep, 6,
# max_memory, verbose, **kwargs)
@lib.with_doc(spin_square.__doc__)
def spin_square(self, civec_strs, norb, nelec):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)
def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
addra, addrb = numpy.where(abs(ci) > tol)
if return_strs:
strsa = [bin(x) for x in strsa[addra]]
strsb = [bin(x) for x in strsb[addrb]]
return list(zip(ci[addra,addrb], strsa, strsb))
else:
occslsta = cistring._strs2occslst(strsa[addra], norb)
occslstb = cistring._strs2occslst(strsb[addrb], norb)
return list(zip(ci[addra,addrb], occslsta, occslstb))
def contract_ss(self, fcivec, norb, nelec):
return contract_ss(fcivec, norb, nelec)
@lib.with_doc(make_rdm1s.__doc__)
def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm1s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm1.__doc__)
def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
@lib.with_doc(make_rdm2s.__doc__)
def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm2.__doc__)
def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2(civec_strs, norb, nelec, link_index)
def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
if neleca > 1 and nelecb > 1:
dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
else:
dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
nelec_tot = sum(nelec)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
if nelec_tot > 1:
dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
else:
dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
return dm1, dm2
@lib.with_doc(trans_rdm1s.__doc__)
def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
@lib.with_doc(trans_rdm1.__doc__)
def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
if spin is None:
spin = self.spin
if ci_strs is None:
ci_strs = self._strs
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
if tril:
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
else:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
ci_strs = getattr(civec_strs, '_strs', ci_strs)
if ci_strs is not None:
strsa, strsb = ci_strs
strsa = numpy.asarray(strsa)
strsb = numpy.asarray(strsb)
ci_strs = (strsa, strsb)
return civec_strs, (neleca, nelecb), ci_strs
def _all_linkstr_index(ci_strs, norb, nelec):
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelec[1])
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
# numpy.ndarray does not allow attaching attributes. Subclass numpy.ndarray
# so that the ._strs tag can be carried along with the CI coefficients.
class _SCIvector(numpy.ndarray):
def __array_finalize__(self, obj):
self._strs = getattr(obj, '_strs', None)
# Whenever the contents of the array are modified (through a ufunc), the tag
# should be expired. Cast the ufunc output back to a plain ndarray.
def __array_wrap__(self, out, context=None):
return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
civec = civec.view(_SCIvector)
civec._strs = ci_strs
return civec
def _as_SCIvector_if_not(civec, ci_strs):
if getattr(civec, '_strs', None) is None:
civec = _as_SCIvector(civec, ci_strs)
return civec
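# A hedged usage sketch, not part of the original module: the _strs tag added by
# _as_SCIvector survives view/reshape operations, but ufunc results are cast back
# to plain ndarray by _SCIvector.__array_wrap__, so vectors derived through
# arithmetic have to be re-tagged explicitly before they are passed around.
def _example_strs_tag_behaviour(civec, ci_strs):
    tagged = _as_SCIvector(numpy.asarray(civec, dtype=float), ci_strs)
    reshaped = tagged.reshape(-1)        # still an _SCIvector, ._strs is carried along
    scaled = tagged * 2.0                # ufunc output: plain ndarray, tag is dropped
    retagged = _as_SCIvector_if_not(scaled, ci_strs)   # re-attach the tag
    return retagged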
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.fci import spin_op
from pyscf.fci import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
['H', ( 1., 2. , 3. )],
['H', ( 1., 2. , 4. )],
]
mol.basis = 'sto-3g'
mol.build()
m = scf.RHF(mol)
m.kernel()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
eri = eri.reshape(norb,norb,norb,norb)
e1, c1 = kernel(h1e, eri, norb, nelec)
e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
print(c1.shape, c2.shape)
dm1_1 = make_rdm1(c1, norb, nelec)
dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
print(abs(dm1_1 - dm1_2).sum())
dm2_1 = make_rdm2(c1, norb, nelec)
dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
print(abs(dm2_1 - dm2_2).sum())
myci = SelectedCI()
e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
print(e - -11.894559902235565)
print(myci.large_ci(c1, norb, nelec))
print(myci.spin_square(c1, norb, nelec)[0] -
spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])
myci = SelectedCI()
myci = addons.fix_spin_(myci)
e1, c1 = myci.kernel(h1e, eri, norb, nelec)
print(e1, e1 - -11.89467612053687)
print(myci.spin_square(c1, norb, nelec))
|
gkc1000/pyscf
|
pyscf/fci/selected_ci.py
|
Python
|
apache-2.0
| 41,715
|
[
"PySCF",
"exciting"
] |
dbe8d213cf43ab2e29a62d9decb89a8eb925d09fe27d2a2da44db80be5119513
|
from os.path import join, abspath, dirname, getsize, curdir
from netCDF4 import Dataset
from flyingpigeon import config
import logging
logger = logging.getLogger(__name__)
DIR_SHP = config.shapefiles_dir()
def has_Lambert_Conformal(resource):
"""
Check if grid is organised as Lambert_Conformal
:param resource: file to be checked
:return Boolean: True/False
"""
if type(resource) != list:
resource = [resource]
for nc in resource:
ds = Dataset(nc)
if 'Lambert_Conformal' not in ds.variables.keys():
return False
return True
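# A hedged usage sketch, not part of the original module: guard a workflow step
# on the grid projection of the input files.  The file name below is an
# illustrative assumption only.
def _example_has_lambert_conformal():
    resource = ['pr_EUR-11_day_19700101-19701231.nc']
    if has_Lambert_Conformal(resource):
        logger.info('input is stored on a Lambert conformal grid')
    else:
        logger.info('input is not stored on a Lambert conformal grid')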
def call(resource=[], variable=None, dimension_map=None, calc=None,
calc_grouping=None, conform_units_to=None, memory_limit=None, prefix=None,
regrid_destination=None, regrid_options='bil', level_range=None,
geom=None, output_format_options=False, search_radius_mult=2.,
select_nearest=False, select_ugid=None, spatial_wrapping=None, t_calendar=None, time_region=None,
time_range=None, dir_output=None, output_format='nc'):
'''
ocgis operation call
:param resource:
:param variable: variable in the input file to be picked
:param dimension_map: dimension map in case of unconventional storage of data
:param calc: ocgis calc syntax for the calculation part
:param calc_grouping: time aggregate grouping
:param conform_units_to:
:param memory_limit: limit the amount of data to be loaded into memory at once; \
if None (default), free memory is detected by birdhouse
:param level_range: subset of given levels
:param prefix: string for the file base name
:param regrid_destination: file defining the target grid for the output (griddes.txt or netCDF file)
:param geom: name of shapefile stored in birdhouse shape cabinet
:param output_format_options: output options for netCDF, e.g. compression level
:param regrid_options: method used for regridding:
'bil' = bilinear interpolation
'bic' = bicubic interpolation
'dis' = distance-weighted average remapping
'nn' = nearest neighbour
'con' = first-order conservative remapping
'laf' = largest area fraction remapping
:param search_radius_mult: search radius for point geometries. All included gridboxes will be returned
:param select_nearest: nearest neighbour selection for point geometries
:param select_ugid: ugid for appropriate polygons
:param spatial_wrapping: how to handle coordinates in case of subsets, options: None (default), 'wrap', 'unwrap'
:param time_region: select single month
:param time_range: sequence of two datetime.datetime objects to mark start and end point
:param dir_output: directory for the output file (default: current directory)
:param output_format:
:return: output file path
'''
logger.info('Start ocgis module call function')
from ocgis import OcgOperations, RequestDataset, env
from ocgis.util.large_array import compute
import uuid
# prepare the environment
env.DIR_SHPCABINET = DIR_SHP
env.OVERWRITE = True
# env.DIR_OUTPUT = dir_output
# logger.debug(' **** env.DIR_OUTPUT = %s ' % env.DIR_OUTPUT)
if dir_output is None:
dir_output = abspath(curdir)
#
# if geom is not None:
# spatial_reorder = True
# spatial_wrapping = 'wrap'
# else:
# spatial_reorder = False
# spatial_wrapping = None
#
if spatial_wrapping == 'wrap':
spatial_reorder = True
else:
spatial_reorder = False
logger.debug('spatial_reorder: %s and spatial_wrapping: %s ' % (spatial_reorder, spatial_wrapping))
if prefix is None:
prefix = str(uuid.uuid1())
env.PREFIX = prefix
if output_format_options is False:
output_format_options = None
elif output_format_options is True:
output_format_options = {'data_model': 'NETCDF4', # NETCDF4_CLASSIC
'variable_kwargs': {'zlib': True, 'complevel': 9}}
else:
logger.info('output_format_options are set to %s ' % (output_format_options))
if type(resource) != list:
resource = list([resource])
# execute ocgis
logger.info('Execute ocgis module call function')
# if has_Lambert_Conformal(resource) is True and geom is not None:
# logger.debug('input has Lambert_Conformal projection and can not prcessed with ocgis:\
# https://github.com/NCPP/ocgis/issues/424')
# return None
# else:
try:
logger.debug('call module curdir = %s ' % abspath(curdir))
rd = RequestDataset(resource, variable=variable, level_range=level_range,
dimension_map=dimension_map, conform_units_to=conform_units_to,
time_region=time_region, t_calendar=t_calendar, time_range=time_range)
ops = OcgOperations(dataset=rd,
output_format_options=output_format_options,
dir_output=dir_output,
spatial_wrapping=spatial_wrapping,
spatial_reorder=spatial_reorder,
# regrid_destination=rd_regrid,
# options=options,
calc=calc,
calc_grouping=calc_grouping,
geom=geom,
output_format=output_format,
prefix=prefix,
search_radius_mult=search_radius_mult,
select_nearest=select_nearest,
select_ugid=select_ugid,
add_auxiliary_files=False)
logger.info('OcgOperations set')
except Exception as e:
logger.debug('failed to setup OcgOperations: %s' % e)
raise
return None
try:
from numpy import sqrt
from flyingpigeon.utils import FreeMemory
if memory_limit is None:
f = FreeMemory()
mem_kb = f.user_free
mem_mb = mem_kb / 1024.
mem_limit = mem_mb / 2. # set limit to half of the free memory
else:
mem_limit = memory_limit
if mem_limit >= 1024. * 4:
mem_limit = 1024. * 4
# 475.0 MB for openDAP
data_kb = ops.get_base_request_size()['total']
data_mb = data_kb / 1024.
if variable is None:
variable = rd.variable
logger.info('%s detected as variable' % variable)
# data_kb = size['total']/reduce(lambda x,y: x*y,size['variables'][variable]['value']['shape'])
logger.info('data_mb = %s ; memory_limit = %s ' % (data_mb, mem_limit))
except Exception as e:
logger.debug('failed to compare dataload with free memory %s ' % e)
raise
if data_mb <= mem_limit:  # input is smaller than half of the free memory
try:
logger.info('ocgis module call as ops.execute()')
geom_file = ops.execute()
except Exception as e:
logger.debug('failed to execute ocgis operation')
raise
return None
else:
##########################
# calculation of chunk size
##########################
try:
size = ops.get_base_request_size()
nb_time_coordinates_rd = size['variables'][variable]['temporal']['shape'][0]
element_in_kb = size['total']/reduce(lambda x, y: x*y, size['variables'][variable]['value']['shape'])
element_in_mb = element_in_kb / 1024.
tile_dim = sqrt(mem_limit/(element_in_mb*nb_time_coordinates_rd)) # maximum chunk size
logger.info('ocgis module call compute with chunks')
if calc is None:
calc = '%s=%s*1' % (variable, variable)
logger.info('calc set to = %s ' % calc)
ops = OcgOperations(dataset=rd,
output_format_options=output_format_options,
dir_output=dir_output,
spatial_wrapping=spatial_wrapping,
spatial_reorder=spatial_reorder,
# regrid_destination=rd_regrid,
# options=options,
calc=calc,
calc_grouping=calc_grouping,
geom=geom,
output_format=output_format,
prefix=prefix,
search_radius_mult=search_radius_mult,
select_nearest=select_nearest,
select_ugid=select_ugid,
add_auxiliary_files=False)
geom_file = compute(ops, tile_dimension=int(tile_dim), verbose=True)
logger.info('ocgis computed with chunks')
except Exception as e:
logger.debug('failed to compute ocgis with chunks')
raise
return None
logger.info('Succeeded with ocgis module call function')
############################################
# remapping according to regrid information
############################################
if regrid_destination is not None:
try:
from tempfile import mkstemp
from cdo import Cdo
cdo = Cdo()
output = '%s.nc' % uuid.uuid1()
remap = 'remap%s' % regrid_options
# look up the matching cdo remap operator and call it directly
# instead of building a command string and exec'ing it
remap_operator = getattr(cdo, [op for op in dir(cdo) if remap in op][0])
output = remap_operator(regrid_destination, input=geom_file, output=output)
except Exception as e:
logger.debug('failed to remap')
raise
return None
else:
output = geom_file
return output
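# A hedged usage sketch, not part of the original module: subset a netCDF file
# over a polygon from the birdhouse shape cabinet and aggregate to monthly
# means.  The file name, variable, shapefile name and ugid are illustrative
# assumptions, not values taken from the original code.
def _example_call_usage():
    nc_out = call(resource=['tas_EUR-44_day_19700101-19791231.nc'],
                  variable='tas',
                  calc=[{'func': 'mean', 'name': 'tas'}],    # ocgis calc syntax
                  calc_grouping=['month'],                   # monthly aggregation
                  geom='countries', select_ugid=[71],        # polygon from the shape cabinet
                  output_format='nc')
    return nc_out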
def eval_timerange(resource, time_range):
"""
quality check: verify that the given time_range is covered by the timesteps in the resource files
:param resource: input netCDF files
:param time_range: start and end date of time range [datetime,datetime]
:returns [datetime,datetime]: time_range
"""
from flyingpigeon.utils import get_time
logger.info('time_range: %s' % time_range)
if type(resource) != str:
resource.sort()
time = get_time(resource)
start = time[0]
end = time[-1]
if (time_range[0] < start or time_range[0] > end):
logger.debug('time range start %s not in input dataset covering: %s to %s' % (time_range[0], start, end))
time_range[0] = start
logger.debug('time_range start changed to first timestep of dataset')
if (time_range[1] > end or time_range[1] < start):
logger.debug('time range end %s not in input dataset covering: %s to %s' % (time_range[1], start, end))
time_range[1] = end
logger.debug('time_range end changed to last timestep of dataset')
if (time_range[0] > time_range[1]):
time_range = time_range[::-1]  # swap start and end
logger.debug('time range reversed! start was later than end ')
logger.info('time range start and end set')
return time_range
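# A hedged usage sketch, not part of the original module: clip a requested time
# range to the period actually covered by the input files.  The file names are
# illustrative assumptions only.
def _example_eval_timerange():
    from datetime import datetime
    resource = ['tas_day_19700101-19791231.nc', 'tas_day_19800101-19891231.nc']
    requested = [datetime(1960, 1, 1), datetime(1985, 12, 31)]
    # the start date is moved forward to the first timestep of the dataset,
    # the end date is kept because it lies inside the covered period
    return eval_timerange(resource, requested)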
# # check memory load
# from os import stat
# if memory_limit == None:
# f = FreeMemory()
# mem_kb = f.user_free
# mem_mb = mem_kb / 1024.
# mem_limit = mem_mb / 2. # set limit to half of the free memory
# else:
# mem_limit = memory_limit
#
# if mem_limit >= 1024. * 4:
# mem_limit = 1024. * 4
# # 475.0 MB for openDAP
#
# #if type(resource) == list :
# #data_kb = stat(resource[0]).st_size * len(resource)
# #else:
# #data_kb = stat(resource).st_size
# size = ops.get_base_request_size()['total']
# data_kb = size['total']/reduce(lambda x,y: x*y,size['variables'][variable]['value']['shape'])
# data_mb = data_kb / 1024.
#
# if variable == None:
# variable = rd.variable
# logger.info('%s as variable dedected' % (variable))
|
KatiRG/flyingpigeon
|
flyingpigeon/ocgis_module.py
|
Python
|
apache-2.0
| 12,205
|
[
"NetCDF"
] |
020efe488f0baa9ab851e31f615d034b594278dbe5d3be3bff6a37d752d515a8
|
# -*- coding: utf-8 -*-
'''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
* a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
* b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
* c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
* d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
* a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
* b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
* c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
* d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
* e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
* a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
* b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
* c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
* d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
* e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
* f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
'''
# fix a bug in uuid, import it first !!
import uuid
import os
import sys
import ConfigParser
#sys.path.append('/usr/lib/python/')
#sys.path.append('/usr/lib64/python2.4/site-packages')
#sys.path.append('/usr/lib64/python2.4/site-packages/gtk-2.0')
#sys.path.append(os.environ['CUON_PATH'])
#try:
import pygtk
#except:
# print 'No python-module pygtk found. please install first'
# sys.exit(0)
import os.path
import shlex, subprocess
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
import cuon.Addresses.addresses
import cuon.Addresses.SingleAddress
import cuon.Addresses.SinglePartner
import cuon.Addresses.SingleScheduling
print 'import Address'
import cuon.Articles.articles
import cuon.Bank.bank
import cuon.Addresses.contact
try:
import cuon.Clients.clients
except Exception, params:
print 'import failed'
print Exception, params
import cuon.Leasing.leasing
import cuon.Order.order
import cuon.Proposal.proposal
import cuon.User.user
import cuon.Preferences.preferences
import cuon.PrefsFinance.prefsFinance
import cuon.Stock.stock
import cuon.XML.MyXML
from cuon.TypeDefs.typedefs import typedefs
from cuon.Windows.windows import windows
import cPickle
import cuon.Databases.dumps
from cuon.TypeDefs.typedefs_server import typedefs_server
import cuon.Databases.cyr_load_table
#import threading
import cuon.Staff.staff
import cuon.Project.project
import commands
import cuon.Databases.SingleDataTreeModel
import cuon.Databases.SingleCuon
import cuon.Finances.invoicebook
import cuon.Finances.bookkeeping
import cuon.Finances.cashAccountBook
import cuon.Calendar.calendar
try:
import cuon.Web2.web2
except:
print 'no Module Web2'
try:
from PIL import Image
except:
print 'no PIL Image found'
try:
import cuon.SQL_Shell.sql_shell
except:
pass
# localisation
import locale, gettext
import time
import cuon.E_Mail.imap_dms
try:
#import gtkhtml2
import gtkmozembed as moz
except:
print 'gtkhtml not found'
#http connections
import httplib, urllib
#try:
# import profile
#except:
# print "no Profile"
import cuon.DMS.documentTools
import bz2
class MainWindow(windows):
"""
@author: Juergen Hamel
@organization: Cyrus-Computer GmbH, D-32584 Loehne
@copyright: by Juergen Hamel
@license: GPL ( GNU GENERAL PUBLIC LICENSE )
@contact: jh@cyrus.de
"""
def __init__(self, sT):
windows.__init__(self)
self.sStartType = sT
self.Version = {'Major': 11, 'Minor': 10, 'Rev': 5, 'Species': 0, 'Maschine': 'Linux,BSD,Windows,Mac'}
self.sTitle = `self.Version['Major']` + '.' + `self.Version['Minor']` + '.' + `self.Version['Rev']`
self.t0 = None
self.t1 = None
self.t2 = None
self.t3 = None
self.allTables = {}
self.sDebug = 'NO'
self.ModulNumber = self.MN['Mainwindow']
self.extMenucommand = {}
self.store = None
self.connectTreeId = None
self.singleAddress = None
self.singlePartner = None
self.singleSchedul = None
self.schedulHash1 = None
self.schedulHash2 = None
self.schedulHash3 = None
self.ClientID = 0
self.firstGtkMozStart = True
self.mapmoz = None
#self.extMenucommand['ext1'] = 'Test'
#set this Functions to None
def loadUserInfo(self):
pass
def checkClient(self):
pass
def delete_event(self, widget, event, data=None):
self.on_end1_activate(None)
return False
def destroy(self, widget, data=None):
print "destroy signal occurred"
self.on_end1_activate(None)
def on_end1_activate(self,event):
print "exit cuon"
#clean up the tmp-files
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon__*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/cuon__*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/*__dms*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.dicUser['prefPath']['tmp'] + '/*__dms*' ))
except Exception, params:
print 'prm2',Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath( self.td.cuon_path + '/*__dms*' ))
except Exception, params:
print 'prm3',Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath( './*__dms*' ))
except Exception, params:
print 'prm3',Exception, params
self.on_logout1_activate(None)
self.gtk_main_quit()
def on_databases1_activate(self,event):
import cuon.Databases.databases
daba = cuon.Databases.databases.databaseswindow()
def on_login1_activate(self,event):
import cuon.Login.login
print 'lgi client id = ', self.ClientID
lgi = cuon.Login.login.loginwindow( [self.getWidget('eUserName')], None, Username, PASSWORD, self.ClientID)
self.openDB()
self.oUser = self.loadObject('User')
self.closeDB()
if self.oUser.getUserName()== 'EMPTY':
pass
else:
self.getWidget('eServer').set_text(self.td.server)
#choose the client
#sys.exit()
self.on_clients1_activate(None)
print 'Hallo - client'
self.checkMenus()
print 'ShowNews = ', self.dicUser['showNews']
if self.dicUser['showNews'] :
self.activateClick('onlineNews')
def checkMenus(self):
liModullist = self.rpc.callRP('User.getModulList', self.oUser.getSqlDicUser())
#print liModullist
if self.sStartType == 'server':
self.enableMenuItem('serverMode')
self.disableMenuItem('user')
self.enableMenuItem('login')
misc_menu = False
#print 'LI_MODULELIST'
print liModullist
for iL in liModullist:
#print iL
if 'all' in iL:
#print 'key all found'
#data
self.addEnabledMenuItems('work','mi_addresses1')
self.addEnabledMenuItems('work','mi_articles1')
self.addEnabledMenuItems('work','mi_bibliographic')
self.addEnabledMenuItems('work','mi_clients1')
self.addEnabledMenuItems('work','contracts1')
self.addEnabledMenuItems('work','mi_leasing1')
print 'enableMenuItem staff'
self.addEnabledMenuItems('work','mi_staff1')
print 'enableMenuItem staff end'
#action
self.addEnabledMenuItems('work','mi_order1')
self.addEnabledMenuItems('work','mi_stock1')
self.addEnabledMenuItems('work','mi_dms1')
self.addEnabledMenuItems('work','mi_supportticket1')
#accounting
self.addEnabledMenuItems('work','mi_cash_account_book1')
# extras
self.addEnabledMenuItems('work','mi_expert_system1')
self.addEnabledMenuItems('work','mi_project1')
## self.addEnabledMenuItems('work','mi_forms1')
## self.addEnabledMenuItems('work','mi_forms_addresses1')
#tools
self.addEnabledMenuItems('work','mi_preferences1')
self.addEnabledMenuItems('work','mi_user1')
self.addEnabledMenuItems('work','mi_finances1')
#self.addEnabledMenuItems('work','mi_project1')
self.addEnabledMenuItems('work','mi_import_data1')
self.enableMenuItem('work')
if iL.has_key('addresses'):
self.addEnabledMenuItems('misc','mi_addresses1')
misc_menu = True
if iL.has_key('articles'):
self.addEnabledMenuItems('misc','mi_articles1')
misc_menu = True
if iL.has_key('biblio'):
self.addEnabledMenuItems('misc','mi_bibliographic')
misc_menu = True
if iL.has_key('clients'):
self.addEnabledMenuItems('misc','mi_clients1')
misc_menu = True
if iL.has_key('staff'):
self.addEnabledMenuItems('misc','mi_staff1')
misc_menu = True
if iL.has_key('order'):
self.addEnabledMenuItems('misc','mi_order1')
misc_menu = True
if iL.has_key('stock'):
self.addEnabledMenuItems('misc','mi_stock1')
misc_menu = True
if iL.has_key('dms'):
self.addEnabledMenuItems('misc','mi_dms1')
misc_menu = True
if iL.has_key('account_book'):
self.addEnabledMenuItems('misc','mi_cash_account_book1')
misc_menu = True
if iL.has_key('expert_system'):
self.addEnabledMenuItems('misc','mi_expert_system1')
misc_menu = True
if iL.has_key('project'):
print 'key project found '
self.addEnabledMenuItems('misc','mi_project1')
misc_menu = True
if iL.has_key('web2'):
print 'key web2 found '
self.addEnabledMenuItems('misc','web2')
misc_menu = True
if iL.has_key('forms'):
print 'key forms found '
self.addEnabledMenuItems('misc','forms1')
misc_menu = True
print '-----------------------'
if iL.has_key('forms_addresses'):
print 'key forms_addresses found '
self.addEnabledMenuItems('misc','forms_addresses1')
self.addEnabledMenuItems('misc','mi_addresses_notes_misc1')
self.addEnabledMenuItems('misc','mi_addresses_notes_contacter1')
self.addEnabledMenuItems('misc','mi_addresses_notes_representant1')
self.addEnabledMenuItems('misc','mi_addresses_notes_salesman1')
misc_menu = True
if iL.has_key('experimental'):
print 'key experimental found'
self.addEnabledMenuItems('experimental','mi_mayavi1')
self.addEnabledMenuItems('experimental','mi_test1')
self.enableMenuItem('experimental')
if iL.has_key('extendet_gpl'):
try:
liExtGpl = iL['extendet_gpl']
print 'Ext.GPL =', liExtGpl
for newProgram in liExtGpl:
print newProgram
mi1 = self.addMenuItem(self.getWidget(newProgram['MenuItem']['Main']),newProgram['MenuItem']['Sub'])
try:
print 'new Item = ', `mi1`
if newProgram['MenuItem']['ExternalNumber'] == 'ext1':
mi1.connect("activate", self.on_ext1_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext2':
mi1.connect("activate", self.on_ext2_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext3':
mi1.connect("activate", self.on_ext3_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext4':
mi1.connect("activate", self.on_ext4_activate)
if newProgram.has_key('Imports'):
newImports = newProgram['Imports']
for nI in newImports:
try:
print 'import ext Module 1', nI
exec('import ' + nI)
print 'import extended module 2', nI
except Exception, params:
print Exception, params
if newProgram.has_key('MenuStart'):
print 'MenuStart = ', newProgram['MenuItem']['ExternalNumber']
self.extMenucommand[newProgram['MenuItem']['ExternalNumber']] = newProgram['MenuStart']
if newProgram.has_key('Start'):
exec(newProgram['Start'])
print 'EXEC = ', newProgram['Start']
except Exception,params:
print Exception,params
except Exception,params:
print Exception,params
if misc_menu:
self.enableMenuItem('misc')
def on_logout1_activate(self, event):
print 'Logout'
try:
self.rpc.callRP('Database.logout', self.oUser.getUserName())
except:
print 'Exception'
self.disableMenuItem('login')
self.enableMenuItem('user')
def on_eUserName_changed(self, event):
if self.getWidget('eUserName').get_text() != 'EMPTY':
print 'User changed 22'
self.openDB()
self.oUser = self.loadObject('User')
print 'sDebug (Cuon) = ' + self.sDebug
self.oUser.setDebug(self.sDebug)
self.saveObject('User', self.oUser)
self.closeDB()
# self.openDB()
#if self.startProgressBar():
if not self.allTables:
self.generateLocalSqlObjects()
# self.stopProgressBar()
#print self.oUser.getDicUser()
# now start scheduling
print 'Client = ', self.oUser.getSqlDicUser()['client']
def generateSqlObjects(self):
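# Fetches every 'entry_*' record from the server's cuon table and caches the
# corresponding info objects locally, then downloads the pickled 'allTables'
# definition and loads the SQL definition of each table via cyr_load_table,
# updating the progress bar while doing so.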
self.setProgressBar( 0.4)
entryList = self.rpc.callRP('Database.executeNormalQuery',"select skey from cuon where skey ~* 'entry_' ")
#print entryList
self.openDB()
for i in entryList:
#print i['skey']
sk = self.rpc.callRP('Database.getInfo', i['skey'])
self.saveObject(i['skey'],sk)
#print sk
self.closeDB()
#self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables')
at = self.rpc.callRP('Database.getInfo', 'allTables')
#print 'at23 = ', `at`
self.setProgressBar( 3.0)
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#sys.exit(0)
#print 'liAllTables = '
#print liAllTables
iCount = len(liAllTables)
for i in range(iCount):
self.loadSqlDefs(liAllTables, i)
self.setProgressBar( (float(i) * 1.0/float(iCount) * 100.0) + 5.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def generateLocalSqlObjects(self):
at = self.rpc.callRP('Database.getInfo', 'allTables')
#print 'at24 = ', `at`
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#liAllTables = cPickle.loads(self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables'))
#print 'liAllTables = ', liAllTables
#print liAllTables
iCount = len(liAllTables)
#print 'iCount = ', iCount
for i in range(iCount):
self.loadLocalSqlDefs(liAllTables, i)
#self.setProgressBar(float(i) * 1.0/float(iCount) * 100.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def loadSqlDefs(self, liAllTables, i ):
try:
clt = cuon.Databases.cyr_load_table.cyr_load_table()
print 'Table0 = ', liAllTables[i]
if liAllTables[i].find('_history') < 0:
print 'Table = ', liAllTables[i]
self.allTables[liAllTables[i]] = clt.loadTable(liAllTables[i])
except Exception, param:
print 'ERROR SQL Defs'
print Exception
print param
print liAllTables[i]
def loadLocalSqlDefs(self, liAllTables, i ):
#print 'loadLocalSQL1 ', liAllTables
#print 'loadLocalSQL2 ', i
clt = cuon.Databases.cyr_load_table.cyr_load_table()
self.allTables[liAllTables[i]] = clt.loadLocalTable(liAllTables[i])
#print 'loadLocalSQL3 ', `self.allTables`
# Data-Menu
#-->
def on_addresses1_activate(self,event):
adr = cuon.Addresses.addresses.addresswindow(self.allTables)
def on_articles1_activate(self,event):
art = cuon.Articles.articles.articleswindow(self.allTables)
def on_bank1_activate(self,event):
bank = cuon.Bank.bank.bankwindow(self.allTables)
def on_bibliographic_activate(self, event):
import cuon.Biblio.biblio
bib = cuon.Biblio.biblio.bibliowindow(self.allTables)
def on_clients1_activate(self, event):
#print self.allTables
self.dicUser = self.oUser.getDicUser()
if event:
self.ClientID = 0
print 'cli = ', self.ClientID
cli = cuon.Clients.clients.clientswindow(self.allTables, self.ClientID, eClient = self.getWidget('eClient'))
def on_staff1_activate(self, event):
staff = cuon.Staff.staff.staffwindow(self.allTables)
# submenu contracts1
def on_leasing1_activate(self, event):
leasing = cuon.Leasing.leasing.leasingwindow(self.allTables)
# Action-Menu
def on_proposal1_activate(self,event):
ord = cuon.Proposal.proposal.proposalwindow(self.allTables)
def on_order1_activate(self,event):
ord = cuon.Order.order.orderwindow(self.allTables)
def on_stock1_activate(self,event):
ord = cuon.Stock.stock.stockwindow(self.allTables)
def on_mi_supportticket1_activate(self, event):
import cuon.SupportTicket.supportTicket
supt = cuon.SupportTicket.supportTicket.supportticketwindow(self.allTables)
def on_dms1_activate(self,event):
import cuon.DMS.dms
dms = cuon.DMS.dms.dmswindow(self.allTables)
# Finances
# Cash Account Book
def on_cash_account_book1_activate(self, event):
cab = cuon.Finances.cashAccountBook.cashAccountBookwindow(self.allTables)
def on_bookkeeping1_activate(self, event):
bk = cuon.Finances.bookkeeping.bookkeepingwindow(self.allTables)
def on_listOfInvoices1_activate(self, event):
loi = cuon.Finances.invoicebook.invoicebookwindow(self.allTables)
def on_analyse_costs1_activate(self, event ):
pass
# Extras
def on_expert_system1_activate(self, event):
import cuon.AI.ai
cai = cuon.AI.ai.aiwindow(self.allTables)
def on_project1_activate(self, event):
cpro = cuon.Project.project.projectwindow(self.allTables)
def on_web2_activate(self, event):
web2 = cuon.Web2.web2.web2window(self.allTables)
def on_stats1_activate(self, event):
import cuon.Stats.stats
stats = cuon.Stats.stats.statswindow(self.allTables)
def on_calendar_activate(self, event):
ccal = cuon.Calendar.calendar.calendarwindow(self.allTables)
def on_mindmap1_activate(self, event):
import cuon.Think.think
think = cuon.Think.think.thinkwindow(self.allTables)
# Tools
def on_addresses_notes_misc1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Misc'])
def on_addresses_notes_contacter1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Contacter'])
def on_addresses_notes_representant1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Rep'])
def on_addresses_notes_salesman1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Salesman'])
def on_update1_activate(self, event):
self.updateVersion()
def on_pref_user1_activate(self,event):
prefs = cuon.Preferences.preferences.preferenceswindow(self.allTables)
def on_prefs_finances_activate(self,event):
prefs = cuon.PrefsFinance.prefsFinance.prefsFinancewindow(self.allTables)
def on_webshop1_activate(self,event):
import cuon.WebShop.webshop
print 'Webshop'
prefs = cuon.WebShop.webshop.webshopwindow(self.allTables)
def updateVersion(self):
if self.startProgressBar():
self.generateSqlObjects()
self.writeAllGladeFiles()
self.stopProgressBar()
def on_import_data1_activate(self, event):
import cuon.Databases.import_generic1
imp1 = cuon.Databases.import_generic1.import_generic1(self.allTables)
def on_ExportData_activate(self, event):
print 'export Data'
import cuon.Databases.export_generic1
exp1 = cuon.Databases.export_generic1.export_generic1(self.allTables)
def on_sql_shell_activated(self, event):
sqlw = cuon.SQL_Shell.sql_shell.sql_shell()
def on_test1_activate(self, event):
import cuon.VTK.mainLogo
import cuon.VTK.test
te = cuon.VTK.test.test()
te.show()
# Logs
def on_logs_mail1_activate(self, event):
import cuon.Editor.editor
dicFile = {'TYPE':'FILE','NAME':os.path.normpath(self.td.cuon_path + '/' + 'cuonmail.log'),'Rights':'RO'}
em = cuon.Editor.editor.editorwindow(dicFile)
# help and info
def on_about1_activate(self, event):
about1 = self.getWidget('aCuon')
about1.show()
def on_onlinehelp_activate(self, event):
import cuon.Help.help
he1 = cuon.Help.help.helpwindow()
# hide about-info
def on_okAbout1_clicked(self, event):
about1 = self.getWidget('aCuon')
about1.hide()
# extended Menu
def on_ext1_activate(self, event):
print 'ext1 menu activated !!!!!'
ext1 = eval(self.extMenucommand['ext1'])
try:
ext1.start()
except:
print 'No StartModule'
def on_ext2_activate(self, event):
print 'ext2 menu activated !!!!!'
ext2 = eval(self.extMenucommand['ext2'])
try:
ext2.start()
except:
print 'No StartModule'
def on_ext3_activate(self, event):
print 'ext3 menu activated !!!!!'
ext3 = eval(self.extMenucommand['ext3'])
try:
ext3.start()
except:
print 'No StartModule'
def on_ext4_activate(self, event):
print 'ext4 menu activated !!!!!'
print self.extMenucommand['ext4']
ext4 = eval(self.extMenucommand['ext4'])
try:
ext4.start()
except:
print 'No StartModule ext4'
def getNewClientSoftware(self, id):
cuonpath = '..'
self.infoMsg('C.U.O.N. will now try to load the new client version.')
shellcommand = 'rm ' + cuonpath + '/newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
shellcommand = 'rm -R ' + cuonpath + '/iClient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
sc = cuon.Databases.SingleCuon.SingleCuon(self.allTables)
sc.saveNewVersion(id)
shellcommand = 'cd '+cuonpath+' ; tar -xvjf newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
#shellcommand = 'sh ' + cuonpath + '/iClient/iCuon '
#liStatus = commands.getstatusoutput(shellcommand)
#print shellcommand, liStatus
f = open('newversion','a')
f.write(`self.Version`)
f.close()
# Plugins
# Dia
shellcommand = 'if [ ! -d ~/.dia/python ] ; then mkdir ~/.dia/python ; fi '
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
shellcommand = 'cd ' + cuonpath +'/Plugins/Dia ; cp cuon_dia.py ~/.dia/python '
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
self.infoMsg('Update complete. Please restart C.U.O.N.')
def startT0(self):
try:
print 'First T0'
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
if oUser:
#print 'T0 Client = ', oUser.client
if oUser.client > 0:
self.singleAddress = cuon.Addresses.SingleAddress.SingleAddress(self.allTables)
self.singlePartner = cuon.Addresses.SinglePartner.SinglePartner(self.allTables)
self.singleSchedul = cuon.Addresses.SingleScheduling.SingleScheduling(self.allTables)
self.startTiming()
except Exception, params:
print Exception, params
return True
def startTiming(self):
#'print start Timer'
# 60*1000 = 1 minute
time_contact = 2*60*1000
time_schedul = 15*60*1000
time_imap_dms = 30*60*1000
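# gobject.timeout_add() expects the interval in milliseconds, hence the *60*1000
# factors above (2 min, 15 min, 30 min); each callback returns True so the timer
# keeps firing until it is explicitly removed with gobject.source_remove().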
if self.t0:
gobject.source_remove(self.t0)
if self.t1:
gobject.source_remove(self.t1)
if self.t2:
gobject.source_remove(self.t2)
if self.t3:
gobject.source_remove(self.t3)
try:
if not self.t1:
self.startChecking()
self.t1 = gobject.timeout_add(time_contact, self.startChecking)
except Exception, params:
print Exception, params
try:
if not self.t2:
self.setSchedulTree()
self.t2 = gobject.timeout_add(time_schedul,self.setSchedulTree)
except Exception, params:
print Exception, params
try:
if not self.t3:
self.checkImapDMS()
self.t3 = gobject.timeout_add(time_imap_dms,self.checkImapDMS)
except Exception, params:
print Exception, params
def checkImapDMS(self):
#print '######################################### EMail #########################'
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
imapD = cuon.E_Mail.imap_dms.imap_dms(self.allTables, oUser.getDicUser())
imapD.run()
#print '######################################### EMail END #########################'
return True
def startChecking(self):
#gtk.gdk.threads_enter()
try:
#print 'start scheduling'
#print self.Version
self.openDB()
oUser = self.loadObject('User')
liSchedul = self.loadObject('Scheduling')
self.closeDB()
#print `self.oUser.getDicUser()`
#print 'Client = ', oUser.getDicUser()['client']
liContacts = self.rpc.callRP('Address.getAllActiveContacts', oUser.getSqlDicUser())
#print liContacts
try:
if not liSchedul:
liSchedul = []
for contacts in liContacts:
ok = False
for oldSchedul in liSchedul:
if oldSchedul == contacts['id']:
ok = True
if not ok:
cuon.Addresses.contact.contactwindow(self.allTables, contacts['address_id'], contacts['partner_id'])
liSchedul.append(contacts['id'])
except Exception, params:
print Exception, params
self.openDB()
self.saveObject('Scheduling', liSchedul)
self.closeDB()
#cuon.Addresses.contact.contactwindow(self.allTables)
finally:
# print 'Ende'
pass
return True
#gtk.gdk.threads_leave()
#self.startTimer(10)
def on_rbScheduls_activate(self, event):
print 'rbScheduls clicked'
self.setSchedulTree()
def disconnectTree(self):
try:
self.getWidget('treeSchedul').get_selection().disconnect(self.connectTreeId)
except:
pass
def connectTree(self):
try:
self.connectTreeId = self.getWidget('treeSchedul').get_selection().connect("changed", self.tree_select_callback)
except:
pass
def tree_select_callback(self, treeSelection):
listStore, iter = treeSelection.get_selected()
print listStore,iter
if listStore and len(listStore) > 0:
row = listStore[0]
else:
row = -1
if iter != None:
sNewId = listStore.get_value(iter, 0)
print sNewId
try:
newID = int(sNewId[sNewId.find('###')+ 3:])
self.setDateValues(newID)
except:
pass
#self.fillEntries(newId)
def on_treeSchedul_row_activated(self, event):
print 'event'
self.on_bGotoAddress_clicked(event)
def setSchedulTree(self):
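# Rebuilds the treeSchedul widget: picks a filter from the radio buttons
# (All / New / Cancel / actualWeek), asks the server for the active schedule
# entries together with a hash (if nothing changed the server answers
# 'NO_NEW_DATA' and the tree is left untouched), then fills the TreeStore
# three times: grouped by schedule name, by date and by salesman/representative.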
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
# Data
sChoice = 'All'
if self.getWidget('rbSchedulsNew').get_active():
sChoice = 'New'
elif self.getWidget('rbSchedulsCancel').get_active():
sChoice = 'Cancel'
elif self.getWidget('rbSchedulsActualWeek').get_active():
sChoice = 'actualWeek'
print 'sChoice = ', sChoice
liDates, newHash = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Name','All', sChoice, self.schedulHash1)
#print 'lidates = ', liDates
#print 'newHash = ', newHash
if liDates == ['NO_NEW_DATA']:
print 'liDates = no Data'
return True
# new data arrived, go on
#liststore = gtk.ListStore(str)
self.schedulHash1 = newHash
self.disconnectTree()
treeview = self.getWidget('treeSchedul')
#treeview.set_model(liststore)
#renderer = gtk.CellRendererText()
#column = gtk.TreeViewColumn("Scheduls", renderer, text=0)
#treeview.append_column(column)
treestore = gtk.TreeStore(object)
treestore = gtk.TreeStore(str)
## renderer = gtk.CellRendererText()
##
## column = gtk.TreeViewColumn("Zweite Spalte", renderer, text=0)
## treeview.append_column(column)
treeview.set_model(treestore)
print 'Schedul by names: ', liDates
if liDates:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Names')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['schedul_name']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`])
## try:
## iter = treestore.append(None,['Names'])
## iter2 = treestore.insert_after(iter,None,['jhamel'])
## iter3 = treestore.insert_after(iter2,None,['termin1'])
## iter = treestore.append(None,['Scheduls'])
## iter2 = treestore.insert_after(iter,None,['date'])
## iter3 = treestore.insert_after(iter2,None,['termin1'])
## except Exception,params:
## print Exception,params
##
#liDates, self.schedulHash2 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Schedul','All',sChoice)
#liTest.sort(key=(lambda x: (x['test1'], lambda x: x['testA']) ))
liDates.sort(key=(lambda x: (x['date_norm'], x['schedul_name'], x['time_begin'] )), reverse = True)
#print 'Schedul by schedul_date 2 : ', liDates
if liDates:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Schedul')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['date']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['schedul_name'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] +' ###' + `oneDate['id']`])
# reps and Saleman
# #liDates, self.schedulHash3 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'rep_salesman','All', sChoice)
#liDates.sort(key=(lambda x: (x['date_norm'], lambda x: x['rep_lastname'], lambda x: x['salesman_lastname'], lambda x: x['date_norm'])), reverse = True)
liDates.sort(key=(lambda x: (x['salesman_lastname'], x['rep_lastname'], x['date_norm']) ), reverse = True)
#print 'Schedul by names: 3', liDates
if liDates and liDates not in ['NONE']:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Salesman')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['schedul_name']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`])
treeview.show()
self.connectTree()
return True
def setDateValues(self, id):
widgetTVAddress = self.getWidget('tvAddress')
widgetTVPartner = self.getWidget('tvPartner')
widgetEShortRemark = self.getWidget('eShortRemark')
widgetTvEvent = self.getWidget('tvEvent')
self.singleSchedul.load(id)
partnerid = self.singleSchedul.getPartnerID()
self.singlePartner.load(partnerid)
addressid = self.singlePartner.getAddressID()
self.singleAddress.load(addressid)
print partnerid
print addressid
s = self.singleSchedul.getShortRemark()
print 's=', s
if s:
widgetEShortRemark.set_text(s)
else:
widgetEShortRemark.set_text('')
s = self.singleSchedul.getNotes()
print 's=', s
if s:
self.add2Textbuffer(widgetTvEvent,s,'Overwrite')
else:
self.add2Textbuffer(widgetTvEvent,' ','Overwrite')
s = self.singleAddress.getMailAddress()
if s:
self.add2Textbuffer(widgetTVAddress,s,'Overwrite')
else:
self.add2Textbuffer(widgetTVAddress,' ','Overwrite')
s = self.singlePartner.getMailAddress()
if s:
self.add2Textbuffer(widgetTVPartner,s,'Overwrite')
else:
self.add2Textbuffer(widgetTVPartner,' ','Overwrite')
def on_bGotoAddress_clicked(self, event):
if self.singleAddress.ID > 0:
adr = cuon.Addresses.addresses.addresswindow(self.allTables, addrid = self.singleAddress.ID)
def on_bChat_clicked(self, event):
print self.dicUser
shellcommand = shlex.split(self.dicUser['Communications']['textChat'] )
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
def on_b3DChat_clicked(self, event):
shellcommand = shlex.split(self.dicUser['Communications']['3DChat'])
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
def on_bEmail_clicked(self, event):
shellcommand = shlex.split(self.dicUser['Communications']['emailPrg'])
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
#def startTimer(self, seconds):
# self.t1 = threading.Timer(seconds, self.startChecking)
# self.t1.start()
def on_eClient_changed(self, event):
''' client ID changed '''
try:
dt = cuon.DMS.documentTools.documentTools()
sFile = dt.load_mainwindow_logo(self.allTables)
if sFile:
print "image found"
logo = self.getWidget("company_logo")
#
# newIm = Image.fromstring('RGB',[1024, 1024], bz2.decompress( image))
# newIm.thumbnail([208,208])
# sFile = self.dicUser['prefPath']['tmp'] + 'cuon_mainwindow_logo.png'
# save(sFile)
print 'sFile = ', sFile
pixbuf = gtk.gdk.pixbuf_new_from_file(sFile)
scaled_buf = pixbuf.scale_simple(208,208,gtk.gdk.INTERP_BILINEAR)
logo.set_from_pixbuf(scaled_buf)
logo.show()
except:
pass
#logo.set_from_file(sFile)
def on_onlineNews_activate(self, event):
self.winNews.remove(self.swMap)
self.mapmoz = None
self.mapmoz = moz.MozEmbed()
self.viewMap = gtk.Viewport()
self.swMap = gtk.ScrolledWindow()
self.viewMap.add(self.mapmoz)
self.swMap.add(self.viewMap)
self.winNews.add(self.swMap)
print 'mapmoz = ', self.mapmoz
if self.mapmoz:
if self.dicUser['Locales'].lower() == 'de':
sUrl = 'http://cuon.org/Cuon/news.html'
else:
sUrl = 'http://cuon.org/en_Cuon/news.html'
print sUrl
self.mapmoz.load_url(sUrl)
#self.mapmoz.set_size_request(816,600)
self.viewMap.show()
self.swMap.show()
self.mapmoz.show()
self.winNews.show_all()
def closeOnlineNews(self, event, data=None):
self.winNews.hide()
def startMain(self, sStartType, sDebug,sLocal='NO', Username='EMPTY', PASSWORD='Test', ClientID=0):
#ML = cuon.VTK.mainLogo.mainLogo()
#ML.startLogo()
self.ClientID = ClientID
if sDebug:
self.sDebug = sDebug
else:
self.sDebug = 'NO'
if sStartType == 'server':
print 'Server-Modus'
td = typedefs_server()
# create widget tree ...
self.gladeName = '/usr/share/cuon/glade/cuon.glade2'
self.loadGladeFile(self.gladeName)
else:
id, version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
print 'id', id
## self.openDB()
## version = self.loadObject('ProgramVersion')
## self.closeDB()
##
print 'Version:' + str(version)
print self.Version['Major'], version['Major']
print self.Version['Minor'], version['Minor']
print self.Version['Rev'], version['Rev']
print self.Version, version
self.openDB()
oUser = self.loadObject('User')
if not oUser:
oUser = cuon.User.user.User()
oUser.client = 0
self.saveObject('User',oUser)
self.closeDB()
if not version:
print 'no Version, please inform Cuon-Administrator'
sys.exit(0)
if self.rpc.callRP('Database.checkVersion', self.Version, version) == 'Wrong':
print 'client and server versions differ'
print 'load new version of pyCuon'
self.getNewClientSoftware(id)
cuonpath = '..'
shellcommand = 'rm ' + cuonpath + '/cuonObjects'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
self.openDB()
version = self.saveObject('newClientVersion',True)
self.closeDB()
sys.exit(0)
self.openDB()
newClientExist = self.loadObject('newClientVersion')
self.closeDB()
if newClientExist:
self.updateVersion()
self.openDB()
self.saveObject('ProgramVersion', self.Version)
version = self.saveObject('newClientVersion',False)
self.closeDB()
version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
if sLocal != 'NO' and self.rpc.callRP('Database.checkVersion', self.Version, version[1]) == 'Wrong':
self.getNewClientSoftware(id)
sys.exit(0)
# create widget tree ...
# self.gladeName = td.main_glade_name
self.loadGlade('cuon.xml','window1')
self.win1 = self.getWidget("window1")
self.win1.connect("delete_event", self.delete_event)
self.win1.connect("destroy", self.destroy)
# Online news
self.winNews= self.getWidget('OnlineNews')
self.winNews.connect("delete_event", self.closeOnlineNews)
self.swMap = self.getWidget('swOnlineNews')
# Menu-items
self.initMenuItemsMain()
self.disableAllMenuItems()
self.addEnabledMenuItems('login','logout1')
self.addEnabledMenuItems('login','data')
self.addEnabledMenuItems('login','action1')
self.addEnabledMenuItems('login','accounting1')
self.addEnabledMenuItems('login','extras')
self.addEnabledMenuItems('login','tools')
self.addEnabledMenuItems('serverMode','databases1')
self.addEnabledMenuItems('user','login1')
self.addEnabledMenuItems('user','tools')
self.addEnabledMenuItems('user','update1')
self.disableMenuItem('login')
self.disableMenuItem('serverMode')
self.enableMenuItem('user')
sTitle = self.getWidget('window1').get_title() + self.sTitle
self.setTitle('window1',sTitle)
self.openDB()
oUser = self.loadObject('User')
if not oUser:
oUser = cuon.User.user.User()
oUser.client = 0
self.saveObject('User',oUser)
self.saveObject('Scheduling', [])
self.closeDB()
# set initial columns
treeview = self.getWidget('treeSchedul')
#treeview.set_model(liststore)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Scheduls", renderer, text=0)
treeview.append_column(column)
self.t0 = gobject.timeout_add(2000, self.startT0)
if Username != "empty":
print "Username = ", Username
self.activateClick("login1")
def gtk_main_quit(self):
if self.t1:
gobject.source_remove(self.t1)
gtk.main_quit()
#gtk.gdk.threads_init()
def getConfigOption(cpParser, section, option):
value = None
if cpParser.has_option(section,option):
value = cpParser.get(section, option)
print 'getConfigOption', section + ', ' + option + ' = ' + value
if not value:
value = " "
return value
sStartType = 'client'
sDebug = 'NO'
sLocal = 'NO'
print sys.argv
# Args:
# 1 server http://host:port
# 2 client/server
# 3 Debug = ON/OFF
# 4 Path to Locale/ default
# 5 cuon_path
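# Illustrative invocation only (host, port and paths below are made up and not part
# of any real installation); the optional arguments 6-9 (GUI name, user name,
# password, client id) are handled by the sys.argv parsing further below:
#
#   python Cuon.py http://cuon.example.org:8080 client NO /usr/share/locale ~/cuon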
try:
cpParser = ConfigParser.ConfigParser()
sFile = 'cuon.ini'
f = open(sFile)
cpParser.readfp(f)
f.close()
except:
pass
Description = None
WorkingDir= 'NO'
Host = None
Port = None
Proto = None
sStartType = 'client'
sLocal = 'NO'
sDebug = 'NO'
AlternateGui = 'LINUX-Standard'
Username = "EMPTY"
PASSWORD = "TEST"
ClientID = 0
sSect = 'Client'
Description = getConfigOption(cpParser, sSect,'DESCRIPTION')
WorkingDir = getConfigOption(cpParser, sSect,'WORKINGDIR')
Host = getConfigOption(cpParser, sSect,'HOST')
Port = getConfigOption(cpParser, sSect,'PORT')
Proto = getConfigOption(cpParser, sSect,'PROTOCOL')
sStartType = getConfigOption(cpParser, sSect,'TYPE')
sLocal = WorkingDir + getConfigOption(cpParser, sSect,'LOCALE')
sDebug = getConfigOption(cpParser, sSect,'DEBUG')
AlternateGui = getConfigOption(cpParser, sSect,'ALTERNATEGUI')
Username = getConfigOption(cpParser, sSect, "USERNAME").strip()
PASSWORD = getConfigOption(cpParser, sSect, "PASSWORD").strip()
try:
ClientID = int(getConfigOption(cpParser, sSect, "CLIENT_ID"))
except:
ClientID = 0
if not Username or not PASSWORD or not ClientID:
Username = "EMPTY"
PASSWORD = "TEST"
ClientID = 0
print "AlternateGui = ", AlternateGui
if not AlternateGui or AlternateGui == 'NO' :
AlternateGui = 'LINUX-Standard'
if len(sys.argv) > 4:
if len(sys.argv[4]) > 1:
sLocal = sys.argv[4]
if len(sys.argv) > 3:
if len(sys.argv[3]) > 1:
sDebug = sys.argv[3]
if len(sys.argv) > 2:
if len(sys.argv[2]) > 1:
sStartType = sys.argv[2]
print sStartType
if sStartType == 'server':
td = cuon.TypeDefs.typedefs_server.typedefs_server()
else:
td = cuon.TypeDefs.typedefs.typedefs()
td.SystemName = AlternateGui
td.cuon_path = WorkingDir
td.server = Proto.strip() +'://' + Host.strip() +':' + Port.strip()
if len(sys.argv) > 1:
if len(sys.argv[1]) > 1:
if sys.argv[1] != 'NO':
td.server = sys.argv[1]
print 'td-server =', td.server
if len(sys.argv) > 5:
if len(sys.argv[5]) > 1:
if sys.argv[5] != 'NO':
td.cuon_path = sys.argv[5]
print 'td.cuon_path =', td.cuon_path
if len(sys.argv) > 6:
if len(sys.argv[6]) > 1:
if sys.argv[6] != 'NO':
td.SystemName = sys.argv[6]
print 'td.System =', td.SystemName
else:
td.SystemName = 'LINUX-Standard'
else:
td.SystemName = 'LINUX-Standard'
else:
td.SystemName = AlternateGui
print 'now -> ', td.SystemName
if len(sys.argv) > 7:
if len(sys.argv[7]) > 1:
Username = sys.argv[7]
print 'Username =', Username
print len(sys.argv)
if len(sys.argv) > 8:
if len(sys.argv[8]) > 1:
PASSWORD = sys.argv[8]
print 'password =', PASSWORD
if len(sys.argv) > 9:
if len(sys.argv[9]) > 0:
ClientID = int(sys.argv[9].strip())
print 'clientID =', ClientID
# set some paths
try:
print 'WorkingDir', WorkingDir
if not os.path.exists(WorkingDir + '/cuon_data'):
print 'make dir cuon_data'
os.mkdir(WorkingDir +'/cuon_data')
if not os.path.exists(WorkingDir +'/cuon_data/dms'):
print 'make dir cuon_data/dms'
os.mkdir(WorkingDir +'/cuon_data/dms')
if not os.path.exists(WorkingDir +'/cuon_data/import'):
print 'make dir cuon_data/import'
os.mkdir(WorkingDir +'/cuon_data/import')
if not os.path.exists(WorkingDir +'/cuon_data/export'):
print 'make dir cuon_data/export'
os.mkdir(WorkingDir +'/cuon_data/export')
except Exception, params:
print Exception, params
d = cuon.Databases.dumps.dumps(td)
d.openDB()
d.saveObject('td', td)
d.closeDB()
if sLocal == 'NO':
DIR = '/usr/share/locale'
else:
DIR = sLocal
#locale.setlocale (locale.LC_ALL, '')
APP = 'cuon'
gettext.bindtextdomain (APP, DIR)
gettext.textdomain (APP)
gettext.install (APP, DIR, unicode=1)
gtk.glade.bindtextdomain(APP,DIR)
gtk.glade.textdomain(APP)
print _('Debug by C.U.O.N. = ' ), sDebug
m = MainWindow(sStartType)
m.startMain(sStartType, sDebug,sLocal, Username, PASSWORD, ClientID)
#profile.run('m.startMain(sStartType, sDebug,sLocal)','cuonprofile')
# Import Psyco if available
#try:
# import psyco
#psyco.full()
#print ' start psyco'
#except ImportError:
# print 'no psyco found'
#gtk.gdk.threads_enter()
gtk.main()
#gtk.gdk.threads_leave()
#gtk.main()
|
juergenhamel/cuon
|
cuon_client/Cuon.py
|
Python
|
gpl-3.0
| 83,624
|
[
"VTK"
] |
672545f7c9a06f453ec21942b8db021e8bfd76b26754c30c8e44f7cedb3ced77
|
########################################################################
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/WorkloadManagementSystem/private/DIRACPilotDirector.py $
# File : PilotBundle.py
# Author : Ricardo Graciani
########################################################################
"""
Collection of Utilities to handle pilot jobs
"""
__RCSID__ = "$Id: DIRACPilotDirector.py 28536 2010-09-23 06:08:40Z rgracian $"
import os, base64, bz2, types, tempfile
def getExecutableScript( executable, arguments=[], proxy=None, sandboxDict = {}, environDict={}, execDir='' ):
"""
Prepare a wrapper script for the executable, including the required environment, proxy and sandbox.
A temporary directory is created, in which the sandbox is unpacked and the executable is called;
the directory is removed at the end.
Environment variables can be used in executable, arguments and execDir.
:Parameters:
`executable`
- string - if included in sandboxDict ./ is prepended
`arguments`
- list of strings - arguments to be passed to executable
`proxy`
- DIRAC.Core.Security.X509Chain - proxy to be set up in the environment
`sandboxDict`
- dictionary - name: path
`environDict`
- dictionary - key: value
`execDir`
- string - path where temporary directory is created
:returns:
- string - produced script
"""
if type( arguments) in types.StringTypes:
arguments = arguments.split(' ')
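# The proxy and every sandbox file are bz2-compressed and base64-encoded so that
# they can be embedded as plain string literals in the generated wrapper script,
# which decodes and recreates them in its temporary working directory at run time.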
compressedAndEncodedFiles = {}
if proxy:
compressedAndEncodedFiles['.proxy'] = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace('\n','')
for fileName, filePath in sandboxDict.items():
encodedFile = base64.encodestring( bz2.compress( open( filePath, "rb" ).read() ) ).replace('\n','')
compressedAndEncodedFiles[fileName] = encodedFile
script = """#!/usr/bin/env python
try:
import os, tempfile, sys, shutil, base64, bz2, subprocess, datetime
except:
print 'Failed to import os, tempfile, sys, shutil, base64, bz2, subprocess'
print 'Unsupported python version'
exit(1)
print 'START TIME:', datetime.datetime(2000,1,1).utcnow(), 'UTC'
print
# 1. Get Name of the executable
executable = '%(executable)s'
cmdTuple = [ executable ]
cmdTuple.extend( %(arguments)s )
# 2. Print environment
print '==========================================================='
print
print 'Existing Environment:'
print
for key, value in os.environ.items():
print key, '=', value
print
print 'Added Environment:'
print
# 3. Set environment
environDict = %(environDict)s
if 'LD_LIBRARY_PATH' not in os.environ and 'LD_LIBRARY_PATH' not in environDict:
environDict['LD_LIBRARY_PATH'] = ''
for key, value in environDict.items():
os.environ[key] = value
print key, '=', value
# 4. Create Working Directory
execDir = '%(execDir)s'
if not execDir:
execDir = None
else:
execDir = os.path.expanduser( os.path.expandvars( execDir ) )
workingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = execDir )
os.chdir( workingDirectory )
os.environ['X509_CERT_DIR'] = os.path.join( workingDirectory, 'etc', 'grid-security', 'certificates' )
# 5. Extract Sandbox files
for fileName, fileCont in %(compressedAndEncodedFiles)s.items():
f = open( fileName, 'w' )
f.write( bz2.decompress( base64.decodestring( fileCont ) ) )
f.close()
if fileName == '.proxy':
os.chmod( fileName, 0600 )
os.environ['X509_USER_PROXY'] = os.path.join( workingDirectory, fileName )
print 'X509_USER_PROXY', '=', os.path.join( workingDirectory, fileName )
elif fileName == executable:
os.chmod( fileName, 0755 )
executable = './' + executable
print
print '==========================================================='
print
# 6. Executing
cmdTuple = [ os.path.expanduser( os.path.expandvars( k ) ) for k in cmdTuple ]
print 'Executing: ', ' '.join( cmdTuple )
print 'at:', os.getcwd()
print
sys.stdout.flush()
try:
exitCode = subprocess.call( cmdTuple )
if exitCode < 0:
print >> sys.stderr, 'Command killed by signal', - exitCode
if exitCode > 0:
print >> sys.stderr, 'Command returned', exitCode
except OSError, e:
exitCode = -1
print >> sys.stderr, "Execution failed:", e
shutil.rmtree( workingDirectory )
myDate = datetime.datetime(2000,1,1).utcnow()
print
print 'END TIME:', datetime.datetime(2000,1,1).utcnow(), 'UTC'
exit( exitCode )
""" % { 'execDir': execDir,
'executable': executable,
'compressedAndEncodedFiles': compressedAndEncodedFiles,
'arguments': arguments,
'environDict': environDict, }
return script
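# Minimal usage sketch (file names, paths and environment values are illustrative
# only, they do not refer to an existing installation):
#
#   script = getExecutableScript( 'pilotScript.py',
#                                 arguments = [ '--debug' ],
#                                 sandboxDict = { 'pilotScript.py': '/opt/dirac/pilotScript.py' },
#                                 environDict = { 'MY_SETTING': '1' },
#                                 execDir = '/tmp' )
#   wrapperPath = writeScript( script, writeDir = '/tmp' )
#
# The resulting file is a self-contained python wrapper that unpacks the sandbox in a
# temporary directory, exports the given environment (and the proxy, if one was passed)
# and then runs the executable.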
def bundleProxy( executableFile, proxy ):
""" Create a self extracting archive bundling together an executable script and a proxy
"""
compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace( '\n', '' )
compressedAndEncodedExecutable = base64.encodestring( bz2.compress( open( executableFile, "rb" ).read(), 9 ) ).replace( '\n', '' )
bundle = """#!/usr/bin/env python
# Wrapper script for executable and proxy
import os, tempfile, sys, base64, bz2, shutil
try:
workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' )
os.chdir( workingDirectory )
open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) )
os.chmod('proxy',0600)
os.chmod('%(executable)s',0700)
os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy')
except Exception, x:
print >> sys.stderr, x
sys.exit(-1)
cmd = "./%(executable)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( workingDirectory )
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \
'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \
'executable': os.path.basename( executableFile ) }
return bundle
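# Usage sketch for bundleProxy (the executable path is illustrative; `proxy` is
# expected to be a DIRAC X509Chain instance as produced by the proxy handling code):
#
#   bundle = bundleProxy( '/path/to/jobWrapper.py', proxy )
#   bundlePath = writeScript( bundle, writeDir = '/tmp' )
#
# The bundle is a stand-alone python script that recreates both the proxy and the
# executable in a temporary directory, points X509_USER_PROXY at the proxy and then
# runs the executable.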
def writeScript( script, writeDir=None ):
"""
Write script into a temporary unique file under provided writeDir
"""
fd, name = tempfile.mkstemp( suffix = '_pilotWrapper.py', prefix = 'DIRAC_', dir=writeDir )
pilotWrapper = os.fdopen(fd, 'w')
pilotWrapper.write( script )
pilotWrapper.close()
return name
|
sposs/DIRAC
|
Resources/Computing/PilotBundle.py
|
Python
|
gpl-3.0
| 6,349
|
[
"DIRAC"
] |
8e3a4209999016a04d5c4c49de137155b8c9967f444de846ba66c54bbfde82e8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# Android Application Script
#
import os, sys, shutil, platform, zipfile
import string, subprocess, re
from mako.template import Template
from xml.etree.ElementTree import ElementTree
from StringIO import StringIO
from os.path import join, splitext, split, exists
from shutil import copyfile
from androidsdk import AndroidSDK
from compiler import Compiler
import bindings
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
module_dir = os.path.join(os.path.dirname(template_dir), 'module')
sys.path.extend([os.path.dirname(template_dir), module_dir])
from tiapp import TiAppXML
from manifest import Manifest
from module import ModuleDetector
import simplejson
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store']
ignoreDirs = ['.git', '.svn', '_svn', 'CVS']
def run(args):
return subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]
def pipe(args1,args2):
p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
return p2.communicate()[0]
def copy_resources(source, target):
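# Walks the source tree, skipping version-control directories (ignoreDirs) and
# OS/junk files (ignoreFiles), and mirrors everything into target, creating any
# missing directories on the way.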
if not os.path.exists(os.path.expanduser(target)):
os.mkdir(os.path.expanduser(target))
for root, dirs, files in os.walk(source):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
from_ = join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(split(to_)[0])
if not exists(to_directory):
os.makedirs(to_directory)
print "[TRACE] copying: %s to: %s" % (from_,to_)
copyfile(from_, to_)
class Android(object):
def __init__(self, name, myid, sdk, deploy_type, java):
self.name = name
# android requires at least one dot in packageid
if len(re.findall(r'\.',myid))==0:
myid = 'com.%s' % myid
self.id = myid
self.sdk = sdk
# Used in templating
self.config = {
'appid': self.id,
'appname' : self.name,
'appversion' : '1',
'apiversion' : '7', #Android 2.1
'deploy_type': deploy_type
}
self.config['classname'] = Android.strip_classname(self.name)
self.deploy_type = deploy_type
self.java = java
@classmethod
def strip_classname(cls, name):
classname = ''.join([str.capitalize() for str in re.split('[^A-Za-z0-9_]', name)])
if re.search("^[0-9]", classname) != None:
classname = "_" + classname
return classname
def newdir(self, *segments):
path = os.path.join(*segments)
if not os.path.exists(path):
os.makedirs(path)
return path
def copyfile(self, file, src, dest):
shutil.copy(os.path.join(src, file), os.path.join(dest, file))
def load_template(self, template):
return Template(filename=template, output_encoding='utf-8', encoding_errors='replace')
def render_android_manifest(self):
template_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
tmpl = self.load_template(os.path.join(template_dir, 'templates', 'AndroidManifest.xml'))
return tmpl.render(config = self.config)
def render(self, template_dir, template_file, dest, dest_file, **kwargs):
tmpl = self.load_template(os.path.join(template_dir, 'templates', template_file))
f = None
try:
print "[TRACE] Generating %s" % os.path.join(dest, dest_file)
f = open(os.path.join(dest, dest_file), "w")
f.write(tmpl.render(config = self.config, **kwargs))
finally:
if f!=None: f.close()
def build_app_info(self, project_dir):
tiapp = ElementTree()
assets_tiappxml = os.path.join(project_dir, 'build', 'android', 'bin', 'assets', 'tiapp.xml')
self.app_info = {'fullscreen':'false','navbar-hidden':'false'}
self.app_properties = {}
if not os.path.exists(assets_tiappxml):
shutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml)
tiapp.parse(open(assets_tiappxml, 'r'))
for key in ['id', 'name', 'version', 'publisher', 'url', 'copyright',
'description', 'icon', 'analytics', 'guid', 'navbar-hidden', 'fullscreen']:
el = tiapp.find(key)
if el != None:
self.app_info[key] = el.text
for property_el in tiapp.findall("property"):
name = property_el.get("name")
type = property_el.get("type")
value = property_el.text
if name == None: continue
if type == None: type = "string"
if value == None: value = ""
self.app_properties[name] = {"type": type, "value": value}
def generate_activities(self, app_package_dir):
if not 'activities' in self.tiapp.android: return
for key in self.tiapp.android['activities'].keys():
activity = self.tiapp.android['activities'][key]
print '[DEBUG] generating activity class: ' + activity['classname']
self.render(template_dir, 'JSActivity.java', app_package_dir, activity['classname']+'.java', activity=activity)
def generate_services(self, app_package_dir):
if not 'services' in self.tiapp.android: return
for key in self.tiapp.android['services'].keys():
service = self.tiapp.android['services'][key]
service_type = service['service_type']
print '[DEBUG] generating service type "%s", class "%s"' %(service_type, service['classname'])
if service_type == 'interval':
self.render(template_dir, 'JSIntervalService.java', app_package_dir, service['classname']+'.java', service=service)
else:
self.render(template_dir, 'JSService.java', app_package_dir, service['classname']+'.java', service=service)
def build_modules_info(self, resources_dir, app_bin_dir, include_all_ti_modules=False):
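# Runs the Titanium JS compiler over the Resources tree to find out which bundled
# modules (and which of their methods) the app actually uses, storing the result in
# self.app_modules, then uses ModuleDetector to locate third-party modules declared
# in tiapp.xml and reads their binding metadata from the module jars into
# self.custom_modules.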
self.app_modules = []
(modules, external_child_modules) = bindings.get_all_module_bindings()
compiler = Compiler(self.tiapp, resources_dir, self.java, app_bin_dir, os.path.dirname(app_bin_dir),
include_all_modules=include_all_ti_modules)
compiler.compile(compile_bytecode=False, info_message=None)
for module in compiler.modules:
module_bindings = []
# TODO: we should also detect module properties
for method in compiler.module_methods:
if method.lower().startswith(module+'.') and '.' not in method[len(module)+1:]:
module_bindings.append(method[len(module)+1:])
module_onAppCreate = None
module_class = None
module_apiName = None
for m in modules.keys():
if modules[m]['fullAPIName'].lower() == module:
module_class = m
module_apiName = modules[m]['fullAPIName']
if 'onAppCreate' in modules[m]:
module_onAppCreate = modules[m]['onAppCreate']
break
if module_apiName == None: continue # module wasn't found
if '.' not in module:
ext_modules = []
if module_class in external_child_modules:
for child_module in external_child_modules[module_class]:
if child_module['fullAPIName'].lower() in compiler.modules:
ext_modules.append(child_module)
self.app_modules.append({
'api_name': module_apiName,
'class_name': module_class,
'bindings': module_bindings,
'external_child_modules': ext_modules,
'on_app_create': module_onAppCreate
})
# discover app modules
detector = ModuleDetector(self.project_dir)
missing, detected_modules = detector.find_app_modules(self.tiapp, 'android')
for missing_module in missing: print '[WARN] Couldn\'t find app module: %s' % missing_module['id']
self.custom_modules = []
for module in detected_modules:
if module.jar == None: continue
module_jar = zipfile.ZipFile(module.jar)
module_bindings = bindings.get_module_bindings(module_jar)
if module_bindings is None: continue
for module_class in module_bindings['modules'].keys():
module_proxy = module_bindings['proxies'][module_class]
module_id = module_proxy['proxyAttrs']['id']
module_onAppCreate = None
if 'onAppCreate' in module_proxy:
module_onAppCreate = module_proxy['onAppCreate']
print '[DEBUG] module_id = %s' % module_id
if module_id == module.manifest.moduleid:
print '[DEBUG] appending module: %s' % module_class
self.custom_modules.append({
'class_name': module_class,
'manifest': module.manifest,
'on_app_create': module_onAppCreate
})
def create(self, dir, build_time=False, project_dir=None, include_all_ti_modules=False):
template_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
# Build up output directory tree
if project_dir is None:
project_dir = self.newdir(dir, self.name)
self.project_dir = project_dir
# Paths to Titanium assets that need to be linked into eclipse structure
self.config['ti_tiapp_xml'] = os.path.join(project_dir, 'tiapp.xml')
self.tiapp = TiAppXML(self.config['ti_tiapp_xml'])
resource_dir = os.path.join(project_dir, 'Resources')
self.config['ti_resources_dir'] = resource_dir
app_build_dir = self.newdir(project_dir, 'build')
app_dir = self.newdir(app_build_dir, 'android')
#if os.path.exists(os.path.join(app_dir,'bin')):
# shutil.rmtree(os.path.join(app_dir,'bin'))
if os.path.exists(os.path.join(app_dir,'src')):
shutil.rmtree(os.path.join(app_dir,'src'))
if os.path.exists(os.path.join(app_dir,'res')):
shutil.rmtree(os.path.join(app_dir,'res'))
app_bin_dir = self.newdir(app_dir, 'bin')
app_lib_dir = self.newdir(app_dir, 'lib')
app_src_dir = self.newdir(app_dir, 'src')
app_res_dir = self.newdir(app_dir, 'res')
app_gen_dir = self.newdir(app_dir, 'gen')
app_bin_classes_dir = self.newdir(app_bin_dir, 'classes')
app_res_drawable_dir = self.newdir(app_res_dir, 'drawable')
app_assets_dir = self.newdir(app_dir, 'assets')
app_package_dir = self.newdir(app_gen_dir, *self.id.split('.'))
app_bin_assets_dir = self.newdir(app_bin_dir, 'assets')
self.build_app_info(project_dir)
self.build_modules_info(resource_dir, app_bin_dir, include_all_ti_modules=include_all_ti_modules)
# Create android source
self.render(template_dir, 'AppInfo.java', app_package_dir, self.config['classname'] + 'AppInfo.java',
app_properties = self.app_properties, app_info = self.app_info)
self.render(template_dir, 'AndroidManifest.xml', app_dir, 'AndroidManifest.xml')
self.render(template_dir, 'App.java', app_package_dir, self.config['classname'] + 'Application.java',
app_modules = self.app_modules, custom_modules = self.custom_modules)
self.render(template_dir, 'Activity.java', app_package_dir, self.config['classname'] + 'Activity.java')
self.generate_activities(app_package_dir)
self.generate_services(app_package_dir)
self.render(template_dir, 'classpath', app_dir, '.classpath')
self.render(template_dir, 'project', app_dir, '.project')
self.render(template_dir, 'default.properties', app_dir, 'default.properties')
# Don't override a pre-existing .gitignore in case users have their own preferences
# for what should be in it. (LH #2446)
if not os.path.exists(os.path.join(app_dir, '.gitignore')):
self.render(template_dir, 'gitignore', app_dir, '.gitignore')
else:
print "[TRACE] Skipping copying gitignore -> .gitignore because already exists"
android_project_resources = os.path.join(project_dir,'Resources','android')
if build_time==False and os.path.exists(android_project_resources):
shutil.rmtree(android_project_resources)
if not os.path.exists(android_project_resources):
copy_resources(os.path.join(template_dir,'resources'),android_project_resources)
if __name__ == '__main__':
# this is for testing only for the time being
if len(sys.argv) != 5 or sys.argv[1]=='--help':
print "Usage: %s <name> <id> <directory> <sdk>" % os.path.basename(sys.argv[0])
sys.exit(1)
sdk = AndroidSDK(sys.argv[4])
android = Android(sys.argv[1], sys.argv[2], sdk, None, 'java')
android.create(sys.argv[3])
|
arnaudsj/titanium_mobile
|
support/android/android.py
|
Python
|
apache-2.0
| 11,939
|
[
"VisIt"
] |
4ff0c17ce3f44d508fa81cb93b7b17d7305e4aaaec7d31c483c131db0510da7b
|
from hamcrest import *
from test.features import BrowserTest
from test.features.support import table_from
class GenerateHighVolumeServicesPages(BrowserTest):
def test_all_services_table(self):
self.browser.visit("http://0.0.0.0:8000/high-volume-services/by-transactions-per-year/descending.html")
table = table_from(self.browser.find_by_css('tbody tr'))
assert_that(table, is_([
[u'Service 4', u'DEF', u'\xa312.1m*', u'\xa31.30*', u'94.8%*', u'9,321,067*'],
[u'Service 1', u'ABC', u'\xa3482k', u'\xa30.10', u'100%', u'4,820,000'],
[u'Service 5', u'DEF', u'\xa313.1m*', u'\xa33.40*', u'100%*', u'3,847,098*'],
[u'Service 6', u'DEF', u'\xa341.9m*', u'\xa312.30*', u'30%*', u'3,404,261*'],
[u'Service 7', u'DEF', u'\xa381.1m', u'\xa334.40', u'97.6%', u'2,358,738'],
[u'Service 8', u'DEF', u'\xa39.25m', u'\xa34.23', u'86%', u'2,186,450']
]))
|
imclab/transactions-explorer
|
test/features/test_generate_high_volume_services.py
|
Python
|
mit
| 952
|
[
"VisIt"
] |
9abd79d739ee515c87ba3299f64b2eec72a46474ece9bcf96ee3fe962f573b3c
|
import matplotlib.pyplot as plt
from pycbc import distributions
# Create a mass distribution object that is uniform between 0.5 and 1.5
# solar masses.
mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5))
# Take 100000 random variable samples from this uniform mass distribution.
mass1_samples = mass1_distribution.rvs(size=1000000)
# Draw another distribution that is Gaussian between 0.5 and 1.5 solar masses
# with a mean of 1.2 solar masses and a standard deviation of 0.15 solar
# masses. Gaussian takes the variance as an input so square the standard
# deviation.
variance = 0.15*0.15
mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2,
mass2_var=variance)
# Take 100000 random variable samples from this gaussian mass distribution.
mass2_samples = mass2_gaussian.rvs(size=1000000)
# We can also define two parameters in a single distribution object,
# instead of creating them separately.
two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0),
mass4=(1.6, 3.0))
two_mass_samples = two_mass_distributions.rvs(size=1000000)
# Choose 50 bins for the histogram subplots.
n_bins = 50
# Plot histograms of samples in subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3, = axes.flat
ax0.hist(mass1_samples['mass1'], bins = n_bins)
ax1.hist(mass2_samples['mass2'], bins = n_bins)
ax2.hist(two_mass_samples['mass3'], bins = n_bins)
ax3.hist(two_mass_samples['mass4'], bins = n_bins)
ax0.set_title('Mass 1 samples')
ax1.set_title('Mass 2 samples')
ax2.set_title('Mass 3 samples')
ax3.set_title('Mass 4 samples')
plt.tight_layout()
plt.show()
|
ahnitz/pycbc
|
examples/distributions/mass_examples.py
|
Python
|
gpl-3.0
| 1,651
|
[
"Gaussian"
] |
d355e4629af4d944e3de1f17ee7af603a96f84175ef1230ec3834264b099a427
|
import tensorflow as tf
import numpy as np
class SOM(object):
"""
2-D Self-Organizing Map with Gaussian Neighbourhood function
and linearly decreasing learning rate.
"""
#To check if the SOM has been trained
_trained = False
def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
"""
Initializes all necessary components of the TensorFlow
Graph.
m X n are the dimensions of the SOM. 'n_iterations' should
be an integer denoting the number of iterations undergone
while training.
'dim' is the dimensionality of the training inputs.
'alpha' is a number denoting the initial time(iteration no)-based
learning rate. Default value is 0.3
'sigma' is the initial neighbourhood value, denoting
the radius of influence of the BMU while training. By default, it is
taken to be half of max(m, n).
"""
#Assign required variables first
self._m = m
self._n = n
if alpha is None:
alpha = 0.3
else:
alpha = float(alpha)
if sigma is None:
sigma = max(m, n) / 2.0
else:
sigma = float(sigma)
self._n_iterations = abs(int(n_iterations))
##INITIALIZE GRAPH
self._graph = tf.Graph()
##POPULATE GRAPH WITH NECESSARY COMPONENTS
with self._graph.as_default():
##VARIABLES AND CONSTANT OPS FOR DATA STORAGE
#Randomly initialized weightage vectors for all neurons,
#stored together as a matrix Variable of size [m*n, dim]
self._weightage_vects = tf.Variable(tf.random_normal(
[m*n, dim]))
#Matrix of size [m*n, 2] for SOM grid locations
#of neurons
self._location_vects = tf.constant(np.array(
list(self._neuron_locations(m, n))))
##PLACEHOLDERS FOR TRAINING INPUTS
#We need to assign them as attributes to self, since they
#will be fed in during training
#The training vector
self._vect_input = tf.placeholder(tf.float32, [dim])
#Iteration number
self._iter_input = tf.placeholder(tf.float32)
##CONSTRUCT TRAINING OP PIECE BY PIECE
#Only the final, 'root' training op needs to be assigned as
#an attribute to self, since all the rest will be executed
#automatically during training
#To compute the Best Matching Unit given a vector
#Basically calculates the Euclidean distance between every
#neuron's weightage vector and the input, and returns the
#index of the neuron which gives the least value
bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
tf.pow(tf.subtract(self._weightage_vects, tf.stack(
[self._vect_input for i in range(m*n)])), 2), 1)),
0)
#This will extract the location of the BMU based on the BMU's
#index
slice_input = tf.pad(tf.reshape(bmu_index, [1]),
np.array([[0, 1]]))
bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
tf.constant(np.array([1, 2]))),
[2])
#To compute the alpha and sigma values based on iteration
#number
learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,
self._n_iterations))
_alpha_op = tf.multiply(alpha, learning_rate_op)
_sigma_op = tf.multiply(sigma, learning_rate_op)
#Construct the op that will generate a vector with learning
#rates for all neurons, based on iteration number and location
#wrt BMU.
bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(
self._location_vects, tf.stack(
[bmu_loc for i in range(m*n)])), 2), 1)
neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(
bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)
#Finally, the op that will use learning_rate_op to update
#the weightage vectors of all neurons based on a particular
#input
learning_rate_multiplier = tf.stack([tf.tile(tf.slice(
learning_rate_op, np.array([i]), np.array([1])), [dim])
for i in range(m*n)])
weightage_delta = tf.multiply(
learning_rate_multiplier,
tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),
self._weightage_vects))
new_weightages_op = tf.add(self._weightage_vects,
weightage_delta)
self._training_op = tf.assign(self._weightage_vects,
new_weightages_op)
##INITIALIZE SESSION
self._sess = tf.Session()
##INITIALIZE VARIABLES
#init_op = tf.initialize_all_variables()
init_op = tf.global_variables_initializer()
self._sess.run(init_op)
def _neuron_locations(self, m, n):
"""
Yields one by one the 2-D locations of the individual neurons
in the SOM.
"""
#Nested iterations over both dimensions
#to generate all 2-D locations in the map
for i in range(m):
for j in range(n):
yield np.array([i, j])
def train(self, input_vects):
"""
Trains the SOM.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Current weightage vectors for all neurons (initially random) are
taken as starting conditions for training.
"""
#Training iterations
for iter_no in range(self._n_iterations):
#Train with each vector one by one
for input_vect in input_vects:
self._sess.run(self._training_op,
feed_dict={self._vect_input: input_vect,
self._iter_input: iter_no})
print(str(iter_no)+'/'+str(self._n_iterations))
#Store a centroid grid for easy retrieval later on
centroid_grid = [[] for i in range(self._m)]
self._weightages = list(self._sess.run(self._weightage_vects))
self._locations = list(self._sess.run(self._location_vects))
for i, loc in enumerate(self._locations):
centroid_grid[loc[0]].append(self._weightages[i])
self._centroid_grid = centroid_grid
self._trained = True
def get_centroids(self):
"""
Returns a list of 'm' lists, with each inner list containing
the 'n' corresponding centroid locations as 1-D NumPy arrays.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
return self._centroid_grid
def map_vects(self, input_vects):
"""
Maps each input vector to the relevant neuron in the SOM
grid.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Returns a list of 1-D NumPy arrays containing (row, column)
info for each input vector (in the same order), corresponding
to mapped neuron.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
to_return = []
for vect in input_vects:
min_index = min([i for i in range(len(self._weightages))],
key=lambda x: np.linalg.norm(vect-
self._weightages[x]))
to_return.append(self._locations[min_index])
return to_return
#For plotting the images
from matplotlib import pyplot as plt
#Training inputs for RGBcolors
colors = np.array(
[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.5],
[0.125, 0.529, 1.0],
[0.33, 0.4, 0.67],
[0.6, 0.5, 1.0],
[0., 1., 0.],
[1., 0., 0.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.],
[.33, .33, .33],
[.5, .5, .5],
[.66, .66, .66]])
color_names = \
['black', 'blue', 'darkblue', 'skyblue',
'greyblue', 'lilac', 'green', 'red',
'cyan', 'violet', 'yellow', 'white',
'darkgrey', 'mediumgrey', 'lightgrey']
#Train a 20x20 SOM with 400 iterations
som = SOM(20, 20, 3, 400)
som.train(colors)
#Get output grid
image_grid = som.get_centroids()
#Map colours to their closest neurons
mapped = som.map_vects(colors)
#Plot
plt.imshow(image_grid)
plt.title('Color SOM')
for i, m in enumerate(mapped):
plt.text(m[1], m[0], color_names[i], ha='center', va='center',
bbox=dict(facecolor='white', alpha=0.5, lw=0))
plt.show()
|
uqyge/combustionML
|
som/som.py
|
Python
|
mit
| 9,210
|
[
"Gaussian",
"NEURON"
] |
280e244b65b8f3a93347b6f26bdcec7622da66751b59b607f011e947912c7892
|
import os
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.conf import settings
from hs_core import hydroshare
from hs_core.hydroshare.utils import resource_post_create_actions
from hs_core.testing import TestCaseCommonUtilities
from utils import assert_ref_time_series_file_type_metadata
from hs_file_types.models import RefTimeseriesLogicalFile, GenericLogicalFile
class RefTimeSeriesFileTypeMetaDataTest(TestCaseCommonUtilities, TransactionTestCase):
def setUp(self):
super(RefTimeSeriesFileTypeMetaDataTest, self).setUp()
if not super(RefTimeSeriesFileTypeMetaDataTest, self).is_federated_irods_available():
return
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
super(RefTimeSeriesFileTypeMetaDataTest, self).create_irods_user_in_user_zone()
self.refts_file_name = 'multi_sites_formatted_version1.0.json.refts'
self.refts_file = 'hs_file_types/tests/{}'.format(self.refts_file_name)
# transfer this valid refts file to user zone space for testing
# only need to test that a refts file stored in iRODS user zone space can be used to create a
# composite resource and metadata can be extracted when the file type is set to ref time series
# file type.
# Other detailed tests don't need to be retested for the irods user zone space scenario since,
# as long as the refts file in iRODS user zone space can be read with metadata extracted
# correctly, other functionalities are done with the same common functions regardless of
# where the refts file comes from, either from local disk or from a federated user zone
irods_target_path = '/' + settings.HS_USER_IRODS_ZONE + '/home/' + self.user.username + '/'
file_list_dict = {self.refts_file: irods_target_path + self.refts_file_name}
super(RefTimeSeriesFileTypeMetaDataTest, self).save_files_to_user_zone(file_list_dict)
def tearDown(self):
super(RefTimeSeriesFileTypeMetaDataTest, self).tearDown()
if not super(RefTimeSeriesFileTypeMetaDataTest, self).is_federated_irods_available():
return
super(RefTimeSeriesFileTypeMetaDataTest, self).delete_irods_user_in_user_zone()
def test_refts_set_file_type_to_reftimeseries(self):
# only do federation testing when REMOTE_USE_IRODS is True and irods docker containers
# are set up properly
if not super(RefTimeSeriesFileTypeMetaDataTest, self).is_federated_irods_available():
return
# here we are using a valid ref time series for setting it
# to RefTimeseries file type which includes metadata extraction
fed_test_file_full_path = '/{zone}/home/{username}/{fname}'.format(
zone=settings.HS_USER_IRODS_ZONE, username=self.user.username,
fname=self.refts_file_name)
res_upload_files = []
fed_res_path = hydroshare.utils.get_federated_zone_home_path(fed_test_file_full_path)
res_title = 'Untitled resource'
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title=res_title,
files=res_upload_files,
source_names=[fed_test_file_full_path],
fed_res_path=fed_res_path,
move=False,
metadata=[]
)
# test resource is created on federated zone
self.assertNotEqual(self.composite_resource.resource_federation_path, '')
# set the logical file -which get sets as part of the post resource creation signal
resource_post_create_actions(resource=self.composite_resource, user=self.user,
metadata=self.composite_resource.metadata)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# check that there is one GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 1)
fed_file_path = "data/contents/{}".format(self.refts_file_name)
self.assertEqual(os.path.join('data', 'contents', res_file.short_path), fed_file_path)
# set the refts file to RefTimeseries file type
RefTimeseriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that the content of the json file is same is what we have
# saved in json_file_content field of the file metadata object
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.metadata.json_file_content,
res_file.fed_resource_file.read())
# test extracted ref time series file type metadata
assert_ref_time_series_file_type_metadata(self)
self.composite_resource.delete()
|
ResearchSoftwareInstitute/MyHPOM
|
hs_file_types/tests/test_reftimeseries_metadata_user_zone.py
|
Python
|
bsd-3-clause
| 5,406
|
[
"NetCDF"
] |
6eb30682515ed60baced6e2d5cc1d44b488cbf2a3e061b5e14ec845cc9ddd3a3
|
# $Id$
#
# Copyright (c) 2007, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, July 2007
_version = "0.13.0"
_usage="""
CreateDb [optional arguments] <filename>
NOTES:
- the property names for the database are the union of those for
all molecules.
- missing property values will be set to 'N/A', though this can be
changed with the --missingPropertyVal argument.
- The property names may be altered on loading the database. Any
non-alphanumeric character in a property name will be replaced
with '_'. e.g. "Gold.Goldscore.Constraint.Score" becomes
"Gold_Goldscore_Constraint_Score". This is important to know
when querying.
- Property names are not case sensitive in the database; this may
cause some problems if they are case sensitive in the sd file.
"""
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
from rdkit.RDLogger import logger
from rdkit.Chem.MolDb import Loader
logger = logger()
import cPickle,sys,os
from rdkit.Chem.MolDb.FingerprintUtils import BuildSigFactory,LayeredOptions
from rdkit.Chem.MolDb import FingerprintUtils
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('--outDir','--dbDir',default='',
help='name of the output directory')
parser.add_option('--molDbName',default='Compounds.sqlt',
help='name of the molecule database')
parser.add_option('--molIdName',default='compound_id',
help='name of the database key column')
parser.add_option('--regName',default='molecules',
help='name of the molecular registry table')
parser.add_option('--pairDbName',default='AtomPairs.sqlt',
help='name of the atom pairs database')
parser.add_option('--pairTableName',default='atompairs',
help='name of the atom pairs table')
parser.add_option('--fpDbName',default='Fingerprints.sqlt',
help='name of the 2D fingerprints database')
parser.add_option('--fpTableName',default='rdkitfps',
help='name of the 2D fingerprints table')
parser.add_option('--layeredTableName',default='layeredfps',
help='name of the layered fingerprints table')
parser.add_option('--descrDbName',default='Descriptors.sqlt',
help='name of the descriptor database')
parser.add_option('--descrTableName',default='descriptors_v1',
help='name of the descriptor table')
parser.add_option('--descriptorCalcFilename',default=os.path.join(RDConfig.RDBaseDir,'Projects',
'DbCLI','moe_like.dsc'),
help='name of the file containing the descriptor calculator')
parser.add_option('--errFilename',default='loadErrors.txt',
help='name of the file to contain information about molecules that fail to load')
parser.add_option('--noPairs',default=True,dest='doPairs',action='store_false',
help='skip calculating atom pairs')
parser.add_option('--noFingerprints',default=True,dest='doFingerprints',action='store_false',
help='skip calculating 2D fingerprints')
parser.add_option('--noLayeredFps',default=True,dest='doLayered',action='store_false',
help='skip calculating layered fingerprints')
parser.add_option('--noDescriptors',default=True,dest='doDescriptors',action='store_false',
help='skip calculating descriptors')
parser.add_option('--noProps',default=False,dest='skipProps',action='store_true',
help="don't include molecular properties in the database")
parser.add_option('--noSmiles',default=False,dest='skipSmiles',action='store_true',
help="don't include SMILES in the database (can make loading somewhat faster)")
parser.add_option('--maxRowsCached',default=-1,
help="maximum number of rows to cache before doing a database commit")
parser.add_option('--silent',default=False,action='store_true',
help='do not provide status messages')
parser.add_option('--molFormat',default='',choices=('smiles','sdf',''),
help='specify the format of the input file')
parser.add_option('--nameProp',default='_Name',
help='specify the SD property to be used for the molecule names. Default is to use the mol block name')
parser.add_option('--missingPropertyVal',default='N/A',
help='value to insert in the database if a property value is missing. Default is %default.')
parser.add_option('--addProps',default=False,action='store_true',
help='add computed properties to the output')
parser.add_option('--noExtras',default=False,action='store_true',
help='skip all non-molecule databases')
parser.add_option('--skipLoad','--skipMols',action="store_false",dest='loadMols',default=True,
help='skip the molecule loading (assumes mol db already exists)')
parser.add_option('--doPharm2D',default=False,
action='store_true',
help='calculate Pharm2D fingerprints')
parser.add_option('--pharm2DTableName',default='pharm2dfps',
help='name of the Pharm2D fingerprints table')
parser.add_option('--fdefFile','--fdef',
default=os.path.join(RDConfig.RDDataDir,'Novartis1.fdef'),
help='provide the name of the fdef file to use for 2d pharmacophores')
parser.add_option('--doGobbi2D',default=False,
action='store_true',
help='calculate Gobbi 2D fingerprints')
parser.add_option('--gobbi2DTableName',default='gobbi2dfps',
help='name of the Gobbi 2D fingerprints table')
parser.add_option('--noMorganFps','--noCircularFps',default=True,dest='doMorganFps',action='store_false',
help='skip calculating Morgan (circular) fingerprints')
parser.add_option('--morganFpTableName',default='morganfps',
help='name of the Morgan fingerprints table')
parser.add_option('--delimiter','--delim',default=' ',
help='the delimiter in the input file')
parser.add_option('--titleLine',default=False,action='store_true',
help='the input file contains a title line')
parser.add_option('--smilesColumn','--smilesCol',default=0,type='int',
help='the column index with smiles')
parser.add_option('--nameColumn','--nameCol',default=1,type='int',
help='the column index with mol names')
def CreateDb(options,dataFilename='',supplier=None):
if not dataFilename and supplier is None:
raise ValueError,'Please provide either a data filename or a supplier'
if options.errFilename:
errFile=file(os.path.join(options.outDir,options.errFilename),'w+')
else:
errFile=None
if options.noExtras:
options.doPairs=False
options.doDescriptors=False
options.doFingerprints=False
options.doPharm2D=False
options.doGobbi2D=False
options.doLayered=False
options.doMorganFps=False
if options.loadMols:
if supplier is None:
if not options.molFormat:
ext = os.path.splitext(dataFilename)[-1].lower()
if ext=='.sdf':
options.molFormat='sdf'
elif ext in ('.smi','.smiles','.txt','.csv'):
options.molFormat='smiles'
if not options.delimiter:
# guess the delimiter
import csv
sniffer = csv.Sniffer()
dlct=sniffer.sniff(file(dataFilename,'r').read(2000))
options.delimiter=dlct.delimiter
if not options.silent:
logger.info('Guessing that delimiter is %s. Use --delimiter argument if this is wrong.'%repr(options.delimiter))
if not options.silent:
logger.info('Guessing that mol format is %s. Use --molFormat argument if this is wrong.'%repr(options.molFormat))
if options.molFormat=='smiles':
if options.delimiter=='\\t': options.delimiter='\t'
supplier=Chem.SmilesMolSupplier(dataFilename,
titleLine=options.titleLine,
delimiter=options.delimiter,
smilesColumn=options.smilesColumn,
nameColumn=options.nameColumn
)
else:
supplier = Chem.SDMolSupplier(dataFilename)
if not options.silent: logger.info('Reading molecules and constructing molecular database.')
Loader.LoadDb(supplier,os.path.join(options.outDir,options.molDbName),
errorsTo=errFile,regName=options.regName,nameCol=options.molIdName,
skipProps=options.skipProps,defaultVal=options.missingPropertyVal,
addComputedProps=options.addProps,uniqNames=True,
skipSmiles=options.skipSmiles,maxRowsCached=int(options.maxRowsCached),
silent=options.silent,nameProp=options.nameProp,
lazySupplier=int(options.maxRowsCached)>0)
if options.doPairs:
pairConn = DbConnect(os.path.join(options.outDir,options.pairDbName))
pairCurs = pairConn.GetCursor()
try:
pairCurs.execute('drop table %s'%(options.pairTableName))
except:
pass
pairCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,atompairfp blob,torsionfp blob)'%(options.pairTableName,
options.molIdName))
if options.doFingerprints or options.doPharm2D or options.doGobbi2D or options.doLayered:
fpConn = DbConnect(os.path.join(options.outDir,options.fpDbName))
fpCurs=fpConn.GetCursor()
try:
fpCurs.execute('drop table %s'%(options.fpTableName))
except:
pass
try:
fpCurs.execute('drop table %s'%(options.pharm2DTableName))
except:
pass
try:
fpCurs.execute('drop table %s'%(options.gobbi2DTableName))
except:
pass
try:
fpCurs.execute('drop table %s'%(options.layeredTableName))
except:
pass
if options.doFingerprints:
fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,rdkfp blob)'%(options.fpTableName,
options.molIdName))
if options.doLayered:
layeredQs = ','.join('?'*LayeredOptions.nWords)
colDefs=','.join(['Col_%d integer'%(x+1) for x in range(LayeredOptions.nWords)])
fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,%s)'%(options.layeredTableName,
options.molIdName,
colDefs))
if options.doPharm2D:
fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,pharm2dfp blob)'%(options.pharm2DTableName,
options.molIdName))
sigFactory = BuildSigFactory(options)
if options.doGobbi2D:
fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,gobbi2dfp blob)'%(options.gobbi2DTableName,
options.molIdName))
from rdkit.Chem.Pharm2D import Generate,Gobbi_Pharm2D
if options.doMorganFps :
fpConn = DbConnect(os.path.join(options.outDir,options.fpDbName))
fpCurs=fpConn.GetCursor()
try:
fpCurs.execute('drop table %s'%(options.morganFpTableName))
except:
pass
fpCurs.execute('create table %s (guid integer not null primary key,%s varchar not null unique,morganfp blob)'%(options.morganFpTableName,
options.molIdName))
if options.doDescriptors:
descrConn=DbConnect(os.path.join(options.outDir,options.descrDbName))
calc = cPickle.load(file(options.descriptorCalcFilename,'rb'))
nms = [x for x in calc.GetDescriptorNames()]
descrCurs = descrConn.GetCursor()
descrs = ['guid integer not null primary key','%s varchar not null unique'%options.molIdName]
descrs.extend(['%s float'%x for x in nms])
try:
descrCurs.execute('drop table %s'%(options.descrTableName))
except:
pass
descrCurs.execute('create table %s (%s)'%(options.descrTableName,','.join(descrs)))
descrQuery=','.join([DbModule.placeHolder]*len(descrs))
pairRows = []
fpRows = []
layeredRows = []
descrRows = []
pharm2DRows=[]
gobbi2DRows=[]
morganRows = []
if not options.silent: logger.info('Generating fingerprints and descriptors:')
molConn = DbConnect(os.path.join(options.outDir,options.molDbName))
molCurs = molConn.GetCursor()
if not options.skipSmiles:
molCurs.execute('select guid,%s,smiles,molpkl from %s'%(options.molIdName,options.regName))
else:
molCurs.execute('select guid,%s,molpkl from %s'%(options.molIdName,options.regName))
i=0
while 1:
try:
tpl = molCurs.fetchone()
molGuid = tpl[0]
molId = tpl[1]
pkl = tpl[-1]
i+=1
except:
break
mol = Chem.Mol(str(pkl))
if not mol: continue
if options.doPairs:
pairs = FingerprintUtils.BuildAtomPairFP(mol)
torsions = FingerprintUtils.BuildTorsionsFP(mol)
pkl1 = DbModule.binaryHolder(pairs.ToBinary())
pkl2 = DbModule.binaryHolder(torsions.ToBinary())
row = (molGuid,molId,pkl1,pkl2)
pairRows.append(row)
if options.doFingerprints:
fp2 = FingerprintUtils.BuildRDKitFP(mol)
pkl = DbModule.binaryHolder(fp2.ToBinary())
row = (molGuid,molId,pkl)
fpRows.append(row)
if options.doLayered:
words = LayeredOptions.GetWords(mol)
row = [molGuid,molId]+words
layeredRows.append(row)
if options.doDescriptors:
descrs= calc.CalcDescriptors(mol)
row = [molGuid,molId]
row.extend(descrs)
descrRows.append(row)
if options.doPharm2D:
FingerprintUtils.sigFactory=sigFactory
fp= FingerprintUtils.BuildPharm2DFP(mol)
pkl = DbModule.binaryHolder(fp.ToBinary())
row = (molGuid,molId,pkl)
pharm2DRows.append(row)
if options.doGobbi2D:
FingerprintUtils.sigFactory=Gobbi_Pharm2D.factory
fp= FingerprintUtils.BuildPharm2DFP(mol)
pkl = DbModule.binaryHolder(fp.ToBinary())
row = (molGuid,molId,pkl)
gobbi2DRows.append(row)
if options.doMorganFps:
morgan = FingerprintUtils.BuildMorganFP(mol)
pkl = DbModule.binaryHolder(morgan.ToBinary())
row = (molGuid,molId,pkl)
morganRows.append(row)
if not i%500:
if len(pairRows):
pairCurs.executemany('insert into %s values (?,?,?,?)'%options.pairTableName,
pairRows)
pairRows = []
pairConn.Commit()
if len(fpRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.fpTableName,
fpRows)
fpRows = []
fpConn.Commit()
if len(layeredRows):
fpCurs.executemany('insert into %s values (?,?,%s)'%(options.layeredTableName,layeredQs),
layeredRows)
layeredRows = []
fpConn.Commit()
if len(descrRows):
descrCurs.executemany('insert into %s values (%s)'%(options.descrTableName,descrQuery),
descrRows)
descrRows = []
descrConn.Commit()
if len(pharm2DRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.pharm2DTableName,
pharm2DRows)
pharm2DRows = []
fpConn.Commit()
if len(gobbi2DRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.gobbi2DTableName,
gobbi2DRows)
gobbi2DRows = []
fpConn.Commit()
if len(morganRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.morganFpTableName,
morganRows)
morganRows = []
fpConn.Commit()
if not options.silent and not i%500:
logger.info(' Done: %d'%(i))
if len(pairRows):
pairCurs.executemany('insert into %s values (?,?,?,?)'%options.pairTableName,
pairRows)
pairRows = []
pairConn.Commit()
if len(fpRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.fpTableName,
fpRows)
fpRows = []
fpConn.Commit()
if len(layeredRows):
fpCurs.executemany('insert into %s values (?,?,%s)'%(options.layeredTableName,layeredQs),
layeredRows)
layeredRows = []
fpConn.Commit()
if len(descrRows):
descrCurs.executemany('insert into %s values (%s)'%(options.descrTableName,descrQuery),
descrRows)
descrRows = []
descrConn.Commit()
if len(pharm2DRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.pharm2DTableName,
pharm2DRows)
pharm2DRows = []
fpConn.Commit()
if len(gobbi2DRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.gobbi2DTableName,
gobbi2DRows)
gobbi2DRows = []
fpConn.Commit()
if len(morganRows):
fpCurs.executemany('insert into %s values (?,?,?)'%options.morganFpTableName,
morganRows)
morganRows = []
fpConn.Commit()
if not options.silent:
logger.info('Finished.')
if __name__=='__main__':
options,args = parser.parse_args()
if options.loadMols:
if len(args)!=1:
parser.error('please provide a filename argument')
dataFilename = args[0]
try:
dataFile = file(dataFilename,'r')
except IOError:
logger.error('input file %s does not exist'%(dataFilename))
sys.exit(0)
dataFile=None
if not options.outDir:
prefix = os.path.splitext(dataFilename)[0]
options.outDir=prefix
if not os.path.exists(options.outDir):
try:
os.mkdir(options.outDir)
except:
logger.error('could not create output directory %s'%options.outDir)
sys.exit(1)
if 1:
CreateDb(options,dataFilename)
else:
import cProfile
cProfile.run("CreateDb(options,dataFilename)","create.prof")
import pstats
p = pstats.Stats('create.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(25)
|
rdkit/rdkit-orig
|
Projects/DbCLI/CreateDb.py
|
Python
|
bsd-3-clause
| 20,427
|
[
"RDKit"
] |
205a089b933831ad049f4edde4c323cbafc1308a5ca1914564ef4d3f36ac1d49
|
# -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <simbad.harvard.edu/simbad/>`_ database:
c1 = coord.ICRS(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s)
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11.1 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.Galactocentric(ring_rep)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel('$v_x$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
axes[1].set_ylabel('$v_y$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
fig.tight_layout()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig,ax = plt.subplots(1, 1, figsize=(8,6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(r'$\mu_l \, \cos b$ [{0}]'.format((u.mas/u.yr).to_string('latex_inline')))
ax.legend()
|
stargaser/astropy
|
examples/coordinates/plot_galactocentric-frame.py
|
Python
|
bsd-3-clause
| 7,979
|
[
"Galaxy"
] |
1bb6b9f163b33a096befcf7b88b8323f9312d7b4eb453c4f9ba7c792ee2c8f83
|
from string import atoi,atof
import sys,os,commands
def get_interface(pdb,interfaceid):
cmd="curl --compressed http://eppic-web.org/ewui/ewui/fileDownload?type=interface\&id=%s\&interface=%s > %s-%s.%s.pdb"%(pdb,interfaceid,pdb,pdb,interfaceid)
os.system(cmd)
chains=commands.getoutput("cat %s-%s.%s.pdb | grep SEQRES | awk '{print $3}' | sort | uniq"%(pdb,pdb,interfaceid)).split("\n")
return chains
def get_pdbinterfaces(pdb1,interfaceid1,pdb2,interfaceid2):
chain1=get_interface(pdb1,interfaceid1)
chain2=get_interface(pdb2,interfaceid2)
fname="%s_%s-%s_%s.pml"%(pdb1,interfaceid1,pdb2,interfaceid2)
f=open(fname,'w')
f.write("reinitialize\n")
f.write("cd /home/baskaran_k/asym\n")
f.write("load %s-%s.%s.pdb\n"%(pdb1,pdb1,interfaceid1))
f.write("load %s-%s.%s.pdb\n"%(pdb2,pdb2,interfaceid2))
f.write("show cartoon\n")
f.write("hide lines\n")
f.write("align %s-%s.%s//%s//, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[0],pdb2,pdb2,interfaceid2,chain2[0]))
f.write("center\n")
f.write("color cyan, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[0]))
f.write("color yellow, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[1]))
f.write("color green, %s-%s.%s//%s//\n"%(pdb2,pdb2,interfaceid2,chain2[0]))
f.write("color red, %s-%s.%s//%s//\n"%(pdb2,pdb2,interfaceid2,chain2[1]))
f.close()
os.system("pymol %s"%(fname))
def doit_for_all(fname):
f=open(fname,'r')
exclude="xxxx"
for l in f:
w=l.split("\n")[0].split(" ")
#print w[0],w[6],w[3],w[9]
if exclude != w[0] :
get_pdbinterfaces(w[0],w[1],w[2],w[3])
c=raw_input("Want to exclude from list y/n:")
if c == "y" : exclude=w[0]
else: pass
if __name__=="__main__":
pdb1=sys.argv[1]
interfaceid1=sys.argv[2]
pdb2=sys.argv[3]
interfaceid2=sys.argv[4]
get_pdbinterfaces(pdb1,interfaceid1,pdb2,interfaceid2)
# fname=sys.argv[1]
# doit_for_all(fname)
|
kumar-physics/general-scripts
|
python/sp.py
|
Python
|
gpl-3.0
| 1,860
|
[
"PyMOL"
] |
17134a42ceaaa1d91320c4614c4ded2afabd00c0762d18bdaec69bf3148783ab
|
from collections import defaultdict
import logging
import pickle
import json
from typing import Dict, Optional, Tuple
from ray.tune import ExperimentAnalysis
from ray.tune.sample import Domain, Float, Quantized
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import unflatten_dict
try: # Python 3 only -- needed for lint test.
import bayes_opt as byo
except ImportError:
byo = None
from ray.tune.suggest import Searcher
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
def _dict_hash(config, precision):
flatconfig = flatten_dict(config)
for param, value in flatconfig.items():
if isinstance(value, float):
flatconfig[param] = "{:.{digits}f}".format(value, digits=precision)
hashed = json.dumps(flatconfig, sort_keys=True, default=str)
return hashed
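# Illustrative example (not part of the original module): with precision=5,
# _dict_hash({"lr": 0.001234567, "depth": 3}, 5) flattens the config, formats
# the float to five decimal places ("0.00123") and returns the key-sorted JSON
# string '{"depth": 3, "lr": "0.00123"}', so near-identical float configs hash
# to the same value and can be flagged as duplicates in suggest() below.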
class BayesOptSearch(Searcher):
"""Uses fmfn/BayesianOptimization to optimize hyperparameters.
fmfn/BayesianOptimization is a library for Bayesian Optimization. More
info can be found here: https://github.com/fmfn/BayesianOptimization.
You will need to install fmfn/BayesianOptimization via the following:
.. code-block:: bash
pip install bayesian-optimization
This algorithm requires setting a search space using the
`BayesianOptimization search space specification`_.
Args:
space (dict): Continuous search space. Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
The default value is a dictionary with three keys:
- kind: ucb (Upper Confidence Bound)
- kappa: 2.576
- xi: 0.0
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
Tune automatically converts search spaces to BayesOptSearch's format:
.. code-block:: python
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100)
}
bayesopt = BayesOptSearch(metric="mean_loss", mode="min")
tune.run(my_func, config=config, search_alg=bayesopt)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
space = {
'width': (0, 20),
'height': (-100, 100),
}
bayesopt = BayesOptSearch(space, metric="mean_loss", mode="min")
tune.run(my_func, search_alg=bayesopt)
"""
# bayes_opt.BayesianOptimization: Optimization object
optimizer = None
def __init__(self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self.setup_optimizer()
def setup_optimizer(self):
self.optimizer = byo.BayesianOptimization(
f=None,
pbounds=self._space,
verbose=self._verbose,
random_state=self._random_state)
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self.optimizer:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
self.setup_optimizer()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
"""Return new point to be explored by black box function.
Args:
trial_id (str): Id of the trial.
This is a short alphanumerical string.
Returns:
Either a dictionary describing the new point to explore or
None, when no new point is to be explored for the time being.
"""
if not self.optimizer:
raise RuntimeError(
"Trying to sample a configuration from {}, but no search "
"space has been defined. Either pass the `{}` argument when "
"instantiating the search algorithm, or pass a `config` to "
"`tune.run()`.".format(self.__class__.__name__, "space"))
# If we have more active trials than the allowed maximum
total_live_trials = len(self._live_trial_mapping)
if self.max_concurrent and self.max_concurrent <= total_live_trials:
# we stop the suggestion and return None.
return None
# We compute the new point to explore
config = self.optimizer.suggest(self.utility)
config_hash = _dict_hash(config, self.repeat_float_precision)
# Check if already computed
already_seen = config_hash in self._config_counter
self._config_counter[config_hash] += 1
top_repeats = max(self._config_counter.values())
# If patience is set and we've repeated a trial numerous times,
# we terminate the experiment.
if self._patience is not None and top_repeats > self._patience:
return Searcher.FINISHED
# If we have seen a value before, we'll skip it.
if already_seen and self._skip_duplicate:
logger.info("Skipping duplicated config: {}.".format(config))
return None
# If we are still in the random search part and we are waiting for
# trials to complete
if len(self._buffered_trial_results) < self.random_search_trials:
# We check if we have already maxed out the number of requested
# random search trials
if self._total_random_search_trials == self.random_search_trials:
# If so we stop the suggestion and return None
return None
# Otherwise we increase the total number of random search trials
if config:
self._total_random_search_trials += 1
# Save the new trial to the trial mapping
self._live_trial_mapping[trial_id] = config
# Return a deep copy of the mapping
return unflatten_dict(config)
def register_analysis(self, analysis: ExperimentAnalysis):
"""Integrate the given analysis into the gaussian process.
Args:
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
"""
for (_, report), params in zip(
analysis.dataframe(metric=self._metric,
mode=self._mode).iterrows(),
analysis.get_all_configs().values()):
# We add the obtained results to the
# gaussian process optimizer
self._register_result(params, report)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
Args:
trial_id (str): Id of the trial.
This is a short alphanumerical string.
result (dict): Dictionary of result.
May be none when some error occurs.
error (bool): Boolean representing a previous error state.
The result should be None when error is True.
"""
# We try to get the parameters used for this trial
params = self._live_trial_mapping.pop(trial_id, None)
# The results may be None if some exception is raised during the trial.
# Also, if the parameters are None (were already processed)
# we interrupt the following procedure.
# Additionally, if somehow the error is True but
# the remaining values are not we also block the method
if result is None or params is None or error:
return
# If we don't have to execute some random search steps
if len(self._buffered_trial_results) >= self.random_search_trials:
# we simply register the obtained result
self._register_result(params, result)
return
# We store the results into a temporary cache
self._buffered_trial_results.append((params, result))
# If the random search finished,
# we update the BO with all the computed points.
if len(self._buffered_trial_results) == self.random_search_trials:
for params, result in self._buffered_trial_results:
self._register_result(params, result)
def _register_result(self, params: Tuple[str], result: Dict):
"""Register given tuple of params and results."""
self.optimizer.register(params, self._metric_op * result[self.metric])
def save(self, checkpoint_path: str):
"""Storing current optimizer state."""
with open(checkpoint_path, "wb") as f:
pickle.dump(
(self.optimizer, self._buffered_trial_results,
self._total_random_search_trials, self._config_counter), f)
def restore(self, checkpoint_path: str):
"""Restoring current optimizer state."""
with open(checkpoint_path, "rb") as f:
(self.optimizer, self._buffered_trial_results,
self._total_random_search_trials,
self._config_counter) = pickle.load(f)
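# Editor's note (illustrative, hypothetical path): a typical checkpoint
# round trip is searcher.save("/tmp/bayesopt_state.pkl") followed later by
# searcher.restore("/tmp/bayesopt_state.pkl"), which brings back the
# optimizer state, the buffered random-search results and the trial and
# duplicate counters.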
@staticmethod
def convert_search_space(spec: Dict) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a BayesOpt search space.")
def resolve_value(domain: Domain) -> Tuple[float, float]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"BayesOpt search does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"BayesOpt does not support specific sampling methods. "
"The {} sampler will be dropped.".format(sampler))
return (domain.lower, domain.upper)
raise ValueError("BayesOpt does not support parameters of type "
"`{}`".format(type(domain).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
bounds = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
return bounds
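# Editor's addition (illustrative sketch, not part of the upstream module).
# It assumes the enclosing searcher class is named BayesOptSearch, as in Ray
# Tune, and that Ray Tune is installed; the parameter names and ranges below
# are made up. It shows how a nested Tune search space is flattened into the
# {"path/to/param": (lower, upper)} bounds dict consumed by BayesOpt.
if __name__ == "__main__":
    from ray import tune

    example_space = {
        "model": {"lr": tune.uniform(1e-4, 1e-1)},
        "dropout": tune.uniform(0.0, 0.5),
    }
    # Nested keys are joined with "/", e.g. "model/lr".
    print(BayesOptSearch.convert_search_space(example_space))
    # Expected shape: {"model/lr": (0.0001, 0.1), "dropout": (0.0, 0.5)}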
|
robertnishihara/ray
|
python/ray/tune/suggest/bayesopt.py
|
Python
|
apache-2.0
| 15,477
|
[
"Gaussian"
] |
fd28f068eaa6d310c41303f76d39f54a02415e411e5bcaab61860bc96c815497
|
import sympy.mpmath
from sympy.mpmath import *
from sympy.mpmath.libmp import *
import random
def test_type_compare():
assert mpf(2) == mpc(2,0)
assert mpf(0) == mpc(0)
assert mpf(2) != mpc(2, 0.00001)
assert mpf(2) == 2.0
assert mpf(2) != 3.0
assert mpf(2) == 2
assert mpf(2) != '2.0'
assert mpc(2) != '2.0'
def test_add():
assert mpf(2.5) + mpf(3) == 5.5
assert mpf(2.5) + 3 == 5.5
assert mpf(2.5) + 3.0 == 5.5
assert 3 + mpf(2.5) == 5.5
assert 3.0 + mpf(2.5) == 5.5
assert (3+0j) + mpf(2.5) == 5.5
assert mpc(2.5) + mpf(3) == 5.5
assert mpc(2.5) + 3 == 5.5
assert mpc(2.5) + 3.0 == 5.5
assert mpc(2.5) + (3+0j) == 5.5
assert 3 + mpc(2.5) == 5.5
assert 3.0 + mpc(2.5) == 5.5
assert (3+0j) + mpc(2.5) == 5.5
def test_sub():
assert mpf(2.5) - mpf(3) == -0.5
assert mpf(2.5) - 3 == -0.5
assert mpf(2.5) - 3.0 == -0.5
assert 3 - mpf(2.5) == 0.5
assert 3.0 - mpf(2.5) == 0.5
assert (3+0j) - mpf(2.5) == 0.5
assert mpc(2.5) - mpf(3) == -0.5
assert mpc(2.5) - 3 == -0.5
assert mpc(2.5) - 3.0 == -0.5
assert mpc(2.5) - (3+0j) == -0.5
assert 3 - mpc(2.5) == 0.5
assert 3.0 - mpc(2.5) == 0.5
assert (3+0j) - mpc(2.5) == 0.5
def test_mul():
assert mpf(2.5) * mpf(3) == 7.5
assert mpf(2.5) * 3 == 7.5
assert mpf(2.5) * 3.0 == 7.5
assert 3 * mpf(2.5) == 7.5
assert 3.0 * mpf(2.5) == 7.5
assert (3+0j) * mpf(2.5) == 7.5
assert mpc(2.5) * mpf(3) == 7.5
assert mpc(2.5) * 3 == 7.5
assert mpc(2.5) * 3.0 == 7.5
assert mpc(2.5) * (3+0j) == 7.5
assert 3 * mpc(2.5) == 7.5
assert 3.0 * mpc(2.5) == 7.5
assert (3+0j) * mpc(2.5) == 7.5
def test_div():
assert mpf(6) / mpf(3) == 2.0
assert mpf(6) / 3 == 2.0
assert mpf(6) / 3.0 == 2.0
assert 6 / mpf(3) == 2.0
assert 6.0 / mpf(3) == 2.0
assert (6+0j) / mpf(3.0) == 2.0
assert mpc(6) / mpf(3) == 2.0
assert mpc(6) / 3 == 2.0
assert mpc(6) / 3.0 == 2.0
assert mpc(6) / (3+0j) == 2.0
assert 6 / mpc(3) == 2.0
assert 6.0 / mpc(3) == 2.0
assert (6+0j) / mpc(3) == 2.0
def test_pow():
assert mpf(6) ** mpf(3) == 216.0
assert mpf(6) ** 3 == 216.0
assert mpf(6) ** 3.0 == 216.0
assert 6 ** mpf(3) == 216.0
assert 6.0 ** mpf(3) == 216.0
assert (6+0j) ** mpf(3.0) == 216.0
assert mpc(6) ** mpf(3) == 216.0
assert mpc(6) ** 3 == 216.0
assert mpc(6) ** 3.0 == 216.0
assert mpc(6) ** (3+0j) == 216.0
assert 6 ** mpc(3) == 216.0
assert 6.0 ** mpc(3) == 216.0
assert (6+0j) ** mpc(3) == 216.0
def test_mixed_misc():
assert 1 + mpf(3) == mpf(3) + 1 == 4
assert 1 - mpf(3) == -(mpf(3) - 1) == -2
assert 3 * mpf(2) == mpf(2) * 3 == 6
assert 6 / mpf(2) == mpf(6) / 2 == 3
assert 1.0 + mpf(3) == mpf(3) + 1.0 == 4
assert 1.0 - mpf(3) == -(mpf(3) - 1.0) == -2
assert 3.0 * mpf(2) == mpf(2) * 3.0 == 6
assert 6.0 / mpf(2) == mpf(6) / 2.0 == 3
def test_add_misc():
mp.dps = 15
assert mpf(4) + mpf(-70) == -66
assert mpf(1) + mpf(1.1)/80 == 1 + 1.1/80
assert mpf((1, 10000000000)) + mpf(3) == mpf((1, 10000000000))
assert mpf(3) + mpf((1, 10000000000)) == mpf((1, 10000000000))
assert mpf((1, -10000000000)) + mpf(3) == mpf(3)
assert mpf(3) + mpf((1, -10000000000)) == mpf(3)
assert mpf(1) + 1e-15 != 1
assert mpf(1) + 1e-20 == 1
assert mpf(1.07e-22) + 0 == mpf(1.07e-22)
assert mpf(0) + mpf(1.07e-22) == mpf(1.07e-22)
def test_complex_misc():
# many more tests needed
assert 1 + mpc(2) == 3
assert not mpc(2).ae(2 + 1e-13)
assert mpc(2+1e-15j).ae(2)
def test_complex_zeros():
for a in [0,2]:
for b in [0,3]:
for c in [0,4]:
for d in [0,5]:
assert mpc(a,b)*mpc(c,d) == complex(a,b)*complex(c,d)
def test_hash():
for i in range(-256, 256):
assert hash(mpf(i)) == hash(i)
assert hash(mpf(0.5)) == hash(0.5)
assert hash(mpc(2,3)) == hash(2+3j)
# Check that this doesn't fail
assert hash(inf)
# Check that overflow doesn't assign equal hashes to large numbers
assert hash(mpf('1e1000')) != hash('1e10000')
assert hash(mpc(100,'1e1000')) != hash(mpc(200,'1e1000'))
# Advanced rounding test
def test_add_rounding():
mp.dps = 15
a = from_float(1e-50)
assert mpf_sub(mpf_add(fone, a, 53, round_up), fone, 53, round_up) == from_float(2.2204460492503131e-16)
assert mpf_sub(fone, a, 53, round_up) == fone
assert mpf_sub(fone, mpf_sub(fone, a, 53, round_down), 53, round_down) == from_float(1.1102230246251565e-16)
assert mpf_add(fone, a, 53, round_down) == fone
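# Editor's note on the directed-rounding checks above: adding the tiny
# positive value 1e-50 to 1 with round-up yields the next float above 1
# (one ulp, 2**-52 ~= 2.22e-16 larger), while subtracting it with
# round-down yields the next float below 1 (2**-53 ~= 1.11e-16 smaller);
# with the opposite rounding directions the result stays exactly 1.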
def test_almost_equal():
assert mpf(1.2).ae(mpf(1.20000001), 1e-7)
assert not mpf(1.2).ae(mpf(1.20000001), 1e-9)
assert not mpf(-0.7818314824680298).ae(mpf(-0.774695868667929))
def test_arithmetic_functions():
import operator
ops = [(operator.add, fadd), (operator.sub, fsub), (operator.mul, fmul),
(operator.div, fdiv)]
a = mpf(0.27)
b = mpf(1.13)
c = mpc(0.51+2.16j)
d = mpc(1.08-0.99j)
for x in [a,b,c,d]:
for y in [a,b,c,d]:
for op, fop in ops:
if fop is not fdiv:
mp.prec = 200
z0 = op(x,y)
mp.prec = 60
z1 = op(x,y)
mp.prec = 53
z2 = op(x,y)
assert fop(x, y, prec=60) == z1
assert fop(x, y) == z2
if fop is not fdiv:
assert fop(x, y, prec=inf) == z0
assert fop(x, y, dps=inf) == z0
assert fop(x, y, exact=True) == z0
assert fneg(fneg(z1, exact=True), prec=inf) == z1
assert fneg(z1) == -(+z1)
mp.dps = 15
def test_exact_integer_arithmetic():
# XXX: re-fix this so that all operations are tested with all rounding modes
random.seed(0)
for prec in [6, 10, 25, 40, 100, 250, 725]:
for rounding in ['d', 'u', 'f', 'c', 'n']:
mp.dps = prec
M = 10**(prec-2)
M2 = 10**(prec//2-2)
for i in range(10):
a = random.randint(-M, M)
b = random.randint(-M, M)
assert mpf(a, rounding=rounding) == a
assert int(mpf(a, rounding=rounding)) == a
assert int(mpf(str(a), rounding=rounding)) == a
assert mpf(a) + mpf(b) == a + b
assert mpf(a) - mpf(b) == a - b
assert -mpf(a) == -a
a = random.randint(-M2, M2)
b = random.randint(-M2, M2)
assert mpf(a) * mpf(b) == a*b
assert mpf_mul(from_int(a), from_int(b), mp.prec, rounding) == from_int(a*b)
mp.dps = 15
def test_odd_int_bug():
assert to_int(from_int(3), round_nearest) == 3
def test_str_1000_digits():
mp.dps = 1001
# last digit may be wrong
assert str(mpf(2)**0.5)[-10:-1] == '9518488472'[:9]
assert str(pi)[-10:-1] == '2164201989'[:9]
mp.dps = 15
def test_str_10000_digits():
mp.dps = 10001
# last digit may be wrong
assert str(mpf(2)**0.5)[-10:-1] == '5873258351'[:9]
assert str(pi)[-10:-1] == '5256375678'[:9]
mp.dps = 15
def test_monitor():
f = lambda x: x**2
a = []
b = []
g = monitor(f, a.append, b.append)
assert g(3) == 9
assert g(4) == 16
assert a[0] == ((3,), {})
assert b[0] == 9
def test_nint_distance():
assert nint_distance(mpf(-3)) == (-3, -inf)
assert nint_distance(mpc(-3)) == (-3, -inf)
assert nint_distance(mpf(-3.1)) == (-3, -3)
assert nint_distance(mpf(-3.01)) == (-3, -6)
assert nint_distance(mpf(-3.001)) == (-3, -9)
assert nint_distance(mpf(-3.0001)) == (-3, -13)
assert nint_distance(mpf(-2.9)) == (-3, -3)
assert nint_distance(mpf(-2.99)) == (-3, -6)
assert nint_distance(mpf(-2.999)) == (-3, -9)
assert nint_distance(mpf(-2.9999)) == (-3, -13)
assert nint_distance(mpc(-3+0.1j)) == (-3, -3)
assert nint_distance(mpc(-3+0.01j)) == (-3, -6)
assert nint_distance(mpc(-3.1+0.1j)) == (-3, -3)
assert nint_distance(mpc(-3.01+0.01j)) == (-3, -6)
assert nint_distance(mpc(-3.001+0.001j)) == (-3, -9)
assert nint_distance(mpf(0)) == (0, -inf)
assert nint_distance(mpf(0.01)) == (0, -6)
assert nint_distance(mpf('1e-100')) == (0, -332)
def test_floor_ceil_nint_frac():
mp.dps = 15
for n in range(-10,10):
assert floor(n) == n
assert floor(n+0.5) == n
assert ceil(n) == n
assert ceil(n+0.5) == n+1
assert nint(n) == n
# nint rounds to even
if n % 2 == 1:
assert nint(n+0.5) == n+1
else:
assert nint(n+0.5) == n
assert floor(inf) == inf
assert floor(ninf) == ninf
assert isnan(floor(nan))
assert ceil(inf) == inf
assert ceil(ninf) == ninf
assert isnan(ceil(nan))
assert nint(inf) == inf
assert nint(ninf) == ninf
assert isnan(nint(nan))
assert floor(0.1) == 0
assert floor(0.9) == 0
assert floor(-0.1) == -1
assert floor(-0.9) == -1
assert floor(10000000000.1) == 10000000000
assert floor(10000000000.9) == 10000000000
assert floor(-10000000000.1) == -10000000000-1
assert floor(-10000000000.9) == -10000000000-1
assert floor(1e-100) == 0
assert floor(-1e-100) == -1
assert floor(1e100) == 1e100
assert floor(-1e100) == -1e100
assert ceil(0.1) == 1
assert ceil(0.9) == 1
assert ceil(-0.1) == 0
assert ceil(-0.9) == 0
assert ceil(10000000000.1) == 10000000000+1
assert ceil(10000000000.9) == 10000000000+1
assert ceil(-10000000000.1) == -10000000000
assert ceil(-10000000000.9) == -10000000000
assert ceil(1e-100) == 1
assert ceil(-1e-100) == 0
assert ceil(1e100) == 1e100
assert ceil(-1e100) == -1e100
assert nint(0.1) == 0
assert nint(0.9) == 1
assert nint(-0.1) == 0
assert nint(-0.9) == -1
assert nint(10000000000.1) == 10000000000
assert nint(10000000000.9) == 10000000000+1
assert nint(-10000000000.1) == -10000000000
assert nint(-10000000000.9) == -10000000000-1
assert nint(1e-100) == 0
assert nint(-1e-100) == 0
assert nint(1e100) == 1e100
assert nint(-1e100) == -1e100
assert floor(3.2+4.6j) == 3+4j
assert ceil(3.2+4.6j) == 4+5j
assert nint(3.2+4.6j) == 3+5j
for n in range(-10,10):
assert frac(n) == 0
assert frac(0.25) == 0.25
assert frac(1.25) == 0.25
assert frac(2.25) == 0.25
assert frac(-0.25) == 0.75
assert frac(-1.25) == 0.75
assert frac(-2.25) == 0.75
assert frac('1e100000000000000') == 0
u = mpf('1e-100000000000000')
assert frac(u) == u
assert frac(-u) == 1 # rounding!
u = mpf('1e-400')
assert frac(-u, prec=0) == fsub(1, u, exact=True)
assert frac(3.25+4.75j) == 0.25+0.75j
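# Editor's note on the round-half-to-even rule exercised above: ties go to
# the nearest even integer, so nint(2.5) == 2 while nint(3.5) == 4.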
def test_isnan_etc():
from sympy.mpmath.rational import mpq
assert isnan(nan) == True
assert isnan(3) == False
assert isnan(mpf(3)) == False
assert isnan(inf) == False
assert isnan(mpc(2,nan)) == True
assert isnan(mpc(2,nan)) == True
assert isnan(mpc(nan,nan)) == True
assert isnan(mpc(2,2)) == False
assert isnan(mpc(nan,inf)) == True
assert isnan(mpc(inf,inf)) == False
assert isnan(mpq((3,2))) == False
assert isnan(mpq((0,1))) == False
assert isinf(inf) == True
assert isinf(-inf) == True
assert isinf(3) == False
assert isinf(nan) == False
assert isinf(3+4j) == False
assert isinf(mpc(inf)) == True
assert isinf(mpc(3,inf)) == True
assert isinf(mpc(inf,3)) == True
assert isinf(mpc(inf,inf)) == True
assert isinf(mpc(nan,inf)) == True
assert isinf(mpc(inf,nan)) == True
assert isinf(mpc(nan,nan)) == False
assert isinf(mpq((3,2))) == False
assert isinf(mpq((0,1))) == False
assert isnormal(3) == True
assert isnormal(3.5) == True
assert isnormal(mpf(3.5)) == True
assert isnormal(0) == False
assert isnormal(mpf(0)) == False
assert isnormal(0.0) == False
assert isnormal(inf) == False
assert isnormal(-inf) == False
assert isnormal(nan) == False
assert isnormal(float(inf)) == False
assert isnormal(mpc(0,0)) == False
assert isnormal(mpc(3,0)) == True
assert isnormal(mpc(0,3)) == True
assert isnormal(mpc(3,3)) == True
assert isnormal(mpc(0,nan)) == False
assert isnormal(mpc(0,inf)) == False
assert isnormal(mpc(3,nan)) == False
assert isnormal(mpc(3,inf)) == False
assert isnormal(mpc(3,-inf)) == False
assert isnormal(mpc(nan,0)) == False
assert isnormal(mpc(inf,0)) == False
assert isnormal(mpc(nan,3)) == False
assert isnormal(mpc(inf,3)) == False
assert isnormal(mpc(inf,nan)) == False
assert isnormal(mpc(nan,inf)) == False
assert isnormal(mpc(nan,nan)) == False
assert isnormal(mpc(inf,inf)) == False
assert isnormal(mpq((3,2))) == True
assert isnormal(mpq((0,1))) == False
assert isint(3) == True
assert isint(0) == True
assert isint(3L) == True
assert isint(0L) == True
assert isint(mpf(3)) == True
assert isint(mpf(0)) == True
assert isint(mpf(-3)) == True
assert isint(mpf(3.2)) == False
assert isint(3.2) == False
assert isint(nan) == False
assert isint(inf) == False
assert isint(-inf) == False
assert isint(mpc(0)) == True
assert isint(mpc(3)) == True
assert isint(mpc(3.2)) == False
assert isint(mpc(3,inf)) == False
assert isint(mpc(inf)) == False
assert isint(mpc(3,2)) == False
assert isint(mpc(0,2)) == False
assert isint(mpc(3,2),gaussian=True) == True
assert isint(mpc(3,0),gaussian=True) == True
assert isint(mpc(0,3),gaussian=True) == True
assert isint(3+4j) == False
assert isint(3+4j, gaussian=True) == True
assert isint(3+0j) == True
assert isint(mpq((3,2))) == False
assert isint(mpq((3,9))) == False
assert isint(mpq((9,3))) == True
assert isint(mpq((0,4))) == True
assert isint(mpq((1,1))) == True
assert isint(mpq((-1,1))) == True
|
mattpap/sympy-polys
|
sympy/mpmath/tests/test_basic_ops.py
|
Python
|
bsd-3-clause
| 14,066
|
[
"Gaussian"
] |
31382082556c1560ee2ff2370ffd04d35ed2f17530dbf4107962bed740db9059
|
"""Test options handling."""
import os
import sys
from collections import OrderedDict
import pytest
sys.path.append(os.path.abspath('.'))
import fyrd
def test_help():
"""Check that the output of option_help() matches saved output."""
if os.path.isfile(os.path.join('tests', 'options_help.txt')):
ofile = os.path.join('tests', 'options_help.txt')
elif os.path.isfile('options_help.txt'):
ofile = 'options_help.txt'
else:
raise Exception('Cannot find options_help.txt file')
assert fyrd.option_help(mode='string') == open(ofile).read()
fyrd.option_help(mode='table')
fyrd.option_help(mode='merged_table')
def test_dict_types():
"""Make sure all expected dictionaries exist and have the right type."""
assert hasattr(fyrd.options, 'COMMON')
assert hasattr(fyrd.options, 'CLUSTER_OPTS')
assert hasattr(fyrd.options, 'TORQUE')
assert hasattr(fyrd.options, 'SLURM')
assert hasattr(fyrd.options, 'SLURM_KWDS')
assert hasattr(fyrd.options, 'TORQUE_KWDS')
assert hasattr(fyrd.options, 'CLUSTER_KWDS')
assert hasattr(fyrd.options, 'ALLOWED_KWDS')
assert isinstance(fyrd.options.COMMON, OrderedDict)
assert isinstance(fyrd.options.CLUSTER_OPTS, OrderedDict)
assert isinstance(fyrd.options.TORQUE, OrderedDict)
assert isinstance(fyrd.options.SLURM, OrderedDict)
assert isinstance(fyrd.options.SLURM_KWDS, OrderedDict)
assert isinstance(fyrd.options.TORQUE_KWDS, OrderedDict)
assert isinstance(fyrd.options.CLUSTER_KWDS, OrderedDict)
assert isinstance(fyrd.options.ALLOWED_KWDS, OrderedDict)
def test_sane_keywords():
"""Run check_arguments() on some made up keywords."""
# Should succeed
fyrd.options.check_arguments(
{
'cores': 49,
'mem': '60GB',
'modules': ['python', 'jeremy'],
'imports': 'pysam',
'filedir': '/tmp',
'dir': '.',
'suffix': 'bob',
'outfile': 'hi!',
'errfile': 'err',
'threads': 6,
'nodes': 2,
'features': 'bigmem',
'partition': 'default',
'account': 'richjoe',
'export': 'PYTHONPATH',
'begin': '00:02:00',
}
)
# Should fail
with pytest.raises(TypeError):
fyrd.options.check_arguments({'nodes': 'bob'})
# Should succeed
fyrd.options.check_arguments({'nodes': '14'})
def test_memory_formatting():
"""Format memory several different ways."""
assert fyrd.options.check_arguments({'mem': '4000b'}) == {'mem': 5}
assert fyrd.options.check_arguments({'mem': '40000KB'}) == {'mem': 39}
assert fyrd.options.check_arguments({'mem': '4000mB'}) == {'mem': 4000}
assert fyrd.options.check_arguments({'mem': '4GB'}) == {'mem': 4096}
assert fyrd.options.check_arguments({'mem': '4TB'}) == {'mem': 4194304}
assert fyrd.options.check_arguments({'memory': 4000}) == {'mem': 4000}
with pytest.raises(ValueError):
fyrd.options.check_arguments({'mem': 'bob'})
with pytest.raises(ValueError):
fyrd.options.check_arguments({'mem': '4000zb'})
with pytest.raises(ValueError):
fyrd.options.check_arguments({'mem': '4000tb0'})
with pytest.raises(ValueError):
fyrd.options.check_arguments({'mem': 'tb0'})
with pytest.raises(ValueError):
fyrd.options.check_arguments({'mem': 'tb'})
def test_time_formatting():
"""Format time several different ways."""
assert fyrd.options.check_arguments(
{'time': '01-00:00:00'}
) == {'time': '24:00:00'}
assert fyrd.options.check_arguments(
{'walltime': '03'}
) == {'time': '00:00:03'}
assert fyrd.options.check_arguments(
{'time': '99:99'}
) == {'time': '01:40:39'}
with pytest.raises(fyrd.options.OptionsError):
fyrd.options.check_arguments({'time': '00:00:00:03'})
def test_split():
"""Run with good and bad arguments, expect split."""
good, bad = fyrd.options.split_keywords(
{'cores': 2, 'memory': '4GB', 'bob': 'dylan'}
)
assert good == {'cores': 2, 'mem': 4096}
assert bad == {'bob': 'dylan'}
def test_string_formatting():
"""Test options_to_string."""
test_options = {
'nodes': '2', 'cores': 5, 'account': 'richguy',
'features': 'bigmem', 'time': '20', 'mem': 2000,
'partition': 'large', 'export': 'PATH', 'outfile': 'joe',
'errfile': 'john'
}
assert sorted(
fyrd.options.options_to_string(
test_options,
qtype='torque'
)[0].split('\n')
) == [
'#PBS -A richguy',
'#PBS -e john',
'#PBS -l mem=2000MB',
'#PBS -l nodes=2:ppn=5:bigmem',
'#PBS -l walltime=00:00:20',
'#PBS -o joe',
'#PBS -q large',
'#PBS -v PATH'
]
assert sorted(
fyrd.options.options_to_string(
test_options,
qtype='slurm'
)[0].split('\n')
) == [
'#SBATCH --account=richguy',
"#SBATCH --constraint=['bigmem']",
'#SBATCH --cpus-per-task 5',
'#SBATCH --export=PATH',
'#SBATCH --mem=2000',
'#SBATCH --ntasks 2',
'#SBATCH --time=00:00:20',
'#SBATCH -e john',
'#SBATCH -o joe',
'#SBATCH -p large'
]
with pytest.raises(fyrd.options.OptionsError):
fyrd.options.option_to_string('nodes', 2)
with pytest.raises(ValueError):
fyrd.options.option_to_string({'nodes': 2})
def test_back_to_normal():
"""Return the queue to the normal setting."""
fyrd.batch_systems.get_cluster_environment()
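# Editor's addition (illustrative sketch, not part of the test suite; the
# keyword values are made up). It shows the keyword-processing flow the
# tests above exercise piecewise; run it manually rather than under pytest.
if __name__ == '__main__':
    good, bad = fyrd.options.split_keywords(
        {'cores': 4, 'mem': '8GB', 'favourite_colour': 'blue'}
    )
    print(good)  # expected: {'cores': 4, 'mem': 8192}
    print(bad)   # expected: {'favourite_colour': 'blue'}
    print(fyrd.options.check_arguments({'time': '01-00:00:00'}))  # {'time': '24:00:00'}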
|
MikeDacre/fyrd
|
tests/test_options.py
|
Python
|
mit
| 5,648
|
[
"pysam"
] |
80f1cd1101614cd8b5fdc4b1427a938f81bda20380109ab3e1a3985f0bf64ef0
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, getdate, cint, cstr
from frappe import throw, _
from erpnext.utilities.transaction_base import TransactionBase, delete_events
from erpnext.stock.utils import get_valid_serial_nos
class MaintenanceSchedule(TransactionBase):
def get_item_details(self, item_code):
item = frappe.db.sql("""select item_name, description from `tabItem`
where name=%s""", (item_code), as_dict=1)
ret = {
'item_name': item and item[0]['item_name'] or '',
'description' : item and item[0]['description'] or ''
}
return ret
def generate_schedule(self):
self.set('schedules', [])
frappe.db.sql("""delete from `tabMaintenance Schedule Detail`
where parent=%s""", (self.name))
count = 1
for d in self.get('items'):
self.validate_maintenance_detail()
s_list = []
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits, d.sales_person)
for i in range(d.no_of_visits):
child = self.append('schedules')
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count + 1
child.sales_person = d.sales_person
self.save()
def on_submit(self):
if not self.get('schedules'):
throw(_("Please click on 'Generate Schedule' to get schedule"))
self.check_serial_no_added()
self.validate_schedule()
email_map = {}
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
if d.sales_person not in email_map:
sp = frappe.get_doc("Sales Person", d.sales_person)
email_map[d.sales_person] = sp.get_email_id()
scheduled_date = frappe.db.sql("""select scheduled_date from
`tabMaintenance Schedule Detail` where sales_person=%s and item_code=%s and
parent=%s""", (d.sales_person, d.item_code, self.name), as_dict=1)
for key in scheduled_date:
if email_map[d.sales_person]:
description = "Reference: %s, Item Code: %s and Customer: %s" % \
(self.name, d.item_code, self.customer)
frappe.get_doc({
"doctype": "Event",
"owner": email_map[d.sales_person] or self.owner,
"subject": description,
"description": description,
"starts_on": cstr(key["scheduled_date"]) + " 10:00:00",
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
}).insert(ignore_permissions=1)
frappe.db.set(self, 'status', 'Submitted')
def create_schedule_list(self, start_date, end_date, no_of_visit, sales_person):
schedule_list = []
start_date_copy = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff / no_of_visit
for visit in range(cint(no_of_visit)):
if (getdate(start_date_copy) < getdate(end_date)):
start_date_copy = add_days(start_date_copy, add_by)
if len(schedule_list) < no_of_visit:
schedule_date = self.validate_schedule_date_for_holiday_list(getdate(start_date_copy),
sales_person)
if schedule_date > getdate(end_date):
schedule_date = getdate(end_date)
schedule_list.append(schedule_date)
return schedule_list
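# Editor's note (worked example with hypothetical values): for
# start_date=2015-01-01, end_date=2015-01-31 and no_of_visits=3, date_diff
# is 30 and add_by is 10, so the visits fall on 2015-01-11, 2015-01-21 and
# 2015-01-31; a visit date landing on a holiday is shifted back by one day.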
def validate_schedule_date_for_holiday_list(self, schedule_date, sales_person):
from erpnext.accounts.utils import get_fiscal_year
validated = False
fy_details = ""
try:
fy_details = get_fiscal_year(date=schedule_date, verbose=0)
except Exception:
pass
if fy_details and fy_details[0]:
# check holiday list in employee master
holiday_list = frappe.db.sql_list("""select h.holiday_date from `tabEmployee` emp,
`tabSales Person` sp, `tabHoliday` h, `tabHoliday List` hl
where sp.name=%s and emp.name=sp.employee
and hl.name=emp.holiday_list and
h.parent=hl.name and
hl.fiscal_year=%s""", (sales_person, fy_details[0]))
if not holiday_list:
# check global holiday list
holiday_list = frappe.db.sql("""select h.holiday_date from
`tabHoliday` h, `tabHoliday List` hl
where h.parent=hl.name and ifnull(hl.is_default, 0) = 1
and hl.fiscal_year=%s""", fy_details[0])
if not validated and holiday_list:
if schedule_date in holiday_list:
schedule_date = add_days(schedule_date, -1)
else:
validated = True
return schedule_date
def validate_dates_with_periodicity(self):
for d in self.get("items"):
if d.start_date and d.end_date and d.periodicity and d.periodicity!="Random":
date_diff = (getdate(d.end_date) - getdate(d.start_date)).days + 1
days_in_period = {
"Weekly": 7,
"Monthly": 30,
"Quarterly": 90,
"Half Yearly": 180,
"Yearly": 365
}
if date_diff < days_in_period[d.periodicity]:
throw(_("Row {0}: To set {1} periodicity, difference between from and to date \
must be greater than or equal to {2}")
.format(d.idx, d.periodicity, days_in_period[d.periodicity]))
def validate_maintenance_detail(self):
if not self.get('items'):
throw(_("Please enter Maintaince Details first"))
for d in self.get('items'):
if not d.item_code:
throw(_("Please select item code"))
elif not d.start_date or not d.end_date:
throw(_("Please select Start Date and End Date for Item {0}".format(d.item_code)))
elif not d.no_of_visits:
throw(_("Please mention no of visits required"))
elif not d.sales_person:
throw(_("Please select Incharge Person's name"))
if getdate(d.start_date) >= getdate(d.end_date):
throw(_("Start date should be less than end date for Item {0}").format(d.item_code))
def validate_sales_order(self):
for d in self.get('items'):
if d.prevdoc_docname:
chk = frappe.db.sql("""select ms.name from `tabMaintenance Schedule` ms,
`tabMaintenance Schedule Item` msi where msi.parent=ms.name and
msi.prevdoc_docname=%s and ms.docstatus=1""", d.prevdoc_docname)
if chk:
throw(_("Maintenance Schedule {0} exists against {0}").format(chk[0][0], d.prevdoc_docname))
def validate(self):
self.validate_maintenance_detail()
self.validate_dates_with_periodicity()
self.validate_sales_order()
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_doc = frappe.get_doc("Serial No", serial_no)
serial_no_doc.amc_expiry_date = amc_expiry_date
serial_no_doc.save()
def validate_serial_no(self, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = frappe.db.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "status", "delivery_date"], as_dict=1)
if not sr_details:
frappe.throw(_("Serial No {0} not found").format(serial_no))
if sr_details.warranty_expiry_date and sr_details.warranty_expiry_date>=amc_start_date:
throw(_("Serial No {0} is under warranty upto {1}").format(serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and sr_details.amc_expiry_date >= amc_start_date:
throw(_("Serial No {0} is under maintenance contract upto {1}").format(serial_no, sr_details.amc_start_date))
if sr_details.status=="Delivered" and sr_details.delivery_date and \
sr_details.delivery_date >= amc_start_date:
throw(_("Maintenance start date can not be before delivery date for Serial No {0}").format(serial_no))
def validate_schedule(self):
item_lst1 =[]
item_lst2 =[]
for d in self.get('items'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in self.get('schedules'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
throw(_("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'"))
else:
for x in item_lst1:
if x not in item_lst2:
throw(_("Please click on 'Generate Schedule'"))
def check_serial_no_added(self):
serial_present =[]
for d in self.get('items'):
if d.serial_no:
serial_present.append(d.item_code)
for m in self.get('schedules'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
throw(_("Please click on 'Generate Schedule' to fetch Serial No added for Item {0}").format(m.item_code))
def on_cancel(self):
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
frappe.db.set(self, 'status', 'Cancelled')
delete_events(self.doctype, self.name)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doc("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"sales_person": "service_person"
}
}
}, target_doc)
return doclist
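# Editor's note (illustrative, hypothetical document name): calling
# make_maintenance_visit("MS-00001") on a submitted Maintenance Schedule
# returns an unsaved Maintenance Visit document with maintenance_type set
# to "Scheduled" and its purposes mapped from the schedule items.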
|
treejames/erpnext
|
erpnext/support/doctype/maintenance_schedule/maintenance_schedule.py
|
Python
|
agpl-3.0
| 9,573
|
[
"VisIt"
] |
e6e4297f2e95e76a0a9da9338bae6c7440d2937e5e212ba1ee233323da566338
|
''' This is a test of the chain
ResourceManagementClient -> ResourceManagementHandler -> ResourceManagementDB
It supposes that the DB is present, and that the service is running
The DB is supposed to be empty when the test starts
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position
import sys
import datetime
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
gLogger.setLevel('DEBUG')
dateEffective = datetime.datetime.now()
lastCheckTime = datetime.datetime.now()
class TestClientResourceManagementTestCase(unittest.TestCase):
def setUp(self):
self.rmClient = ResourceManagementClient()
def tearDown(self):
pass
class ResourceManagementClientChain(TestClientResourceManagementTestCase):
def test_AccountingCache(self):
"""
AccountingCache table
"""
res = self.rmClient.deleteAccountingCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyAccountingCache
res = self.rmClient.addOrModifyAccountingCache('TestName12345', 'plotType', 'plotName', 'result',
datetime.datetime.now(), datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyAccountingCache('TestName12345', 'plotType', 'plotName', 'changedresult',
dateEffective, lastCheckTime)
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][4], 'changedresult')
# TEST deleteAccountingCache
# ...............................................................................
res = self.rmClient.deleteAccountingCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_DowntimeCache(self):
"""
DowntimeCache table
"""
res = self.rmClient.deleteDowntimeCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyDowntimeCache
res = self.rmClient.addOrModifyDowntimeCache('TestName12345', 'element', 'name',
datetime.datetime.now(), datetime.datetime.now(),
'severity', 'description', 'link',
datetime.datetime.now(), datetime.datetime.now(),
'gOCDBServiceType')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyDowntimeCache('TestName12345', 'element', 'name', severity='changedSeverity')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][4], 'changedSeverity')
# TEST deleteDowntimeCache
# ...............................................................................
res = self.rmClient.deleteDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_GGUSTicketsCache(self):
"""
GGUSTicketsCache table
"""
res = self.rmClient.deleteGGUSTicketsCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyGGUSTicketsCache
res = self.rmClient.addOrModifyGGUSTicketsCache('TestName12345', 'link', 0, 'tickets', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyGGUSTicketsCache('TestName12345', 'newLink')
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 'newLink')
# TEST deleteGGUSTicketsCache
# ...............................................................................
res = self.rmClient.deleteGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_JobCache(self):
"""
JobCache table
"""
res = self.rmClient.deleteJobCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyJobCache
res = self.rmClient.addOrModifyJobCache('TestName12345', 'maskstatus', 50.89, 'status', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyJobCache('TestName12345', status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][1], 'newStatus')
# TEST deleteJobCache
# ...............................................................................
res = self.rmClient.deleteJobCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_PilotCache(self):
"""
PilotCache table
"""
res = self.rmClient.deletePilotCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyPilotCache
res = self.rmClient.addOrModifyPilotCache('TestName12345', 'CE', 0.0, 25.5, 'status', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
self.assertEqual(res['Value'][0][6], 'all') # default value for vo, as the last element
res = self.rmClient.addOrModifyPilotCache('TestName12345', status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
# check if the result has changed.
self.assertEqual(res['Value'][0][2], 'newStatus')
# TEST deletePilotCache
# ...............................................................................
res = self.rmClient.deletePilotCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_PolicyResult(self):
"""
PolicyResult table
"""
res = self.rmClient.deletePolicyResult('element', 'TestName12345',
'policyName', 'statusType') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyPolicyResult
res = self.rmClient.addOrModifyPolicyResult('element', 'TestName12345', 'policyName',
'statusType', 'status', 'reason',
datetime.datetime.now(), datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
# check that the statusType we got is equal to the previously added 'statusType'
self.assertEqual(res['Value'][0][1], 'statusType')
self.assertEqual(res['Value'][0][8], 'all') # default value for vo, as the last element
res = self.rmClient.addOrModifyPolicyResult('element', 'TestName12345', 'policyName', 'statusType',
status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
# check if the result has changed.
self.assertEqual(res['Value'][0][4], 'newStatus')
# TEST deletePolicyResult
# ...............................................................................
res = self.rmClient.deletePolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_SpaceTokenOccupancy(self):
"""
SpaceTokenOccupancy table
"""
res = self.rmClient.deleteSpaceTokenOccupancyCache('endpoint', 'token') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifySpaceTokenOccupancy
res = self.rmClient.addOrModifySpaceTokenOccupancyCache('endpoint', 'token', 500.0, 1000.0, 200.0,
datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'token'
self.assertEqual(res['Value'][0][1], 'token')
res = self.rmClient.addOrModifySpaceTokenOccupancyCache('endpoint', 'token', free=100.0)
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 100.0)
# TEST deleteSpaceTokenOccupancy
# ...............................................................................
res = self.rmClient.deleteSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_Transfer(self):
"""
TransferCache table
"""
res = self.rmClient.deleteTransferCache('sourcename', 'destinationname') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyTransferCache
res = self.rmClient.addOrModifyTransferCache('sourcename', 'destinationname', 'metric', 1000.0,
datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
# check that the metric we got is equal to the previously added 'metric'
self.assertEqual(res['Value'][0][2], 'metric')
res = self.rmClient.addOrModifyTransferCache('sourcename', 'destinationname', value=200.0)
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 200.0)
# TEST deleteTransferCache
# ...............................................................................
res = self.rmClient.deleteTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientResourceManagementTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ResourceManagementClientChain))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
yujikato/DIRAC
|
tests/Integration/ResourceStatusSystem/Test_ResourceManagement.py
|
Python
|
gpl-3.0
| 12,762
|
[
"DIRAC"
] |
4d0f8ec5b80fd9c713e3db697b0b502b074c8581f283d9925008f89891c56e86
|
"""DIRAC Administrator API Class
All administrative functionality is exposed through the DIRAC Admin API. Examples include
site banning and unbanning, WMS proxy uploading etc.
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import os
from DIRAC import gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.PromptUser import promptUser
from DIRAC.Core.Base.API import API
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Utilities.SiteCEMapping import getSiteCEMapping
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.WorkloadManagementSystem.Client.PilotManagerClient import PilotManagerClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.Core.Utilities.Grid import ldapSite, ldapCluster, ldapCE, ldapService
from DIRAC.Core.Utilities.Grid import ldapCEState, ldapCEVOView, ldapSE
voName = ''
ret = getProxyInfo(disableVOMS=True)
if ret['OK'] and 'group' in ret['Value']:
voName = getVOForGroup(ret['Value']['group'])
COMPONENT_NAME = '/Interfaces/API/DiracAdmin'
class DiracAdmin(API):
""" Administrative functionalities
"""
#############################################################################
def __init__(self):
"""Internal initialization of the DIRAC Admin API.
"""
super(DiracAdmin, self).__init__()
self.csAPI = CSAPI()
self.dbg = False
if gConfig.getValue(self.section + '/LogLevel', 'DEBUG') == 'DEBUG':
self.dbg = True
self.scratchDir = gConfig.getValue(self.section + '/ScratchDir', '/tmp')
self.currentDir = os.getcwd()
self.rssFlag = ResourceStatus().rssFlag
self.sitestatus = SiteStatus()
#############################################################################
def uploadProxy(self, group):
"""Upload a proxy to the DIRAC WMS. This method
Example usage:
>>> print diracAdmin.uploadProxy('dteam_pilot')
{'OK': True, 'Value': 0L}
:param group: DIRAC Group
:type job: string
:return: S_OK,S_ERROR
:param permanent: Indefinitely update proxy
:type permanent: boolean
"""
return gProxyManager.uploadProxy(diracGroup=group)
#############################################################################
def setProxyPersistency(self, userDN, userGroup, persistent=True):
"""Set the persistence of a proxy in the Proxy Manager
Example usage:
>>> print diracAdmin.setProxyPersistency( 'some DN', 'dirac group', True )
{'OK': True }
:param userDN: User DN
:type userDN: string
:param userGroup: DIRAC Group
:type userGroup: string
:param persistent: Persistent flag
:type persistent: boolean
:return: S_OK,S_ERROR
"""
return gProxyManager.setPersistency(userDN, userGroup, persistent)
#############################################################################
def checkProxyUploaded(self, userDN, userGroup, requiredTime):
"""Set the persistence of a proxy in the Proxy Manager
Example usage:
>>> print diracAdmin.setProxyPersistency( 'some DN', 'dirac group', True )
{'OK': True, 'Value' : True/False }
:param userDN: User DN
:type userDN: string
:param userGroup: DIRAC Group
:type userGroup: string
:param requiredTime: Required life time of the uploaded proxy
:type requiredTime: boolean
:return: S_OK,S_ERROR
"""
return gProxyManager.userHasProxy(userDN, userGroup, requiredTime)
#############################################################################
def getSiteMask(self, printOutput=False, status='Active'):
"""Retrieve current site mask from WMS Administrator service.
Example usage:
>>> print diracAdmin.getSiteMask()
{'OK': True, 'Value': 0L}
:return: S_OK,S_ERROR
"""
result = self.sitestatus.getSites(siteState=status)
if result['OK']:
sites = result['Value']
if printOutput:
sites.sort()
for site in sites:
print(site)
return result
#############################################################################
def getBannedSites(self, printOutput=False):
"""Retrieve current list of banned and probing sites.
Example usage:
>>> print diracAdmin.getBannedSites()
{'OK': True, 'Value': []}
:return: S_OK,S_ERROR
"""
bannedSites = self.sitestatus.getSites(siteState='Banned')
if not bannedSites['OK']:
return bannedSites
probingSites = self.sitestatus.getSites(siteState='Probing')
if not probingSites['OK']:
return probingSites
mergedList = sorted(bannedSites['Value'] + probingSites['Value'])
if printOutput:
print('\n'.join(mergedList))
return S_OK(mergedList)
#############################################################################
def getSiteSection(self, site, printOutput=False):
"""Simple utility to get the list of CEs for DIRAC site name.
Example usage:
>>> print diracAdmin.getSiteSection('LCG.CERN.ch')
{'OK': True, 'Value':}
:return: S_OK,S_ERROR
"""
gridType = site.split('.')[0]
if not gConfig.getSections('/Resources/Sites/%s' % (gridType))['OK']:
return S_ERROR('/Resources/Sites/%s is not a valid site section' % (gridType))
result = gConfig.getOptionsDict('/Resources/Sites/%s/%s' % (gridType, site))
if printOutput and result['OK']:
print(self.pPrint.pformat(result['Value']))
return result
#############################################################################
def allowSite(self, site, comment, printOutput=False):
"""Adds the site to the site mask.
Example usage:
>>> print diracAdmin.allowSite()
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self.__checkSiteIsValid(site)
if not result['OK']:
return result
result = self.getSiteMask(status='Active')
if not result['OK']:
return result
siteMask = result['Value']
if site in siteMask:
if printOutput:
print('Site %s is already Active' % site)
return S_OK('Site %s is already Active' % site)
if self.rssFlag:
result = self.sitestatus.setSiteStatus(site, 'Active', comment)
else:
result = WMSAdministratorClient().allowSite(site, comment)
if not result['OK']:
return result
if printOutput:
print('Site %s status is set to Active' % site)
return result
#############################################################################
def getSiteMaskLogging(self, site=None, printOutput=False):
"""Retrieves site mask logging information.
Example usage:
>>> print diracAdmin.getSiteMaskLogging('LCG.AUVER.fr')
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self.__checkSiteIsValid(site)
if not result['OK']:
return result
if self.rssFlag:
result = ResourceStatusClient().selectStatusElement('Site', 'History', name=site)
else:
result = WMSAdministratorClient().getSiteMaskLogging(site)
if not result['OK']:
return result
if printOutput:
if site:
print('\nSite Mask Logging Info for %s\n' % site)
else:
print('\nAll Site Mask Logging Info\n')
sitesLogging = result['Value']
if isinstance(sitesLogging, dict):
for siteName, tupleList in sitesLogging.iteritems():
if siteName:
print('\n===> %s\n' % siteName)
for tup in tupleList:
print(str(tup[0]).ljust(8) + str(tup[1]).ljust(20) +
'( ' + str(tup[2]).ljust(len(str(tup[2]))) + ' ) "' + str(tup[3]) + '"')
print(' ')
elif isinstance(sitesLogging, list):
result = [(sl[1], sl[3], sl[4]) for sl in sitesLogging]
return result
#############################################################################
def banSite(self, site, comment, printOutput=False):
"""Removes the site from the site mask.
Example usage:
>>> print diracAdmin.banSite()
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self.__checkSiteIsValid(site)
if not result['OK']:
return result
mask = self.getSiteMask(status='Banned')
if not mask['OK']:
return mask
siteMask = mask['Value']
if site in siteMask:
if printOutput:
print('Site %s is already Banned' % site)
return S_OK('Site %s is already Banned' % site)
if self.rssFlag:
result = self.sitestatus.setSiteStatus(site, 'Banned', comment)
else:
result = WMSAdministratorClient().banSite(site, comment)
if not result['OK']:
return result
if printOutput:
print('Site %s status is set to Banned' % site)
return result
#############################################################################
def __checkSiteIsValid(self, site):
"""Internal function to check that a site name is valid.
"""
sites = getSiteCEMapping()
if not sites['OK']:
return S_ERROR('Could not get site CE mapping')
siteList = sites['Value'].keys()
if site not in siteList:
return S_ERROR('Specified site %s is not in list of defined sites' % site)
return S_OK('%s is valid' % site)
#############################################################################
def getServicePorts(self, setup='', printOutput=False):
"""Checks the service ports for the specified setup. If not given this is
taken from the current installation (/DIRAC/Setup)
Example usage:
>>> print diracAdmin.getServicePorts()
{'OK': True, 'Value':''}
:return: S_OK,S_ERROR
"""
if not setup:
setup = gConfig.getValue('/DIRAC/Setup', '')
setupList = gConfig.getSections('/DIRAC/Setups', [])
if not setupList['OK']:
return S_ERROR('Could not get /DIRAC/Setups sections')
setupList = setupList['Value']
if setup not in setupList:
return S_ERROR('Setup %s is not in allowed list: %s' % (setup, ', '.join(setupList)))
serviceSetups = gConfig.getOptionsDict('/DIRAC/Setups/%s' % setup)
if not serviceSetups['OK']:
return S_ERROR('Could not get /DIRAC/Setups/%s options' % setup)
serviceSetups = serviceSetups['Value'] # dict
systemList = gConfig.getSections('/Systems')
if not systemList['OK']:
return S_ERROR('Could not get Systems sections')
systemList = systemList['Value']
result = {}
for system in systemList:
if system in serviceSetups:
path = '/Systems/%s/%s/Services' % (system, serviceSetups[system])
servicesList = gConfig.getSections(path)
if not servicesList['OK']:
self.log.warn('Could not get sections in %s' % path)
else:
servicesList = servicesList['Value']
if not servicesList:
servicesList = []
self.log.verbose('System: %s ServicesList: %s' % (system, ', '.join(servicesList)))
for service in servicesList:
spath = '%s/%s/Port' % (path, service)
servicePort = gConfig.getValue(spath, 0)
if servicePort:
self.log.verbose('Found port for %s/%s = %s' % (system, service, servicePort))
result['%s/%s' % (system, service)] = servicePort
else:
self.log.warn('No port found for %s' % spath)
else:
self.log.warn('%s is not defined in /DIRAC/Setups/%s' % (system, setup))
if printOutput:
print(self.pPrint.pformat(result))
return S_OK(result)
#############################################################################
def getProxy(self, userDN, userGroup, validity=43200, limited=False):
"""Retrieves a proxy with default 12hr validity and stores
this in a file in the local directory by default.
Example usage:
>>> print diracAdmin.getProxy()
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
return gProxyManager.downloadProxy(userDN, userGroup, limited=limited,
requiredTimeLeft=validity)
#############################################################################
def getVOMSProxy(self, userDN, userGroup, vomsAttr=False, validity=43200, limited=False):
"""Retrieves a proxy with default 12hr validity and VOMS extensions and stores
this in a file in the local directory by default.
Example usage:
>>> print diracAdmin.getVOMSProxy()
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
return gProxyManager.downloadVOMSProxy(userDN, userGroup, limited=limited,
requiredVOMSAttribute=vomsAttr,
requiredTimeLeft=validity)
#############################################################################
def getPilotProxy(self, userDN, userGroup, validity=43200):
"""Retrieves a pilot proxy with default 12hr validity and stores
this in a file in the local directory by default.
Example usage:
>>> print diracAdmin.getPilotProxy()
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
return gProxyManager.getPilotProxyFromDIRACGroup(userDN, userGroup, requiredTimeLeft=validity)
#############################################################################
def resetJob(self, jobID):
"""Reset a job or list of jobs in the WMS. This operation resets the reschedule
counter for a job or list of jobs and allows them to run as new.
Example::
>>> print dirac.resetJob(12345)
{'OK': True, 'Value': [12345]}
:param job: JobID
:type job: integer or list of integers
:return: S_OK,S_ERROR
"""
if isinstance(jobID, basestring):
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or convertible integer for existing jobID')
elif isinstance(jobID, list):
try:
jobID = [int(job) for job in jobID]
except Exception as x:
return self._errorReport(str(x), 'Expected integer or convertible integer for existing jobIDs')
result = JobManagerClient(useCertificates=False).resetJob(jobID)
return result
#############################################################################
def getJobPilotOutput(self, jobID, directory=''):
"""Retrieve the pilot output for an existing job in the WMS.
The output will be retrieved in a local directory unless
otherwise specified.
>>> print dirac.getJobPilotOutput(12345)
{'OK': True, StdOut:'',StdError:''}
:param job: JobID
:type job: integer or string
:return: S_OK,S_ERROR
"""
if not directory:
directory = self.currentDir
if not os.path.exists(directory):
return self._errorReport('Directory %s does not exist' % directory)
result = WMSAdministratorClient().getJobPilotOutput(jobID)
if not result['OK']:
return result
outputPath = '%s/pilot_%s' % (directory, jobID)
if os.path.exists(outputPath):
self.log.info('Remove %s and retry to continue' % outputPath)
return S_ERROR('Remove %s and retry to continue' % outputPath)
if not os.path.exists(outputPath):
self.log.verbose('Creating directory %s' % outputPath)
os.mkdir(outputPath)
outputs = result['Value']
if 'StdOut' in outputs:
stdout = '%s/std.out' % (outputPath)
with open(stdout, 'w') as fopen:
fopen.write(outputs['StdOut'])
self.log.verbose('Standard output written to %s' % (stdout))
else:
self.log.warn('No standard output returned')
if 'StdError' in outputs:
stderr = '%s/std.err' % (outputPath)
with open(stderr, 'w') as fopen:
fopen.write(outputs['StdError'])
self.log.verbose('Standard error written to %s' % (stderr))
else:
self.log.warn('No standard error returned')
self.log.always('Outputs retrieved in %s' % outputPath)
return result
#############################################################################
def getPilotOutput(self, gridReference, directory=''):
"""Retrieve the pilot output (std.out and std.err) for an existing job in the WMS.
>>> print dirac.getPilotOutput(gridReference)
{'OK': True, 'Value': {}}
:param gridReference: Pilot job reference
:type gridReference: string
:return: S_OK,S_ERROR
"""
if not isinstance(gridReference, basestring):
return self._errorReport('Expected string for pilot reference')
if not directory:
directory = self.currentDir
if not os.path.exists(directory):
return self._errorReport('Directory %s does not exist' % directory)
result = PilotManagerClient().getPilotOutput(gridReference)
if not result['OK']:
return result
gridReferenceSmall = gridReference.split('/')[-1]
if not gridReferenceSmall:
gridReferenceSmall = 'reference'
outputPath = '%s/pilot_%s' % (directory, gridReferenceSmall)
if os.path.exists(outputPath):
self.log.info('Remove %s and retry to continue' % outputPath)
return S_ERROR('Remove %s and retry to continue' % outputPath)
if not os.path.exists(outputPath):
self.log.verbose('Creating directory %s' % outputPath)
os.mkdir(outputPath)
outputs = result['Value']
if 'StdOut' in outputs:
stdout = '%s/std.out' % (outputPath)
with open(stdout, 'w') as fopen:
fopen.write(outputs['StdOut'])
self.log.info('Standard output written to %s' % (stdout))
else:
self.log.warn('No standard output returned')
if 'StdErr' in outputs:
stderr = '%s/std.err' % (outputPath)
with open(stderr, 'w') as fopen:
fopen.write(outputs['StdErr'])
self.log.info('Standard error written to %s' % (stderr))
else:
self.log.warn('No standard error returned')
self.log.always('Outputs retrieved in %s' % outputPath)
return result
#############################################################################
def getPilotInfo(self, gridReference):
"""Retrieve info relative to a pilot reference
>>> print dirac.getPilotInfo(12345)
{'OK': True, 'Value': {}}
:param gridReference: Pilot Job Reference
:type gridReference: string
:return: S_OK,S_ERROR
"""
if not isinstance(gridReference, basestring):
return self._errorReport('Expected string for pilot reference')
result = PilotManagerClient().getPilotInfo(gridReference)
return result
#############################################################################
def killPilot(self, gridReference):
"""Kill the pilot specified
>>> print dirac.killPilot(gridReference)
{'OK': True, 'Value': {}}
:param gridReference: Pilot Job Reference
:return: S_OK,S_ERROR
"""
if not isinstance(gridReference, basestring):
return self._errorReport('Expected string for pilot reference')
result = PilotManagerClient().killPilot(gridReference)
return result
#############################################################################
def getPilotLoggingInfo(self, gridReference):
"""Retrieve the pilot logging info for an existing job in the WMS.
>>> print dirac.getPilotLoggingInfo(12345)
{'OK': True, 'Value': {"The output of the command"}}
:param gridReference: Grid pilot job reference Id
:type gridReference: string
:return: S_OK,S_ERROR
"""
if not isinstance(gridReference, basestring):
return self._errorReport('Expected string for pilot reference')
return PilotManagerClient().getPilotLoggingInfo(gridReference)
#############################################################################
def getJobPilots(self, jobID):
"""Extract the list of submitted pilots and their status for a given
jobID from the WMS. Useful information is printed to the screen.
>>> print dirac.getJobPilots(12345)
{'OK': True, 'Value': {PilotID:{StatusDict}}}
:param job: JobID
:type job: integer or string
:return: S_OK,S_ERROR
"""
if isinstance(jobID, basestring):
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or string for existing jobID')
result = PilotManagerClient().getPilots(jobID)
if result['OK']:
print(self.pPrint.pformat(result['Value']))
return result
#############################################################################
def getPilotSummary(self, startDate='', endDate=''):
"""Retrieve the pilot output for an existing job in the WMS. Summary is
printed at INFO level, full dictionary of results also returned.
>>> print dirac.getPilotSummary()
{'OK': True, 'Value': {CE:{Status:Count}}}
:param job: JobID
:type job: integer or string
:return: S_OK,S_ERROR
"""
result = PilotManagerClient().getPilotSummary(startDate, endDate)
if not result['OK']:
return result
ceDict = result['Value']
headers = 'CE'.ljust(28)
i = 0
for ce, summary in ceDict.iteritems():
states = summary.keys()
if len(states) > i:
i = len(states)
for i in xrange(i):
headers += 'Status'.ljust(12) + 'Count'.ljust(12)
print(headers)
for ce, summary in ceDict.iteritems():
line = ce.ljust(28)
states = sorted(summary)
for state in states:
count = str(summary[state])
line += state.ljust(12) + count.ljust(12)
print(line)
return result
#############################################################################
def setSiteProtocols(self, site, protocolsList, printOutput=False):
"""
Set the list of protocols to be used for each SE at the given site.
"""
result = self.__checkSiteIsValid(site)
if not result['OK']:
return result
siteSection = '/Resources/Sites/%s/%s/SE' % (site.split('.')[0], site)
siteSEs = gConfig.getValue(siteSection, [])
if not siteSEs:
return S_ERROR('No SEs found for site %s in section %s' % (site, siteSection))
defaultProtocols = gConfig.getValue('/Resources/StorageElements/DefaultProtocols', [])
self.log.verbose('Default list of protocols are', ', '.join(defaultProtocols))
for protocol in protocolsList:
if protocol not in defaultProtocols:
return S_ERROR('Requested to set protocol %s in list but %s is not '
'in default list of protocols:\n%s' % (protocol, protocol, ', '.join(defaultProtocols)))
modifiedCS = False
result = promptUser('Do you want to add the following default protocols:'
' %s for SE(s):\n%s' % (', '.join(protocolsList), ', '.join(siteSEs)))
if not result['OK']:
return result
if result['Value'].lower() != 'y':
self.log.always('No protocols will be added')
return S_OK()
for se in siteSEs:
sections = gConfig.getSections('/Resources/StorageElements/%s/' % (se))
if not sections['OK']:
return sections
for section in sections['Value']:
if gConfig.getValue('/Resources/StorageElements/%s/%s/ProtocolName' % (se, section), '') == 'SRM2':
path = '/Resources/StorageElements/%s/%s/ProtocolsList' % (se, section)
self.log.verbose('Setting %s to %s' % (path, ', '.join(protocolsList)))
result = self.csSetOption(path, ', '.join(protocolsList))
if not result['OK']:
return result
modifiedCS = True
if modifiedCS:
result = self.csCommitChanges(False)
if not result['OK']:
return S_ERROR('CS Commit failed with message = %s' % (result['Message']))
else:
if printOutput:
print('Successfully committed changes to CS')
else:
if printOutput:
print('No modifications to CS required')
return S_OK()
#############################################################################
def csSetOption(self, optionPath, optionValue):
"""
Function to set the value of an option in the CS.
"""
return self.csAPI.setOption(optionPath, optionValue)
#############################################################################
def csSetOptionComment(self, optionPath, comment):
"""
Function to set a comment on an existing option in the CS.
"""
return self.csAPI.setOptionComment(optionPath, comment)
#############################################################################
def csModifyValue(self, optionPath, newValue):
"""
Function to modify an existing value in the CS.
"""
return self.csAPI.modifyValue(optionPath, newValue)
#############################################################################
def csRegisterUser(self, username, properties):
"""
Registers a user in the CS.
- username: Username of the user (easy;)
- properties: Dict containing:
- DN
- groups : list/tuple of groups the user belongs to
- <others> : More properties of the user, like mail
"""
return self.csAPI.addUser(username, properties)
#############################################################################
def csDeleteUser(self, user):
"""
Deletes a user from the CS. Can take a list of users
"""
return self.csAPI.deleteUsers(user)
#############################################################################
def csModifyUser(self, username, properties, createIfNonExistant=False):
"""
Modify a user in the CS. Takes the same params as in addUser and
applies the changes
"""
return self.csAPI.modifyUser(username, properties, createIfNonExistant)
#############################################################################
def csListUsers(self, group=False):
"""
Lists the users in the CS. If no group is specified return all users.
"""
return self.csAPI.listUsers(group)
#############################################################################
def csDescribeUsers(self, mask=False):
"""
List users and their properties in the CS.
If a mask is given, only users in the mask will be returned
"""
return self.csAPI.describeUsers(mask)
#############################################################################
def csModifyGroup(self, groupname, properties, createIfNonExistant=False):
"""
Modify a group in the CS. Takes the same params as in addGroup and applies
the changes
"""
return self.csAPI.modifyGroup(groupname, properties, createIfNonExistant)
#############################################################################
def csListHosts(self):
"""
Lists the hosts in the CS
"""
return self.csAPI.listHosts()
#############################################################################
def csDescribeHosts(self, mask=False):
"""
Gets extended info for the hosts in the CS
"""
return self.csAPI.describeHosts(mask)
#############################################################################
def csModifyHost(self, hostname, properties, createIfNonExistant=False):
"""
Modify a host in the CS. Takes the same params as in addHost and applies
the changes
"""
return self.csAPI.modifyHost(hostname, properties, createIfNonExistant)
#############################################################################
def csListGroups(self):
"""
Lists groups in the CS
"""
return self.csAPI.listGroups()
#############################################################################
def csDescribeGroups(self, mask=False):
"""
List groups and their properties in the CS.
If a mask is given, only groups in the mask will be returned
"""
return self.csAPI.describeGroups(mask)
#############################################################################
def csSyncUsersWithCFG(self, usersCFG):
"""
Synchronize users in cfg with its contents
"""
return self.csAPI.syncUsersWithCFG(usersCFG)
#############################################################################
def csCommitChanges(self, sortUsers=True):
"""
Commit the changes in the CS
"""
return self.csAPI.commitChanges(sortUsers=sortUsers)
#############################################################################
def sendMail(self, address, subject, body, fromAddress=None, localAttempt=True, html=False):
"""
Send mail to specified address with body.
"""
notification = NotificationClient()
return notification.sendMail(address, subject, body, fromAddress, localAttempt, html)
#############################################################################
def sendSMS(self, userName, body, fromAddress=None):
"""
Send an SMS with the given body to the specified DIRAC user.
"""
if len(body) > 160:
return S_ERROR('Exceeded maximum SMS length of 160 characters')
notification = NotificationClient()
return notification.sendSMS(userName, body, fromAddress)
#############################################################################
def getBDIISite(self, site, host=None):
"""
Get information about site from BDII at host
"""
return ldapSite(site, host=host)
#############################################################################
def getBDIICluster(self, ce, host=None):
"""
Get information about ce from BDII at host
"""
return ldapCluster(ce, host=host)
#############################################################################
def getBDIICE(self, ce, host=None):
"""
Get information about ce from BDII at host
"""
return ldapCE(ce, host=host)
#############################################################################
def getBDIIService(self, ce, host=None):
"""
Get information about a service from BDII at host
"""
return ldapService(ce, host=host)
#############################################################################
def getBDIICEState(self, ce, useVO=voName, host=None):
"""
Get information about ce state from BDII at host
"""
return ldapCEState(ce, useVO, host=host)
#############################################################################
def getBDIICEVOView(self, ce, useVO=voName, host=None):
"""
Get information about ce voview from BDII at host
"""
return ldapCEVOView(ce, useVO, host=host)
#############################################################################
def getBDIISE(self, site, useVO=voName, host=None):
"""
Get information about the SE storage areas (SA) for the given site from BDII at host
"""
return ldapSE(site, useVO, host=host)
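# --- Usage sketch (not part of the upstream module) ----------------------------------
# A minimal, hedged example of driving the pilot helpers defined above from a script.
# It assumes the enclosing class is DiracAdmin, that a valid proxy and DIRAC
# configuration are available, and the pilot reference below is purely hypothetical.
if __name__ == '__main__':
  diracAdmin = DiracAdmin()
  pilotRef = 'https://some-ce.example.org:9000/abc123'  # hypothetical pilot reference
  print(diracAdmin.getPilotInfo(pilotRef))
  print(diracAdmin.getPilotSummary())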
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
fstagni/DIRAC
|
Interfaces/API/DiracAdmin.py
|
Python
|
gpl-3.0
| 31,575
|
[
"DIRAC"
] |
9b0e2c59b869f537a5e9fc24d51cd012a8fac38d2da993e06a68971495aec6fa
|
import sys
import numpy as np
from mpi4py import MPI
from uvw.parallel import PRectilinearGrid, PImageData
from uvw import DataArray
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if comm.Get_size() != 4:
if rank == 0:
print('Please execute with 4 MPI tasks', file=sys.stderr)
sys.exit(1)
N = 20
# Domain bounds per rank
bounds = [
{'x': (-2, 0), 'y': (-2, 0)},
{'x': (-2, 0), 'y': (0, 2)},
{'x': (0, 2), 'y': (-2, 2)},
{'x': (-2, 2), 'y': (2, 3)},
]
# Domain sizes per rank
sizes = [
{'x': N, 'y': N},
{'x': N, 'y': N},
{'x': N, 'y': 2*N-1}, # account for overlap
{'x': 2*N-1, 'y':N//2},
]
# Size offsets per rank
offsets = [
[0, 0],
[0, N],
[N, 0],
[0, 2*N-1],
]
x = np.linspace(*bounds[rank]['x'], sizes[rank]['x'])
y = np.linspace(*bounds[rank]['y'], sizes[rank]['y'])
out_name = 'parallel_mpi.pvtr'
xx, yy = np.meshgrid(x, y, indexing='ij', sparse=True)
r = np.sqrt(xx**2 + yy**2)
data = np.exp(-r**2)
# Indicating rank info with a cell array
proc = np.ones((x.size-1, y.size-1)) * rank
with PRectilinearGrid(out_name, (x, y), offsets[rank]) as rect:
rect.addPointData(DataArray(data, range(2), 'gaussian'))
rect.addCellData(DataArray(proc, range(2), 'proc'))
out_name = 'parallel_mpi.pvti'
ranges = [bounds[rank]['x'], bounds[rank]['y']]
points = [sizes[rank]['x'], sizes[rank]['y']]
with PImageData(out_name, ranges, points, offsets[rank]) as rect:
rect.addPointData(DataArray(data, range(2), 'gaussian'))
rect.addCellData(DataArray(proc, range(2), 'proc'))
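# A small optional addition (not in the original example): rank 0 reports where the
# parallel VTK files were written. Run with e.g. "mpirun -n 4 python parallel_mpi.py"
# and open the .pvtr/.pvti files in ParaView or VisIt to inspect the 4-rank layout.
if rank == 0:
    print('Wrote parallel_mpi.pvtr and parallel_mpi.pvti for a 4-rank decomposition')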
|
prs513rosewood/uvw
|
examples/parallel_mpi.py
|
Python
|
mit
| 1,555
|
[
"Gaussian"
] |
d02fd0b4d109f26612218777f48664e195f28e93d023dbe0639c593fbb7e0064
|
from __future__ import absolute_import, division, print_function
import pprint
import sys
import textwrap
import pytest
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv
class TestCollector(object):
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol(
"""
class TestClass(object):
def test_foo():
pass
"""
)
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
"""
)
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, testdir):
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
testdir.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS(object):
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", "test_notfound.py")
tmpdir.ensure("dist", "test_notfound.py")
tmpdir.ensure("_darcs", "test_notfound.py")
tmpdir.ensure("CVS", "test_notfound.py")
tmpdir.ensure("{arch}", "test_notfound.py")
tmpdir.ensure(".whatever", "test_notfound.py")
tmpdir.ensure(".bzr", "test_notfound.py")
tmpdir.ensure("normal", "test_found.py")
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
testdir.tmpdir.ensure("virtual", bindir, fname)
testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
assert "test_invenv" not in result.stdout.str()
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
result = testdir.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
testdir.tmpdir.ensure(".virtual", bindir, fname)
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" not in result.stdout.str()
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test__in_venv(self, testdir, fname):
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path = testdir.tmpdir.mkdir("venv")
assert _in_venv(base_path) is False
# with bin/activate, totally a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
def test_custom_norecursedirs(self, testdir):
testdir.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay(object):
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback(object):
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report():
outcome = yield
rep = outcome.get_result()
rep.headerlines += ["header1"]
outcome.force_result(rep)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests(object):
def test_ignore_collect_path(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines("*1 passed*")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines("*collected 0 items*")
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
"""
)
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
"""
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
"""
)
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
"""
)
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession(object):
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
# root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
# assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def get_reported_items(self, hookrec):
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
x
for call in calls
for x in call.report.result
if isinstance(x, pytest.Item)
]
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.result[0].name == 'test_func'"),
]
)
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
normid = p.basename + "::TestClass::()::test_method"
for id in [
p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
"""
% p.basename
)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == collector.session.fspath"),
(
"pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'",
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"),
]
)
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
]
)
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
class Test_getinitialnodes(object):
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
"""Verify nesting when a module is within a package.
The parent chain should match: Module<x.py> -> Package<subdir> -> Session.
Session's parent should always be None.
"""
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert col.name == "x.py"
assert isinstance(col, pytest.Module)
assert isinstance(col.parent, pytest.Package)
assert isinstance(col.parent.parent, pytest.Session)
# session is batman (has no parents)
assert col.parent.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems(object):
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile(
"""
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = testdir.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords(object):
def test_no_under(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
values = list(modcol.keywords)
assert modcol.name in values
for x in values:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
"""
)
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
Verify collection is aborted once maxfail errors are encountered ignoring
further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
)
assert "test_03" not in res.stdout.str()
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
Verify the test run aborts due to collection errors even if maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_continue_on_collection_errors(testdir):
"""
Verify tests are executed even when collection errors occur when the
--continue-on-collection-errors flag is set
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
)
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
def test_fixture_scope_sibling_conftests(testdir):
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
foo_path = testdir.mkdir("foo")
foo_path.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix():
return 1
"""
)
)
foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
food_path = testdir.mkpydir("food")
food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
res = testdir.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"*ERROR at setup of test_food*",
"E*fixture 'fix' not found",
"*1 passed, 1 error*",
]
)
def test_collect_init_tests(testdir):
"""Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)"""
p = testdir.copy_example("collect/collect_init_tests")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(
[
"*<Module '__init__.py'>",
"*<Function 'test_init'>",
"*<Module 'test_foo.py'>",
"*<Function 'test_foo'>",
]
)
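# A hedged illustration (not part of the test suite) of the layout exercised by
# test_collect_init_tests above; the ini file and package names are hypothetical:
#
#   # pytest.ini
#   [pytest]
#   python_files = *.py
#
#   # tests/__init__.py
#   def test_init():
#       assert True
#
# With a configuration of this kind, "pytest --collect-only" reports the __init__.py
# module and its test_init function, as asserted in the test above.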
|
ddboline/pytest
|
testing/test_collection.py
|
Python
|
mit
| 32,740
|
[
"VisIt"
] |
cad8298c234618b42a1d380ec58e8c7e5b30e6115f04b9df5b25bf2bca096d4b
|
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import re
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionLoader, get_collection_name_from_path, set_collection_playbook_paths
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we dont
# have other secrets setup, then automatically add a password prompt as well.
# prompts cant/shouldnt work without a tty, so dont add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
@staticmethod
def ask_passwords():
''' prompt for connection and become passwords if needed '''
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
if op['ask_pass']:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
else:
become_prompt = "%s password: " % become_prompt_method
if op['become_ask_pass']:
becomepass = getpass.getpass(prompt=become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
except EOFError:
pass
# we 'wrap' the passwords to prevent templating as
# they can contain special chars and trigger it incorrectly
if sshpass:
sshpass = to_unsafe_text(sshpass)
if becomepass:
becomepass = to_unsafe_text(becomepass)
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
# Dup args set on the root parser and sub parsers results in the root parser ignoring the args. e.g. doing
# 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
# back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
if verbosity_arg:
display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
"after the sub command", "2.13")
options.verbosity = verbosity_arg.count('v')
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
options = self.parser.parse_args(self.args[1:])
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def _play_prereqs():
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
set_collection_playbook_paths(basedir)
default_collection = get_collection_name_from_path(basedir)
if default_collection:
display.warning(u'running with default collection {0}'.format(default_collection))
AnsibleCollectionLoader().set_default_collection(default_collection)
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'])
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
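# --- Usage sketch (not part of the upstream module) ----------------------------------
# A minimal, hedged demonstration of the static vault-id helpers defined above. Only
# staticmethods are exercised (CLI itself is abstract) and the password-file path is
# hypothetical.
if __name__ == '__main__':
    print(CLI.split_vault_id(u'dev@prompt'))            # -> (u'dev', u'prompt')
    print(CLI.split_vault_id(u'~/.vault_pass.txt'))     # -> (None, u'~/.vault_pass.txt')
    print(CLI.build_vault_ids([u'dev@prompt'], vault_password_files=[u'~/.vault_pass.txt']))
    print(CLI.version_info())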
|
kustodian/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 20,535
|
[
"Galaxy"
] |
11b4273604b3d61540324e8e940176823749ad0ef7f8b3f3467293b0b729f597
|
import csv
import os
import pytest
import shutil
import subprocess
from morphct import run_MorphCT
from morphct.definitions import TEST_ROOT
from testing_tools import TestCommand
from morphct.code import helper_functions as hf
from morphct.code import transfer_integrals as ti
class dummy_chromophore:
def __init__(self):
self.AAIDs = [0, 1]
@pytest.fixture(scope="module", params=[1, 3, 6, 9, 12, 15, 18])
def run_simulation(request):
fail_count = request.param
# ---==============================================---
# ---=============== Setup Prereqs ================---
# ---==============================================---
output_dir = os.path.join(TEST_ROOT, "output_OR")
orca_inp_dir = os.path.join(output_dir, "chromophores", "input_orca", "single")
orca_out_dir = os.path.join(output_dir, "chromophores", "output_orca", "single")
try:
shutil.rmtree(output_dir)
except OSError:
pass
os.makedirs(orca_inp_dir)
os.makedirs(orca_out_dir)
asset_name = os.path.join(TEST_ROOT, "assets", "orca_retrials", "00000.inp")
shutil.copy(asset_name, orca_inp_dir)
file_name = os.path.join("single", os.path.split(asset_name)[1])
# Create dummy inputs for rerun_fails
# First the failed_chromo_files dictionary that keeps track of the fail count and
# the chromo_ID
failed_chromo_files = {file_name: [fail_count, 0]}
# Then, the parameter dictionary (used to get the orca output directory and the
# proc_IDs
parameter_dict = {
"proc_IDs": [0],
"output_orca_directory": output_dir,
"output_morphology_directory": output_dir,
}
# Finally a blank chromophore class (used only to output AAIDs after 18 fails)
chromophore_list = [dummy_chromophore()]
ti.rerun_fails(failed_chromo_files, parameter_dict, chromophore_list)
modified_file = os.path.join(os.path.split(orca_inp_dir)[:-1][0], file_name)
return [fail_count, modified_file]
# ---==============================================---
# ---================= Run Tests ==================---
# ---==============================================---
class TestCompareOutputs(TestCommand):
def test_check_output_exists(self, run_simulation):
# No output is made for 18, so just return True
if run_simulation[0] == 18:
return True
self.confirm_file_exists(
os.path.join(
TEST_ROOT,
"output_OR",
"chromophores",
"output_orca",
"single",
"00000.out",
)
)
def test_check_orca_ran_correctly(self, run_simulation):
# No output is made for 18, so just return True
if run_simulation[0] == 18:
return True
output_file = os.path.join(
TEST_ROOT, "output_OR", "chromophores", "output_orca", "single", "00000.out"
)
with open(output_file, "r") as output_fh:
output_lines = output_fh.readlines()
SCF_success = False
terminated_normally = False
for line in output_lines:
if SCF_success and terminated_normally:
return True
if "SUCCESS" in line:
SCF_success = True
if "ORCA TERMINATED NORMALLY" in line:
terminated_normally = True
return False
def test_check_input_was_modified(self, run_simulation):
fail_count = run_simulation[0]
modified_inp_name = run_simulation[1]
check_inp_name = os.path.join(
TEST_ROOT, "assets", "orca_retrials", "00000_{:02d}.inp".format(fail_count)
)
with open(check_inp_name, "r") as expected_file:
expected_lines = expected_file.readlines()
with open(modified_inp_name, "r") as results_file:
results_lines = results_file.readlines()
self.compare_equal(expected_lines, response=results_lines)
def teardown_module():
shutil.rmtree(os.path.join(TEST_ROOT, "output_OR"))
if __name__ == "__main__":
class parameters:
def __init__(self, param):
self.param = param
run_simulation(parameters(1))
|
matty-jones/MorphCT
|
tests/test_orca_retrials.py
|
Python
|
gpl-3.0
| 4,210
|
[
"ORCA"
] |
1cbbe73625c83ebc35450457e25af02f1cb23ff4217b3c27a44407030856159c
|
#!/usr/bin/python
"""
Automatically generate Medobs 'visit template' by selecting office, start/end times and interval.
Running without any arguments prints list of available offices.
"""
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from djcode.reservations.models import Medical_office, Visit_template
TEMPLATE_VALID_SINCE = '2000-01-01'
def print_offices_list():
print 'I: List of available medical offices:'
for office in Medical_office.objects.all():
print '\t* %s' % office.name
def create_visit_template(office, starttime, endtime, interval):
intervaltime = datetime.timedelta(minutes=interval)
templatetime = starttime
while templatetime.time() <= endtime.time():
for day in Visit_template.DAYS:
if day[0] <= 5: # do not create templates for Saturday and Sunday
if not Visit_template.objects.filter(office=office, day=day[0], starting_time=templatetime.time()):
print 'I: Creating template: %s %s %s' % (office.name, day[1], templatetime.time())
Visit_template.objects.create(office=office, day=day[0], starting_time=templatetime.time(), valid_since=TEMPLATE_VALID_SINCE)
else:
print 'W: Template already exists: %s %s %s ... (skipping)' % (office.name, day[1], templatetime.time())
templatetime = templatetime + intervaltime
class Command(BaseCommand):
help = __doc__
args = "officename starttime(HH:MM) endtime(HH:MM) interval(MM)"
def handle(self, *args, **options):
if len(args) == 0:
print_offices_list()
sys.exit(0)
elif len(args) in range(1, 4):
raise CommandError("Missing some command parameters.")
officename = args[0]
starttime = datetime.datetime.strptime(args[1], '%H:%M')
endtime = datetime.datetime.strptime(args[2], '%H:%M')
interval = int(args[3])
if Medical_office.objects.filter(name=officename):
office = Medical_office.objects.get(name=officename)
create_visit_template(office, starttime, endtime, interval)
else:
			print 'E: Office does not exist.'
sys.exit(1)
# vim: set ts=8 sts=8 sw=8 noet:
|
mmincikova/medobs
|
djcode/reservations/management/commands/medobstemplates.py
|
Python
|
gpl-3.0
| 2,066
|
[
"VisIt"
] |
e26c1b502853ff979480f36ff0ee32056c3ae74ef88f65ce18956e9a18f8a5d0
|
#
# File : mkdir.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-10-04 Bernard The first version
import os
import shutil
from shutil import ignore_patterns
def do_copy_file(src, dst):
# check source file
if not os.path.exists(src):
return
path = os.path.dirname(dst)
# mkdir if path not exist
if not os.path.exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir, ignore=None):
import shutil
# check source directory
if not os.path.exists(src_dir):
return
try:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
except:
        print('Deleting folder %s failed.' % dist_dir)
return
shutil.copytree(src_dir, dst_dir, ignore = ignore)
source_ext = ['c', 'h', 's', 'S', 'cpp', 'xpm']
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
file_type = full_path.rsplit('.',1)[1]
#print file_type
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
def walk_kconfig(RTT_ROOT, source_list):
for parent, dirnames, filenames in os.walk(RTT_ROOT):
if 'bsp' in parent:
continue
if '.git' in parent:
continue
if 'tools' in parent:
continue
if 'Kconfig' in filenames:
pathfile = os.path.join(parent, 'Kconfig')
source_list.append(pathfile)
if 'KConfig' in filenames:
pathfile = os.path.join(parent, 'KConfig')
source_list.append(pathfile)
def bsp_copy_files(bsp_root, dist_dir):
# copy BSP files
do_copy_folder(os.path.join(bsp_root), dist_dir,
ignore_patterns('build', 'dist', '*.pyc', '*.old', '*.map', 'rtthread.bin', '.sconsign.dblite', '*.elf', '*.axf', 'cconfig.h'))
def bsp_update_sconstruct(dist_dir):
with open(os.path.join(dist_dir, 'SConstruct'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'SConstruct'), 'w') as f:
for line in data:
if line.find('RTT_ROOT') != -1:
if line.find('sys.path') != -1:
f.write('# set RTT_ROOT\n')
f.write('if not os.getenv("RTT_ROOT"): \n RTT_ROOT="rt-thread"\n\n')
f.write(line)
def bsp_update_kconfig(dist_dir):
# change RTT_ROOT in Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('default') != -1 and found:
position = line.find('default')
line = line[0:position] + 'default "rt-thread"\n'
found = 0
f.write(line)
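# Illustration of the rewrite performed by bsp_update_kconfig() above (a
# hypothetical Kconfig fragment; real option names may differ).  A block like:
#   config RTT_ROOT_DIR
#       string
#       default "../.."
# has its "default" line rewritten to:
#       default "rt-thread"
# so the packaged BSP points at the rt-thread copy bundled inside the dist.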
def bsp_update_kconfig_library(dist_dir):
# change RTT_ROOT in Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('../libraries') != -1 and found:
position = line.find('../libraries')
line = line[0:position] + 'libraries/Kconfig"\n'
found = 0
f.write(line)
def bs_update_ide_project(bsp_root, rtt_root):
import subprocess
# default update the projects which have template file
tgt_dict = {'mdk4':('keil', 'armcc'),
'mdk5':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'cdk':('gcc', 'gcc')}
scons_env = os.environ.copy()
scons_env['RTT_ROOT'] = rtt_root
for item in tgt_dict:
child = subprocess.Popen('scons --target=' + item, cwd=bsp_root, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = child.communicate()
if child.returncode == 0:
print('update %s project' % item)
def zip_dist(dist_dir, dist_name):
import zipfile
zip_filename = os.path.join(dist_dir)
zip = zipfile.ZipFile(zip_filename + '.zip', 'w')
pre_len = len(os.path.dirname(dist_dir))
for parent, dirnames, filenames in os.walk(dist_dir):
for filename in filenames:
pathfile = os.path.join(parent, filename)
arcname = pathfile[pre_len:].strip(os.path.sep)
zip.write(pathfile, arcname)
zip.close()
def MkDist_Strip(program, BSP_ROOT, RTT_ROOT, Env):
global source_list
print('make distribution and strip useless files....')
dist_name = os.path.basename(BSP_ROOT)
dist_dir = os.path.join(BSP_ROOT, 'dist-strip', dist_name)
target_path = os.path.join(dist_dir, 'rt-thread')
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp library files
if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
print("=> copy stm32 bsp library")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
library_dir = os.path.join(dist_dir, 'libraries')
bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT)
# get all source files from program
for item in program:
walk_children(item)
source_list.sort()
# copy the source files without libcpu and components/libc in RT-Thread
target_list = []
libcpu_dir = os.path.join(RTT_ROOT, 'libcpu').lower()
libc_dir = os.path.join(RTT_ROOT, 'components', 'libc', 'compilers').lower()
sal_dir = os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket').lower()
sources_include_sal = False
for src in source_list:
if src.lower().startswith(BSP_ROOT.lower()):
continue
# skip libc and libcpu dir
if src.lower().startswith(libcpu_dir):
continue
if src.lower().startswith(libc_dir):
continue
if src.lower().startswith(sal_dir):
sources_include_sal = True
continue
if src.lower().startswith(RTT_ROOT.lower()):
target_list.append(src)
source_list = target_list
# get source directory
src_dir = []
for src in source_list:
src = src.replace(RTT_ROOT, '')
if src[0] == os.sep or src[0] == '/':
src = src[1:]
path = os.path.dirname(src)
sub_path = path.split(os.sep)
full_path = RTT_ROOT
for item in sub_path:
full_path = os.path.join(full_path, item)
if full_path not in src_dir:
src_dir.append(full_path)
# add all of SConscript files
for item in src_dir:
source_list.append(os.path.join(item, 'SConscript'))
# add all of Kconfig files
walk_kconfig(RTT_ROOT, source_list)
# copy all files to target directory
source_list.sort()
for src in source_list:
dst = src.replace(RTT_ROOT, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print('=> %s' % dst)
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
print('=> %s' % os.path.join('components', 'libc', 'compilers'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'libc', 'compilers'), os.path.join(target_path, 'components', 'libc', 'compilers'))
if sources_include_sal:
print('=> %s' % os.path.join('components', 'net', 'sal_socket'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket'), os.path.join(target_path, 'components', 'net', 'sal_socket'))
# copy all libcpu/ARCH directory
import rtconfig
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, rtconfig.CPU)))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, rtconfig.CPU), os.path.join(target_path, 'libcpu', rtconfig.ARCH, rtconfig.CPU))
if os.path.exists(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common')):
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, 'common')))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common'), os.path.join(target_path, 'libcpu', rtconfig.ARCH, 'common'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# update all project files
bs_update_ide_project(dist_dir, target_path)
# make zip package
zip_dist(dist_dir, dist_name)
print('done!')
def MkDist(program, BSP_ROOT, RTT_ROOT, Env):
print('make distribution....')
dist_name = os.path.basename(BSP_ROOT)
dist_dir = os.path.join(BSP_ROOT, 'dist', dist_name)
target_path = os.path.join(dist_dir, 'rt-thread')
# copy BSP files
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp library files
if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
print("=> copy stm32 bsp library")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
library_dir = os.path.join(dist_dir, 'libraries')
bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT)
# copy tools directory
print('=> components')
do_copy_folder(os.path.join(RTT_ROOT, 'components'), os.path.join(target_path, 'components'))
# skip documentation directory
# skip examples
# copy include directory
print('=> include')
do_copy_folder(os.path.join(RTT_ROOT, 'include'), os.path.join(target_path, 'include'))
# copy all libcpu/ARCH directory
print('=> libcpu')
import rtconfig
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH), os.path.join(target_path, 'libcpu', rtconfig.ARCH))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
# copy src directory
print('=> src')
do_copy_folder(os.path.join(RTT_ROOT, 'src'), os.path.join(target_path, 'src'))
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# update all project files
bs_update_ide_project(dist_dir, target_path)
# make zip package
zip_dist(dist_dir, dist_name)
print('done!')
|
zhaojuntao/rt-thread
|
tools/mkdist.py
|
Python
|
apache-2.0
| 14,163
|
[
"CDK"
] |
9eb14fe82a2951039e4e7e0a17b03e7007f6f322a03a441005b9aeb6f56a868e
|
#!/usr/bin/env python
# Authors: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
ttree Ttree is a simple program for recursively composing and generating
large redundant text files from small template files.
By default, the large number of unique template variables generated
in the process are automatically substituted with integers
(or other numeric counters, all of which can be overridden),
rendered, and the rendered templates are written to a file.
ttree was designed to be useful for generating input files for
molecular simulation software like LAMMPS or NAMD.
BasicUI This section of the code contains the user interface for ttree
when run as a stand-alone program, as described above. (This
section of code contains the "if __name__ == __main__:" code block.)
-- Data Types --
StaticObj Static nodes are data structures used to store ttree class definitions.
(Static nodes are useful for defining molecule types or
namespaces in LAMMPS or other molecular simulation programs.)
The nodes themselves are stored in a tree of nested class definitions.
Static variables (such as "@atom:C") are also associated with
StaticObjs.
InstanceObj Instance nodes are created when a user creates one (or many)
copies of a class, using the "new" command.
These classes in turn may instantiate other classes.
(Example: A user may manually instantiate several copies of a
molecule, such as a protein, however each of those
molecules may contain molecular subunits, such as
amino acids, which are automatically instantiated.)
Instance variables (such as "$atom:CA") are also associated with
InstanceObjs.
"""
import sys
from collections import defaultdict
import operator
import random
#import gc
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
# -- ttree_lex.py --
# TtreeShlex is a backwards-compatible version of python's standard shlex module.
# It has the additional member: "self.wordterminators", which overrides
# the "self.wordchars" member. This enables better handling of unicode
# characters by allowing a much larger variety of characters to appear
# in words or tokens parsed by TtreeShlex. Otherwise it is identical to shlex.
from ttree_lex import *
if sys.version < '2.6':
raise InputError('Error: Using python '+sys.version+'\n'
' Alas, you must upgrade to a newer version of python (2.7 or later).')
elif sys.version < '2.7':
sys.stderr.write('--------------------------------------------------------\n'
'----------------- WARNING: OLD PYTHON VERSION ----------\n'
' This program is untested on your python version ('+sys.version+').\n'
' PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n'
' -Andrew 2014-11-28\n'
'--------------------------------------------------------\n'
'--------------------------------------------------------\n')
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# We keep track of the program name and version.
# (This is only used for generating error messages.)
#g_filename = 'ttree.py'
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2015-8-17'
g_version_str = '0.80'
class ClassReference(object):
""" Every class defined by the user (stored in an StaticObj data structure)
may contain references to other classes (ie. other StaticObjs).
(Note: All of these StaticObjs are stored in the same tree, the
global static tree.)
Examples:
Whenever an instance of a class is created, this may automatically spawn
the creation of additional classes (which are instantiated because a 'new'
command appeared within the first class's definition). These are stored in
the "StaticObj.instance_commands[i].class_ref" attribute.
Similarly, each class (StaticObj) can optionally inherit some of its
traits (consisting of write() and new commands) from one or more
"class_parents" (also StaticObjs). A list of these parents is stored in the
"StaticObj.class_parents" attribute. In both cases (self.instance_commands
    or self.class_parents) we need to store a pointer to the StaticObj(s)
    corresponding to the instance-children or class-parents.
    (This is stored in self.statobj.)
However, for the purposes of debugging and interactivity, it is also
convenient to permanently keep track of the string that the user used to
specify the name/location of that class/StaticObj
(stored in self.statobj_str), in addition to the location
in the file where that string occurs (stored in self.srcloc)."""
__slots__=["statobj_str","srcloc","statobj"]
def __init__(self,
statobj_str=None,
srcloc=None,
statobj=None):
self.statobj_str = statobj_str
if srcloc is None:
self.srcloc = OSrcLoc('', -1)
else:
self.srcloc = srcloc
self.statobj = statobj
#def __repr__(self):
# return repr((self.statobj_str, self.srcloc))
# "Command"s are tasks to carry out.
# (...either immediately, or later during instantiation)
# Commands are used to write to files, create new instances, delete instances,
# or custom commands to modify an instance of a class.
# (For example "instance = new Class.move(1,0,0).rot(45,0,0,1)"
# The ".move(1,0,0)" and ".rot(45,0,0,1)" commands are "stackable" and
# have similar syntax to member functions in C++, JAVA, Python.)
class Command(object):
__slots__=["srcloc"]
def __init__(self, srcloc=None):
self.srcloc = srcloc
# COMMENTING OUT: "COUNT" AND "ORDER" ARE NO LONGER NEEDED
#count = 0
#def __init__(self, srcloc=None):
# self.srcloc = srcloc
# # The "order" member is a counter that keeps track of the order
# # in which the Command data types are created (issued by the user).
# Command.count += 1
# self.order = Command.count
#def __lt__(self, x):
# return self.order < x.order
class WriteFileCommand(Command):
""" WriteFileCommand
filename This is the name of the file that will be written to
when the command is executed.
tmpl_list This is the contents of what will be written to the file.
Text strings are often simple strings, however more
generally, they can be strings which include other variables
(ie templates). In general, templates are lists of alternating
TextBlocks and VarRefs, (with additional tags and data to
identify where they occur in in the original user's files).
"""
__slots__=["filename", "tmpl_list"]
def __init__(self,
filename = None,
tmpl_list = None,
srcloc = None):
self.filename = filename
if tmpl_list is None:
self.tmpl_list = []
else:
Command.__init__(self, srcloc)
self.tmpl_list = tmpl_list
def __str__(self):
if self.filename:
return 'WriteFileCommand(\"'+self.filename+'\")'
else:
return 'WriteFileCommand(NULL)'
def __copy__(self):
tmpl_list = []
CopyTmplList(self.tmpl_list, tmpl_list) #CHECK:IS_MEMORY_WASTED_HERE?
return WriteFileCommand(self.filename, tmpl_list, self.srcloc)
class InstantiateCommand(Command):
""" InstantiateCommand is a simple tuple-like datatype used to
store pairs of names (strings, stored in self.name),
    and ClassReferences (see above, stored in self.class_ref).
The "suffix" argument is an optional string which may contain
additional instructions how to instantiate the object.
"""
__slots__=["name",
"class_ref",
"instobj"]
def __init__(self,
name = None,
class_ref = None,
srcloc = None,
instobj = None):
Command.__init__(self, srcloc)
self.name = name
#if class_ref is None:
# self.class_ref = ClassReference()
#else:
self.class_ref = class_ref
self.instobj = instobj
def __str__(self):
return 'InstantiateCommand('+self.name+')'
def __copy__(self):
return InstantiateCommand(self.name,
self.class_ref,
self.srcloc,
self.instobj)
class DeleteCommand(Command):
__slots__=[]
def __init__(self,
srcloc = None):
Command.__init__(self, srcloc)
def __str__(self):
return 'DeleteCommand()'
def __copy__(self):
return DeleteCommand(self.srcloc)
class StackableCommand(Command):
""" StackableCommand is a class for storing commands
that effect the environment of the object being created.
The combined effect of these commands can be thought of as a "stack"
Commands can be pushed on the stack, or popped off
The actual commands themselves are represented by the "contents" member
which is usually a text string.
ttree.py does not attempt to understand the content of these commands.
That job is left up to the __main___ module. (IE. whatever script that
happens to be importing ttree.py. If there is no script, and
ttree.py IS the main module, then it simply ignores these commands.)
"""
__slots__=["context_node"]
def __init__(self,
srcloc,
context_node=None):
Command.__init__(self, srcloc)
self.context_node = context_node # if multiple stacks are present, then use "context_node"
# as a key to identify which stack you want
# the command to modify
class PushCommand(StackableCommand):
__slots__=["contents"]
def __init__(self,
contents,
srcloc,
context_node=None):
StackableCommand.__init__(self, srcloc, context_node)
self.contents = contents
def __copy__(self):
return PushCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushCommand('+str(self.contents)+')'
class PushRightCommand(PushCommand):
__slots__=[]
def __init__(self,
contents,
srcloc,
context_node=None):
PushCommand.__init__(self, contents, srcloc, context_node)
def __copy__(self):
return PushRightCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushRightCommand('+str(self.contents)+')'
class PushLeftCommand(PushCommand):
__slots__=[]
def __init__(self,
contents,
srcloc,
context_node=None):
PushCommand.__init__(self, contents, srcloc, context_node)
def __copy__(self):
return PushLeftCommand(self.contents, self.srcloc, self.context_node)
def __str__(self):
return 'PushLeftCommand('+str(self.contents)+')'
class PopCommand(StackableCommand):
__slots__=["partner"]
def __init__(self,
partner,
srcloc,
context_node=None):
StackableCommand.__init__(self, srcloc, context_node)
self.partner = partner
def __copy__(self):
return PopCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopCommand('+str(self.partner.contents)+')'
class PopRightCommand(PopCommand):
__slots__=[]
def __init__(self,
partner,
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner is None) or isinstance(partner, PushRightCommand))
def __copy__(self):
return PopRightCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopRightCommand('+str(self.partner.contents)+')'
class PopLeftCommand(PopCommand):
__slots__=[]
def __init__(self,
partner,
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner is None) or isinstance(partner, PushLeftCommand))
def __copy__(self):
return PopLeftCommand(self.partner, self.srcloc, self.context_node)
def __str__(self):
return 'PopLeftCommand('+str(self.partner.contents)+')'
# The ScopeCommand, ScopeBegin, and ScopeEnd commands are useful to designate
# which commands belong to a particular class definition (or class instance).
# (This is useful later on, when a linear list of commands has been created.)
# They are simply markers and do not do anything. These classes can be ignored.
class ScopeCommand(Command):
__slots__=["node"]
def __init__(self,
node,
srcloc):
Command.__init__(self, srcloc)
self.node = node
#self.srcloc = srcloc
def __copy__(self):
return ScopeCommand(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeCommand('+self.node.name+')'
else:
return 'ScopeCommand(None)'
class ScopeBegin(ScopeCommand):
__slots__=[]
def __init__(self, node, srcloc):
ScopeCommand.__init__(self, node, srcloc)
def __copy__(self):
return ScopeBegin(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeBegin('+NodeToStr(self.node)+')'
else:
return 'ScopeBegin(None)'
class ScopeEnd(ScopeCommand):
__slots__=[]
def __init__(self, node, srcloc):
ScopeCommand.__init__(self, node, srcloc)
def __copy__(self):
return ScopeEnd(self.node, self.srcloc)
def __str__(self):
if self.node:
return 'ScopeEnd('+NodeToStr(self.node)+')'
else:
return 'ScopeEnd(None)'
# COMMENTING OUT: NOT NEEDED AT THE MOMENT
#class VarAssignCommand(Command):
# """ VarAssignCommand
#
# This class is used whenever the user makes an explicit request to assign
# a variable to a value (values are text strings).
#
# var_ref The variable name (tecnically speaking, I call this
# a variable descriptor string and it includes at least one of
# the following: the name of a leaf node, a category node name,
# and category name)
# the location in the file where variable appears, and (eventually
# after subsequent lookup), references to the leaf_node, cat_node,
# "Category", and "VarBinding" data structures associated with it.
# text_tmpl Text strings are often simple strings, however more
# generally, they can be strings which include other variables
# (ie templates). In general, templates are lists of alternating
# TextBlocks and VarRefs, (with additional tags and data to
# identify where they occur in in the original user's files).
#
# """
# __slots__=["var_ref","text_tmpl"]
#
# def __init__(self,
# #command_name = '=', <-- ?!?
# var_ref = None,
# text_tmpl=None):
# Command.__init__(self, srcloc)
# self.var_ref = var_ref
# self.text_tmpl = text_tmpl
class ModCommand(object):
__slots__=["command","multi_descr_str"]
def __init__(self,
command,
multi_descr_str):
self.command = command
self.multi_descr_str = multi_descr_str
def __str__(self):
return 'ModCommand('+str(self.command)+')'
def __copy__(self):
return ModCommand(self.command.__copy__(), self.multi_descr_str)
def CopyTmplList(source_tmpl_list, dest_cpy):
for entry in source_tmpl_list:
if isinstance(entry, TextBlock):
dest_cpy.append(entry) # Then make a shallow copy
# (pointer assignment) to the text
# block (Text blocks do not change
# during instantiation.)
elif isinstance(entry, VarRef):
assert(len(entry.prefix)>0)
if entry.prefix[0] == '@': # '@' vars refer to static data
dest_cpy.append(entry) # Then make a shallow copy
# pointer assignment) to the static
# variable. (Static variables do
# not change during instantiation.)
elif entry.prefix[0] == '$': # new '$' vars are created
# during every instantiation.
# var_refs do change when you instantiate them. So
# create a new VarRef object, and copy the attributes.
var_ref = VarRef(entry.prefix,
entry.descr_str,
entry.suffix,
entry.srcloc)
# Note: for instance variables ('$' vars)
# "entry.nptr" should not contain
# any data yet, so we just ignore it.
# I assert this below:
assert((entry.nptr.cat_node is None) and
(entry.nptr.leaf_node is None))
dest_cpy.append(var_ref)
else:
assert(False) # prefix[0] should be either '@' or '$'
else:
assert(False) # type(entry) should be either TextBlock or VarRef
def RecursiveJoin(tokens_expr, delimiter = ''):
    """ RecursiveJoin() converts a tree-like list/tuple of tokens, for example:
    ['a ', ('tree', '-', ['like ', 'container']), [[' '], 'of'], ' strings']
    to an ordinary string, eg:
    'a tree-like container of strings'
    This behaves similarly to "reduce(lambda a, b: a+b, tokens)",
    except that it works with arbitrarily nested lists/tuples."""
    if isinstance(tokens_expr, basestring):
        return tokens_expr
    else:
        # Recursively flatten each (possibly nested) entry, then join them.
        text_lstr = []
        for i in range(0, len(tokens_expr)):
            text_lstr.append(RecursiveJoin(tokens_expr[i], delimiter))
        return delimiter.join(text_lstr)
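# Quick illustration of RecursiveJoin() (a sketch using the docstring's example):
#   RecursiveJoin(['a ', ('tree', '-', ['like ', 'container']),
#                  [[' '], 'of'], ' strings'])
#   -> 'a tree-like container of strings'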
#----------------------------------------------------------
#----------------------------------------------------------
# The following code is specific to ttree.
#
# (Up until this point, we have only defined
# a few simple general text parsing routines.)
#----------------------------------------------------------
#----------------------------------------------------------
def PtknsToStr(path_tokens):
"""
There are three ways to store paths:
As a single string: '/Protein/Phe/Ca' <- the format entered by the user
As a list of tokens ['Protein', 'Phe', 'Ca'] <- split into tokens
As a list of nodes in a tree (pointers to nodes in a tree hierarchy)
This function converts between the first two formats.
"""
text = ''
if len(path_tokens) > 0:
text = path_tokens[0]
for i in range(1, len(path_tokens)):
text += '/' + path_tokens[i]
else:
text = ''
return text
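# For example (a sketch; a leading '' token corresponds to an absolute path):
#   PtknsToStr(['Protein', 'Phe', 'Ca'])      -> 'Protein/Phe/Ca'
#   PtknsToStr(['', 'Protein', 'Phe', 'Ca'])  -> '/Protein/Phe/Ca'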
def StrToPtkns(path_string):
    """ The inverse of PtknsToStr(), this function splits a string like
    '/usr/local/../bin/awk' into ['', 'usr', 'local', '..', 'bin', 'awk'].
    For illustrative purposes only.  Use text.split('/') directly instead."""
    return path_string.split('/')
def FindChild(name, node, dbg_loc):
""" FindChild looks over the list of node.children to find a child
which matches the name given in the first argument.
If it is not found, it returns None.
Note: I have not yet specified what kind of nodes FindChild() operates
on. Both StaticObjs and InstanceObjs have self.children and self.parent.
However only StaticObjs have "self.class_parents".
("class_parents" are "parents" in the object-oriented sense.)
If "node" (2nd argument) happens t be an StaticObj, this means it also
We must search over the children of these class_parents as well.
Terminology used here differs from Object Oriented Programming
Children in node.children are not children in the object-oriented
programming sense. However, in OOP, "children" are objects that share all
    of the traits of their ancestors (and may have additional traits as well).
    I have implemented OOP style children and parents, but this information
    is stored in "node.class_parents", instead of "node.parents".
    For comparison, instantiated nodes (InstanceObjs) are different. Although
instantiated classes (InstanceObjs) have access to the attributes of the
class_parents of the StaticObjs that define them, they do not remember the
ownership of that data. (It just gets merged with their own member data,
including their .children.)
    Hence we must treat StaticObjs carefully because there are two ways we can
access child data. We should loop over both of them. We do that below:
"""
child = node.children.get(name)
if child:
return child
if isinstance(node, StaticObj):
# The object-oriented inheritance stuff appears here.
# If you don't care about OOP or inheritance,
# then comment out the loop that follows:
# Search recursively over the "children" (ie attributes or members)
# belonging to any OOP ancestors of this node.
for class_parent in node.class_parents:
child = FindChild(name, class_parent, dbg_loc)
if child != None:
return child
for namespace_node in node.namespaces:
child = FindChild(name, namespace_node, dbg_loc)
if child != None:
return child
else:
assert(isinstance(node, InstanceObjBasic))
# Otherwise, a child name match was not found
return None
def FollowPath(path_tokens, starting_node, dbg_loc):
""" FollowPath() returns the "last_node", a node whose position in the
tree is indicated by a list of path_tokens, describing the names
of nodes connecting "starting_node" to "last_node".
    If one of the strings in the list of path_tokens turns out
    not to match the names of classes in the tree, then this function
returns the last_node that did match before the error occurred,
as well as an integer which stores the number of tokens in
the path_tokens list which were successfully processed.
    In other words, the list of node names is not a full path, but the
    relative path that takes you from one node (not necessarily the root)
    to another.  Ideally, each node in the list should be a parent or a
    child of the previous node.  (See the comment above PtknsToStr() for
    more details.)
    Return Value:
This function returns the number of path_tokens successfully
parsed. Under normal termination, this is len(path_tokens).
If the path can not be followed (because at some point, a child
or parent does not exist), then this function returns a number
smaller than len(path_tokens).
We let the caller handle undefined paths. """
#print(' FollowPath() invoked on: ', path_tokens)
if len(path_tokens) == 0:
return 0, starting_node
node = starting_node
# Is this path a relative path, or a full path?
# If the path-string began with '/', then it's a full path. This means
# that after processing by split('/'), the first token will be ''
# Example: path_tokens='/Prot/Alanine'.split('/')
# --> path_tokens[0] == ''
if path_tokens[0] == '':
# In that case, then take us to the root node:
while node.parent != None:
node = node.parent
#sys.stdout.write('FollowPath(): Retreating to node \"'+node.name+'\"\n')
i0 = 1 # <- We've just processed the first token. Skip over it later.
else:
i0 = 0
i = i0
while i < len(path_tokens):
if path_tokens[i] == '..':
if node.parent is None:
return i, node # <-return the index into the token list
# Caller will know that something went awry
# if the return value is not equal to the
# length of the token list
else:
node = node.parent
i += 1
elif path_tokens[i] == '...':
node_before_ellipsis = node
if i == len(path_tokens)-1:
return i, node_before_ellipsis
search_target = path_tokens[i+1]
# Now search over the "children" of this node
# for one who's name matches path_tokens[i].
# If not found, then move up to the parent node's children.
# (This is not an exhaustive tree search. Only the nodes which
# are immediate children of this node's parents are searched.)
while node != None:
child = FindChild(search_target, node, dbg_loc)
if child is None:
node = node.parent
else:
node = child
break
if node is None:
# Caller will know that something went awry if the return
# value is not equal to the length of the token list.
return i, node_before_ellipsis
i += 2
elif path_tokens[i] in ('','.'): # <-Note we ignore empty tokens from now on.
# (Same convention is used in specifying a
# directory in a filesystem, eg. using /usr/local
# or /usr//local or /usr/./local. These are all equivalent.)
i += 1
else:
# Now search over the "children" of this
# node for one who's name matches path_tokens[i].
child = FindChild(path_tokens[i], node, dbg_loc)
if child is None:
# In that case, return with the node_list incomplete.
# Let the caller check to see if something went wrong.
return i, node # <-return the index into the token list (i)
# Caller will know that something went awry
# if the return value is not equal to the
# length of the token list
else:
node = child
i += 1
if node.IsDeleted():
#sys.stderr.write('(debug_msg: encountered deleted node: \"'+node.name+'\")\n')
break
return len(path_tokens), node
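# Typical calling pattern (a sketch; PtknsToNode() below does exactly this):
#   i, node = FollowPath(path_tokens, starting_node, dbg_loc)
#   if i < len(path_tokens):
#       ...the token path_tokens[i] could not be resolved; report an error...
#   else:
#       ...node is the target of the path...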
def PtknsToNode(path_tokens, starting_node, dbg_loc):
""" PtknsToNode() is identical to def FollowPath() except
that it raises syntax-error exceptions if the path is undefined."""
i_last_ptkn, last_node = FollowPath(path_tokens, starting_node, dbg_loc)
if i_last_ptkn < len(path_tokens):
#assert(isinstance(last_node,StaticObj)) <--why did I assert this? seems wrong
if (last_node.parent is None) and (path_tokens[i_last_ptkn] == '..'):
#In that case, we tried to back out beyond the root of the tree.
raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
' Invalid variable/class name:\n'
' \"'+PtknsToStr(path_tokens)+'\" located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' There are too many \"..\" tokens in the path string.')
elif path_tokens[i_last_ptkn] == '...':
if i_last_ptkn+1 == len(path_tokens):
raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
' Error in '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' Expected name following \"...\"\n')
else:
search_target = path_tokens[i_last_ptkn+1]
#In that case, we were unable to find the node referenced by "..."
raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
' Class or variable \"'+search_target+'\" not found\n'
' in this context: \"'+PtknsToStr(path_tokens)+'\"\n'
' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
else:
#Then the reason is: The string in path_tokens[i_last_ptkn]
#was supposed to be a child of last_node but a child
#of that name was not found.
err_msg = 'Error('+g_module_name+'.PtknsToNode()):\n'+\
' Undefined variable/class name:\n'+\
' \"'+PtknsToStr(path_tokens)+'\",\n'+\
' This occured near or before '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
' (Specifically \"'+path_tokens[i_last_ptkn]+\
'\" is not a subordinate of \"'+MaxLenStr(last_node.name,'/')+'\".)\n'+\
' This may be due to a typo located here or earlier.\n'+\
' It may also occur if you deleted the object earlier. (Referring to a\n'+\
' deleted object is only forgiven when using [0-9] or [0:10] notation.)\n'+\
' If this object refers to an array you must use brackets []\n'+\
' to explicitly specify the element(s) you want from that array.\n'+\
' (To select multiple elements, you can use [*] or [0-9] or [0:10].)\n'
if (path_tokens[i_last_ptkn] in NodeToPtkns(last_node)):
err_msg += '\nIn this case:\n'+\
' It seems like you may have omitted a } character somewhere before:\n'+\
' '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
raise InputError(err_msg)
assert(False) # One of the two conditions above should be true.
return last_node
def StrToNode(obj_name, starting_node, dbg_loc):
path_tokens = obj_name.split('/')
return PtknsToNode(path_tokens, starting_node, dbg_loc)
def NodeListToPtkns(node_list, dbg_loc=None):
assert(len(node_list) > 0) #The path must contain at least the starting node
path_tokens = [node_list[0].name]
for i in range(1, len(node_list)):
if node_list[i] == node_list[i-1].parent:
path_tokens.append('..')
else:
path_tokens.append(node_list[i].name)
# Now check to make sure the user supplied consistent information:
if (node_list[i] not in node_list[i-1].children.values()):
raise InputError('Error('+g_module_name+'.NodeListToPtkns()):\n'
' Undefined variable/class name:\n'
' \"'+PtknsToStr(path_tokens)+'\" located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' (\"'+path_tokens[i]+'\" is not subordinate to \"'+MaxLenStr(node_list[i-1].name,'/')+'\")\n'
' This could be an internal error.')
return path_tokens
def NodeListToStr(node_list, dbg_loc=None):
assert(len(node_list) > 0) #The path must contain at least the starting node
path_str = node_list[0].name
for i in range(1, len(node_list)):
if node_list[i] == node_list[i-1].parent:
path_str += '/..'
else:
path_str += '/' + node_list[i].name
# Now check to make sure the user supplied consistent information:
if (node_list[i] not in node_list[i-1].children.values()):
                err_msg = 'Error('+g_module_name+'.NodeListToStr()):\n'+\
                          '       Invalid variable/class name:\n' +\
                          '       \"'+path_str+'\"'
if dbg_loc != None:
err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
err_msg += '\n' +\
' (\"'+node_list[i].name+'\" is not a subordinate of \"'+MaxLenStr(node_list[i-1].name,'/')+'\")\n'+\
' This could be an internal error.'
raise InputError(err_msg)
return path_str
def NodeToPtkns(node):
ptkns = []
nd = node
while nd != None:
ptkns.append(nd.name)
nd = nd.parent
ptkns.reverse()
return ptkns
def NodeToStr(node):
ptkns = NodeToPtkns(node)
assert(len(ptkns) > 0)
if node.parent is None:
assert(node.name == '')
return '/'
path_str = ptkns[0]
i = 1
while i < len(ptkns):
path_str += '/'+ptkns[i]
i += 1
return path_str
def CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc):
assert((cat_node != None) and (leaf_node != None))
assert((cat_name != None) and (cat_name != ''))
# Determine the path of the cat node
cat_node_ptkns = NodeToPtkns(cat_node)
cat_node_ptkns.append(cat_name+':')
# Determine the path of the leaf node (which should inherit from cat)
deleted = False
leaf_node_ptkns = []
if cat_node != leaf_node:
node = leaf_node
while node.parent != None:
if node.IsDeleted():
deleted = True
leaf_node_ptkns.append('DELETED_'+node.name)
break
leaf_node_ptkns.append(node.name)
if node.parent == cat_node:
break
node = node.parent
leaf_node_ptkns.reverse()
if not deleted:
# Check that leaf inherits from cat. If not, print error.
if ((node.parent != cat_node) and (node != cat_node)):
err_msg = 'Error('+g_module_name+'.CatLeafNodesToPtkns()):\n'+\
' Invalid variable (category:leaf) pair\n'
if dbg_loc != None:
cat_node_str = NodeToStr(cat_node)
leaf_node_str = NodeToStr(leaf_node)
err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
' (\"'+leaf_node.name+'\" is not in the scope of \"'+cat_node_str+'/'+cat_name+':\")\n'+\
' This will happen if you used the \"category\" command to manually\n'+\
' create a category/counter which is not defined globally.\n'+\
'\n'+\
' Note: Using the analogy of a unix style file system, \n'+\
' the problem is that \"'+leaf_node_str+'\"\n'+\
' is not a subdirectory of \"'+cat_node_str+'\".\n'+\
'\n'+\
' Note: This often occurs when \".../\" is used. In that case, you may\n'+\
' be able to avoid this error by referring to your variable explicitly\n'+\
' by using chains of \"../\" tokens in the path instead of \".../\".\n'
#' Make sure that your variable you are using is defined in \n'+\
#' an environment (currently \"'+leaf_node_str+'\")\n'+\
#' which lies WITHIN the environment where the category was defined.\n'+\
#' (currently \"'+cat_node_str+'\").\n'
raise InputError(err_msg)
else:
err_msg = 'Warning: Strange variable path'
if dbg_loc != None:
err_msg += ' near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)
err_msg += '\n' +\
' The category and leaf nodes for variable \"'+cat_name+':'+leaf_node.name+'\" are the same.\n'+\
' Check to see that this variable is behaving the way you intended.\n'+\
' (It\'s possible this could be an internal error in the program.)\n'
sys.stderr.write(err_msg)
# Merge the list of strings together into a single string:
return cat_node_ptkns + leaf_node_ptkns
def CanonicalCatName(cat_name, cat_node, dbg_loc=None):
# Determine the path of the cat node
tkns = NodeToPtkns(cat_node)
tkns.append(cat_name)
#full_cat_name = tkns[0]
#for i in range(1,len(tkns)):
# full_cat_name += '/'+tkns[i]
# better way:
return '/'.join(tkns)
def CanonicalDescrStr(cat_name, cat_node, leaf_node, dbg_loc=None):
tkns = CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc)
descr_str = tkns[0]
for i in range(1, len(tkns)):
if (len(descr_str)>0) and (descr_str[-1] == ':'):
descr_str += tkns[i]
else:
descr_str += '/'+tkns[i]
return descr_str
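# Example (a sketch with hypothetical node names): for a category 'AtomID'
# defined at the root node and a leaf node located at /Water[3]/H1,
# CanonicalDescrStr('AtomID', root_node, leaf_node) returns
#   '/AtomID:Water[3]/H1'
# i.e. the category node's path, the category name followed by ':', and then
# the leaf's path relative to the category node.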
def CollapsePath(path_tokens):
"""
    CollapsePath() takes a list of strings representing a directory-like
    path (for example '/SUB1A/Sub2A/../Sub2B/sub3b/../sub3c/entry'
    split on '/'), and returns a version which contains no '..' patterns.
    (In the example above, it returns ['SUB1A', 'Sub2B', 'sub3c', 'entry'];
    note that the leading '' token of an absolute path is dropped.)
"""
new_ptkns = []
ndelete = 0
i = len(path_tokens)-1
while i >= 0:
if path_tokens[i] == '..':
ndelete += 1
else:
if (ndelete > 0) and (path_tokens[i] != ''):
# Note: "path_tokens[i] != '')" means "/a/b//c" <-> "/a/b/c"
ndelete -= 1
else:
if len(path_tokens[i]) > 0:
new_ptkns.append(path_tokens[i])
i -= 1
new_ptkns.reverse()
if ndelete > 0:
        return ndelete # <-- useful to let caller know an error occurred
return new_ptkns
def FindCatNode(category_name, current_node, srcloc):
""" Search upwards (toward the ancester nodes), looking for a node
containing a category matching category_name (first argument).
Useful when the user specifies a category name, but neglects to
specify which node it was defined in.
    Note: there is no guarantee that the category node returned by this function
contains an entry in it's "categories" list corresponding to this
category name. You must check for this condition and handle it."""
cat_node = None
node = current_node
while True:
if category_name in node.categories:
cat_node = node
break
elif node.parent != None:
node = node.parent
else:
# node.parent is None, ... we're done
break
if cat_node is None:
assert(node.parent is None)
#sys.stderr.write('Warning near ' +
# ErrorLeader(srcloc.infile,
# srcloc.lineno)+'\n'+
# ' no category named \"'+category_name+'\" found.\n'+
# ' Creating a new global category: /'+
# category_name+':\n')
cat_node = node # the global node
assert(cat_node != None)
return cat_node
def RemoveNullTokens(in_ptkns):
"""This function just gets rid of useless empty tokens in the path ('', '.')
(However if '' appears at the beginning of a path, we leave it alone.)
"""
out_ptkns = []
for i in range(0,len(in_ptkns)):
if ((in_ptkns[i] != '.') and
((in_ptkns[i] != '') or (i==0))):
out_ptkns.append(in_ptkns[i])
# (I'm sure there are ways to write this in python
# using fewer lines of code. Sigh.)
return out_ptkns
def DescrToCatLeafPtkns(descr_str, dbg_loc):
"""
Review: Variables in this program have three parts:
1) A variable category name (designating the type of variable).
2) A variable category path, which consists of a node which is an ancestor
of the variable leaf (1) in the tree
3) A variable name ("leaf"), which refers to a node in the tree
(either a static type tree or instance tree)
DescrToCatLeafPtkns() takes a string describing a variable,
as it appears in a template (ie, a write() command, once it has been
stripped of it's '$' or '@' prefix, and surrounding {} brackets)
...and divides it into strings which specify the location of that leaf in
a static or instance tree, in addition to the name and location of the
category node. Descriptor examples for atoms in water:
"AtomType:/Water/O", There are only 2 --types-- of atoms in
"AtomType:/Water/H", a water molecule. We identify them this way.
"AtomID:O" However each water molecule has 3 atoms, and we
"AtomID:H1" can give each atom in each water molecule a unique
"AtomID:H2" AtomID number. "AtomID:H2" is the id number of the
second hydrogen atom in the current water molecule.
---- Output: This function returns a 3-tuple: ----
leaf_ptkns The name of the variable's leaf node, as well as the list of
tokens denoting the path (named list of nodes) which lead to it.
cat_name The name of the variable category (no path information)
cat_ptkns A --suggestion-- for where to find the node containing the
category mentioned in "cat_name". Same format as leaf_ptkns.
Examples:
"AtomType:/Water/O" cat_name='AtomType', cat_path=[], leaf_ptkns=['','Water','O']
"AtomType:/Water/H" cat_name='AtomType', cat_path=[], leaf_ptkns=['','Water','H']
"AtomID:O" cat_name='AtomID', cat_path=[], leaf_ptkns=['O']
"AtomID:H1" cat_name='AtomID', cat_path=[], leaf_ptkns=['H1']
"AtomID:H2" cat_name='AtomID', cat_path=[], leaf_ptkns=['H2']
"mol:/" cat_name='mol', cat_path=[], leaf_ptkns=['']
"mol:" cat_name='mol', cat_path=[], leaf_ptkns=[]
"mol:../" cat_name='mol', cat_path=[], leaf_ptkns=['..']
"../mol" cat_name='mol', cat_path=[], leaf_ptkns=['..']
"$/peptide[3]/ResID:res[25]" cat_name='ResID', cat_path=['', 'peptide[3]'], leaf_ptkns=['res[25]']
"""
split_colon = descr_str.split(':')
if len(split_colon) > 2:
raise InputError('Error('+g_module_name+'.DescrToCatLeafPtkns())\n'
' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n\n'
' Bad variable descriptor: \"'+descr_str+'\"\n'+
' There can be at most one \':\' character in a variable descriptor.\n')
# ---- Are we using colon syntax (example '$atom:H1')?
elif len(split_colon) == 2:
# The category name = text after the last '/' (if present)and before ':'
cat_ptkns = split_colon[0].split('/')
cat_name = cat_ptkns[-1]
# The text before that is the suggested (category) path
cat_ptkns = cat_ptkns[:-1]
# if len(cat_ptkns) == 0:
# cat_ptkns.append('.')
# The remaining text is the path leading to the leaf node.
if split_colon[1] != '':
leaf_ptkns = split_colon[1].split('/')
else:
leaf_ptkns = []
if (cat_name == ''):
raise InputError('Error('+g_module_name+'.DescrToCatLeafPtkns()):\n'
' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n\n'
' Bad variable descriptor: \"'+descr_str+'\"\n')
else:
# ---- Are we using colon-less syntax (example: "$../mol") ?
ptkns = split_colon[0].split('/')
cat_name = ptkns[-1] # last token (eg. "mol") is the cat_name
leaf_ptkns = ptkns[:-1] # the rest is the leaf's path ("..")
if len(leaf_ptkns) == 0:
leaf_ptkns.append('.')
#cat_ptkns = ptkns[:-1] # the same goes for the cat path suggestion
#if len(cat_ptkns) == 0:
# cat_ptkns.append('.')
cat_ptkns = []
# On 2012-8-22, I commented out this line:
#return cat_name, RemoveNullTokens(cat_ptkns), RemoveNullTokens(leaf_ptkns)
# and replaced it with:
return cat_name, RemoveNullTokens(cat_ptkns), leaf_ptkns
def DescrToCatLeafNodes(descr_str,
context_node,
dbg_loc,
create_missing_nodes=False):
"""
Variables in ttree correspond to nodes in a tree
(and also categories to which they belong).
DescrToCatLeafNodes() reads the name of a variable,
(its descriptor) and determines where in the tree
does this variable reside, and what is it's category?
This function is the heart of ttree because it is
the function used to interpret ttree variable syntax.
(It is very messy right now. I will clean up the code later. AJ 2011-9-06)
Arguments:
descr_str The complete name that the user gave
to the variable. (Excluding '$' or '@')
context_node The class (node) in which the variable
                     was used. descr_str is interpreted
relative to this context. (This argument
is similar to the current directory
in which a command was issued in unix.)
dbg_loc The location in the user's input file(s)
where this variable is referred to.
create_missing_nodes
If we lookup a variable whose leaf node
does not exist yet, should we create it?
Setting this argument to "True" allows
us to augment the tree to add nodes
corresponding to variables.
-- Here is a greatly simplified version of DescrToCatLeafNodes(): --
def DescrToCatLeafNodes(descr_str, context_node, dbg_loc):
cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)
cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
if len(cat_ptkns) > 0:
leaf_node = PtknsToNode(leaf_ptkns, cat_node, dbg_loc)
else:
leaf_node = PtknsToNode(leaf_ptkns, context_node, dbg_loc)
return cat_name, cat_node, leaf_node
    (This version works, but it does not handle "..." correctly,
and it does not create missing nodes when needed.)
-- Here is a (probably unnecessary) review of terminology: --
Descriptor String:
The first argument ("descr_str") is a descriptor string.
A descriptor string typically contains ":" and "/"
    characters to divide the string into pieces in order
to identify a category name, category node, and leaf node.
Conceptually, the variable's NAME is the leaf node.
The variable's TYPE is the category (node and name).
Node:
Nodes are used to represent both class objects and variable names
1) class objects
Each type of class objects is represented by an StaticObj.
Each instantiated object is represented by an InstanceObj.
2) variable names (leaf nodes)
However variable names are also represented using either
StaticObjs (for @ static variables) or
InstanceObjs (for $ instance variables)
Again, all variables in ttree are members of a class object.
In this case, the name of the node corresponds to the variable's
name, and it's position in the tree refers to the class to which
it belongs.
However "leaf nodes" do not uniquely identify the
actual variable itself. A single node can refer to two different
variables if they are in different categories.
All 3 identifiers (leaf node, category node, category name)
are needed to uniquely identify a ttree variable. See below.
Ptkn (Path Token)
Strings containing multiple '/' characters are typically used
to identify the location of the category and leaf nodes in the
tree (ie the path to the node). The '/' characters are
delimiters which break up the string into small pieces, (which
are usually the names of classes).
These pieces are called "path tokens" or "ptkns"
Leaf Node:
It exists as a node in a tree (instead of a simple string)
because, just like member variables in a class in an
object oriented programming language (or in a C struct)
language, variables in ttree belong to the class in
which they are defined. The node's location in the
tree represents which class it belongs to.
If a variable's leaf node name
    refers to a node which does not exist yet, then we create it
(assuming the "create_missing_nodes" argument is "True").
Category Node/Name:
Categories are a peculiar feature of ttree. Categories
are groups of variables that share the same counter when
numeric values are automatically given to each variable.
So you can think of a category as a counter with a name.
Variables in different categories have different counters,
and are assigned numeric values independently.
Consequently two variables in different categories
may be assigned the same number. But two variables
in the same category are always given unique values.
Counters are typically global, but can have local scope.
(ie, only defined within a Class, or an instantiated
class, and whatever other classes are nested or
instantiated beneath it.)
Therefore to identify a counter/category you must specify
both a name AND a node. The node identifies the class where
the scope is defined. It is assumed that the Leaf Node
(see above) lies within this scope (ie. somewhere after
it in the tree).
Example: local counters are used to keep track of the
residues within in a protein chain. If we use a class to
represent the protein, we can create a local residue-
counter (category) within that protein. Then when we
instantiate the protein multiple times, this counter
is reset for every new instance of of the protein.
"""
cat_name, cat_ptkns, leaf_ptkns = DescrToCatLeafPtkns(descr_str, dbg_loc)
# ---- ellipsis hack ----
#
# Search for class:
# Most users expect ttree.py to behave like a
# standard programming language: If the class they are
# instantiating was not defined in this specific
# location, they expect ttree.py to search for
# it outwards, first in the parent's environment,
# and then in the parent's parent's environment,
# and so on, until the object is found.
# For example, most users expect this to work:
# class Res{
# write("Atoms") {
# $atom:CA @atom:CA 0.123 1.234 2.345
# $atom:CB @atom:CB 1.234 2.345 3.456
# }
# }
# class Protein{
# write_once("AnglesByType") {
# @angle:backbone @atom:Res/CA @atom:Res/CA @atom:Res/CA
# }
# Notice that in class Protein, we did not have to specify
# where "Res" was defined because it is defined in the parent
    # environment (ie. immediately outside Protein's environment).
# The general way to do this in ttree.py, is to
# use ellipsis syntax "@atom:.../Res/CA" symbol. The
# ellipsis ".../" tells ttree.py to search upwards
# for the object to the right of it ("Res")
# In order to make ttree.py behave the way
# most users are expecting, we artificially insert a
# ".../" before the class name here. (Later on, the
# code that processes the ".../" symbol will take
# care of finding Res. We don't have to worry
# about doing that now.)
#
# I think we only want to do this for variables with path information
# such as "@atom:Res/CA" (which means that leaf_ptkns = ['Res', 'CA']).
# For simple variables like "@atom:CA", we don't automatically look upwards
# unless the user explicitly requests it.
# (That's why we check to make sure that len(leaf_ptkns) > 1 below
# before we insert '...' into the leaf_ptkns.)
# In other words, the two variables "@atom:CA" below are treated differently
#
# A {
# write("Atoms") {
# @atom:CA
# }
# class B {
# write("Atoms") {
# @atom:CA
# }
# }
# }
#
if ((descr_str.find(':') != -1) and
#(not ((len(leaf_ptkns) == 1) and
# (leaf_ptkns[0] == context_node.name))) and
#(len(leaf_ptkns) > 0) and
(len(leaf_ptkns) > 1) and
(len(leaf_ptkns[0]) > 0) and
(leaf_ptkns[0][0] not in ('.','*','?'))):
leaf_ptkns.insert(0, '...')
# ---- Done with "ellipsis hack" -----
#sys.stderr.write(' DescrToCatLeafNodes(): (cat_ptkns, cat_name, lptkns) = ('+
# str(cat_ptkns)+', \"'+cat_name+'\", '+str(leaf_ptkns)+')\n')
cat_node = None
cat_start_node = context_node
leaf_start_node = context_node
if (len(cat_ptkns) > 0):
if cat_ptkns[-1] == '...':
# The "..." in this position means trace the path from the
# current node (context_node) up to cat_ptkns[:-1].
cat_start_node = PtknsToNode(cat_ptkns[:-1], context_node, dbg_loc)
# Later on, we will search upwards until we find an ancestor
# node containing a category matching cat_name. This will
# be taken care of later. (See "if cat_node is None:" below.)
else:
# In this case, the user supplied an explicit path
# for the category node. Find it now.
cat_node = PtknsToNode(cat_ptkns, context_node, dbg_loc)
# Whenever the user supplies an explicit path, then
# the cat node should be the starting location from
# which the leaf path is interpreted. This nearly
# ensures that the leaf node will be a descendant
# of the category node, which is what we want.
leaf_start_node = cat_node
if cat_node is None:
# Otherwise, the user did not indicate where the category
# node is defined, but only supplied the category name.
# (This is the most common scenario.)
# In this case, climb up the tree to the parent
# until you find an ancestor with a category whose
# name matches cat_name.
cat_node = FindCatNode(cat_name, cat_start_node, dbg_loc)
if (cat_name not in cat_node.categories):
if create_missing_nodes:
# If this is the first time we encountered a variable in this
# category (ie if it's the first time we encountered a variable
# with this category's name and node), then we must create a
# new entry in the cat_node.categories associative container
# (using cat_name as the dictionary key).
cat_node.categories[cat_name] = Category(cat_name)
else:
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' Category named \"'+cat_name+'\" not found at\n'
' position '+NodeToStr(cat_node)+'\n')
# ---------- Now look up the leaf node -----------
if (len(leaf_ptkns) > 0) and (leaf_ptkns[-1] == 'query()'):
# Special case: "query()"
# Variables named "query()" are not really variables.
# (They are a way for users to query a category's counter.)
# But we treat them as such internally. Consequently we
# give them unique names to avoid clashes (just in case
# "query()" appears multiple times in the same context).
#leaf_ptkns[-1] = '__query__'+dbg_loc.infile+'_'+str(dbg_loc.lineno)
leaf_ptkns[-1] = '__query__' + str(dbg_loc.order)
# Lookup the path for the leaf:
#
# Often, the leaf that the path refers to does not
# exist yet. For example, it is common for a template to
# contain a reference to "$atom:CA". If the current context_node
# is "/protein1/res22", this means that the leaf should be
# at "/protein1/res22/CA". (However in this example, "CA"
# is not a class that has been defined yet. It is the name
# of a variable which may not have even been mentioned
# before. Think of "CA" as a variable placeholder.)
#
# So we follow the path tokens as far as we can:
i_last_ptkn, last_node = FollowPath(leaf_ptkns,
leaf_start_node,
dbg_loc)
# Did we find the node?
if i_last_ptkn == len(leaf_ptkns):
leaf_node = last_node
else:
# If we are here, then we did not find the node.
# The unrecognized token is stored in
# leaf_ptkns[i_last_ptkn]
if leaf_ptkns[i_last_ptkn] == '...':
# ----------------------------------------------
# ---- UGHH I hate dealing with '...' ----
# ----(Messy code to follow in this section)----
# ----------------------------------------------
# The "..." means different things depending on
# whether or not it is the last token in leaf_ptkns.
if i_last_ptkn+1 < len(leaf_ptkns):
# If "..." is NOT the last token in leaf_ptkns, we
# should search for an ancestor of this node that has
# a child whose name matches the requested target
# string (located in leaf_ptkns[i_last_ptkn+1])
search_target = leaf_ptkns[i_last_ptkn+1]
# If such an ancestor exists, then FollowPath()
# should have already found it for us.
# This means it was not found.
# So if there is only one more token in the
# list of tokens, then create the needed node
if (create_missing_nodes and
(i_last_ptkn+1 == len(leaf_ptkns)-1)):
# Create a new leaf node and link it:
new_leaf_name = leaf_ptkns[-1]
parent_node = last_node
# Is this parent_node a StaticObj? (...or does it inherit from StaticObj?)
if isinstance(parent_node, StaticObj):
parent_node.children[new_leaf_name] = StaticObj(new_leaf_name, parent_node)
elif isinstance(parent_node, InstanceObj):
parent_node.children[new_leaf_name] = InstanceObjBasic(new_leaf_name, parent_node)
else:
assert(False) # (only 2 types of nodes are possible)
# Now assign the pointer
leaf_node = parent_node.children[new_leaf_name]
else:
#In that case, we were unable to find the node referenced by "..."
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Broken path.\n' # containing ellipsis (...)\n'
' class/variable \"'+search_target+'\" not found in this\n'
' context: \"'
#+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
+descr_str+'\"\n'
' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno))
else: # if i_last_ptkn+1 < len(leaf_ptkns):
# If "..." IS the last token, then it means:
# we want to search for the CATEGORY NAME.
# This is very different.
# It means we need to:
# search backwards up the ancestor tree until
# we find an ancestor variable (of last_node)
# which has the right category, (ie until you
# find an ancestor node with a variable (VarRef)
# pointing to it that belongs to the correct
# category node and name (determined above).)
# If not found, then use the current context_node.
assert(cat_name in cat_node.categories)
var_bindings = cat_node.categories[cat_name].bindings
node = last_node
while (node != None):
# Recall that cat_node.categories[cat_name]
# is a dictionary whose keys are leaf nodes
# corresponding to the variables in this category.
if node in var_bindings:
# then we found it, and we're done
break
else:
node = node.parent
if node != None:
leaf_node = node
else:
# If not found, have it point to the
# current (context) node.
leaf_node = context_node
# -----------------------------------------------
# -- Finished dealing with '...' in leaf_ptkns --
# -----------------------------------------------
elif (create_missing_nodes and
((i_last_ptkn == len(leaf_ptkns)-1) or
HasWildCard('/'.join(leaf_ptkns)))):
#elif (create_missing_nodes and
# (i_last_ptkn == len(leaf_ptkns)-1)):
# Again, another reason the leaf-node was not found is
# that it refers to a leaf node which has not yet been
# created. If the path was valid up to the last
# token, then we should create a new node with this name.
# -- This is a common scenario. --
# -- This is how all new variables are created. --
# Anyway, we handle that here:
# Create a new leaf node and link it:
new_leaf_name = leaf_ptkns[-1]
new_leaf_name = '/'.join(leaf_ptkns[i_last_ptkn:])
parent_node = last_node
# Is this parent_node a StaticObj? (...or does it inherit from StaticObj?)
if isinstance(parent_node, StaticObj):
parent_node.children[new_leaf_name] = StaticObj(new_leaf_name, parent_node)
elif isinstance(parent_node, InstanceObj):
parent_node.children[new_leaf_name] = InstanceObjBasic(new_leaf_name, parent_node)
else:
assert(False) # (only 2 types of nodes are possible)
# Now assign the pointer
leaf_node = parent_node.children[new_leaf_name]
else:
# Otherwise, the user made a mistake in the path.
# Figure out which kind of mistake and print an error.
if (last_node.parent is None) and (leaf_ptkns[i_last_ptkn] == '..'):
#In that case, we tried to back out beyond the root of the tree.
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Broken path in variable:\n'
#' \"'+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
' \"'+ descr_str + '\"\n'
' located near '+
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno)+'\n'
' There are too many \"..\" tokens in the path string.')
else:
#Then the reason is: The string in leaf_ptkns[i_last_ptkn]
#was supposed to be a child of last_node but a child
#of that name was not found.
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Broken path / Undefined variable:\n'
#' \"'+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
' \"'+ descr_str + '\"\n'
' located near '+
ErrorLeader(dbg_loc.infile,
dbg_loc.lineno)+'\n'
' Undefined: \"'+PtknsToStr(leaf_ptkns)+'\"\n'
' (Specifically \"'+leaf_ptkns[i_last_ptkn]+
'\" is not a subordinate of \"'+MaxLenStr(last_node.name,'/')+'\")')
#'\n This could be a typo or spelling error.')
return cat_name, cat_node, leaf_node
def DescrToVarBinding(descr_str, context_node, dbg_loc):
""" DescrToVarBinding() is identical to LookupVar(), but it has a name
that is harder to remember. See comment for LookupVar() below.
"""
cat_name, cat_node, leaf_node = DescrToCatLeafNodes(descr_str,
context_node,
dbg_loc)
if cat_name in cat_node.categories:
category = cat_node.categories[cat_name]
var_bindings = category.bindings
if leaf_node in var_bindings:
var_binding = var_bindings[leaf_node]
else:
raise InputError('Error('+g_module_name+'.DescrToVarBinding()):\n'
' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' Bad variable reference: \"'+descr_str+'\". No variable with\n'
' this name has been bound in category \"'+cat_name+'\" defined for \"'+NodeToStr(cat_node)+'\".\n')
else:
raise InputError('Error('+g_module_name+'.DescrToVarBinding()):\n'
' Error near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'
' Bad variable reference: \"'+descr_str+'\". There is\n'
' no category named \"'+cat_name+'\" defined for "'+NodeToStr(cat_node)+'\".\n')
return var_binding
# Wrappers:
def LookupVar(descr_str, context_node, dbg_loc):
""" LookupVar() looks up a string (a variable descriptor, which is the
variable's name, excluding the '$', '@' prefixes and any '{}' brackets.)
This function returns the variable's "VarBinding" (the variable-name:value
pair). This is useful for querying or changing the value of a variable.
Because nearly all variables are local, you must specify the starting
node (ie. the node corresponding to the class in which this class
or variable was referred to). This is typically the global node.
"""
return DescrToVarBinding(descr_str, context_node, dbg_loc)
def LookupNode(obj_name, starting_node, dbg_loc):
""" LookupNode() parses through a string like
'../ClassA/NestedClassB'
and returns the corresponding node.
Nodes are data types used for representing a class or class instance.
They are also used for storing variables, in which case the string
might look like 'ClassA/NestedClassB/VariableC'.
Because nearly all variables are local, you must specify the starting
node (ie. the node corresponding to the class in which this class
or variable was referred to). This is typically the global node.
"""
return StrToNode(obj_name, starting_node, dbg_loc)
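# Hypothetical usage sketch for the two lookup helpers above (the names 'Res', 'CA'
# and 'protein' are made up; they are not taken from any real input file):
# binding = LookupVar('atom:Res/CA', context_node, dbg_loc) # -> a VarBinding
# node = LookupNode('../protein/Res', context_node, dbg_loc) # -> a tree node
# Both helpers may raise InputError if the path or category cannot be resolved.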
class SimpleCounter(object):
__slots__=["n","nincr"]
def __init__(self, n0 = 1, nincr = 1):
self.n = n0 - nincr
self.nincr = nincr
def query(self):
return self.n
def incr(self):
self.n += self.nincr
def __copy__(self): #makes a (deep) copy of the counter in its current state
return SimpleCounter(self.n + self.nincr, self.nincr)
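# Minimal usage sketch for SimpleCounter (based only on the definition above):
# c = SimpleCounter(1, 1) # the first value handed out will be 1, step size 1
# c.incr(); c.query() # -> 1
# c.incr(); c.query() # -> 2
# c2 = c.__copy__() # an independent counter continuing from the same state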
class Category(object):
"""
Category contains a list of all of the variables which belong
to the same category, as well as some supporting information.
Attributes:
name The name of the category (a string)
bindings An OrderedDict() containing leaf_node:VarBinding
(key:value) pairs. Variables are looked up by their leaf node.
The actual variable name (which simply refers to the leaf node)
and values are both stored in the VarBinding data structure.
counter A counter object like "SimpleCounter". Each time counter.incr()
is invoked, the counter advances to a new unique value (typically an
integer which is incremented); counter.query() returns the current value.
"""
__slots__=["name","bindings","counter","manual_assignments","reserved_values"]
def __init__(self,
name = '',
bindings = None,
counter = None,
manual_assignments = None,
reserved_values = None):
self.name = name
if bindings is None:
self.bindings = OrderedDict()
else:
self.bindings = bindings
if counter is None:
self.counter = SimpleCounter(1,1)
else:
self.counter = counter
if manual_assignments is None:
self.manual_assignments = OrderedDict()
else:
self.manual_assignments = manual_assignments
if reserved_values is None:
self.reserved_values = OrderedDict()
else:
self.reserved_values = reserved_values
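# Sketch of how a Category is typically used elsewhere in this module (a rough
# guess at the intended usage, based on the attribute descriptions above):
# cat = Category('atom') # empty bindings, counter starts counting at 1
# cat.counter.incr()
# first_value = cat.counter.query() # -> 1
# # cat.bindings would then map a leaf node to a VarBinding holding that value.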
class StaticObj(object):
""" StaticObjs and InstanceObjs:
The state of the system is stored in two different trees:
1) The static tree:
StaticObj trees are similar to "class" definitions in an OOP language.
This tree contains class definitions, their nested classes,
and instructions for how to create new instances (copies) of each class.
Nodes in this tree are stored using StaticObjs:
2) The instance tree:
This tree contains classes that have been instantiated, and any sub-
classes (members or attributes) that are instantiated as a result.
This tree is automatically generated by instantiating the root
StaticObj. Nodes in this tree are stored using InstanceObjs.
StaticObjs and InstanceObjs both contain
"commands" (commands which usually involve instructions
for writing templates)
"categories" (local counters used to assign variables. See below.)
"children" (Nested class definitions -NOT- OOP child classes. See below.)
StaticObjs also contain
"instance_commands"
"instance_categories"
These members contain information needed to create a new instance/copy
of this class (how to construct an InstanceObj from a StaticObj).
StaticObj contains the member function Parse() which builds the global static
tree by parsing the contents of a text file supplied by the user.
The external function BuildInstanceTree(), creates the global instance tree
from the global static tree (a tree of StaticObjs).
----- CLASS MEMBERS OF StaticObj: ----
0) Name:
Every class (object type) has a name. It is stored in self.name.
To make it easier to distinguish the names of classes from the names of
individual instances of that class, I recommend using a capital letter
for the name of a class type (and lower-case letters for instances).
1) Commands
Commands are usually instructions for writing templates.
Templates are blocks of ordinary text which contain variables.
(Variables in this program consist of variable names, categories,
and (eventually) bound values (usually generated automatically),
which will be substituted into the template to generate a text file.)
A class can contain multiple templates, each one having a unique name
which also happens to be the name of the file that will be created when
the template is written.
Variants:
self.commands:
Some templates are written immediately after the class is defined
(stored in "self.commands").
Example: The "write_once()" command.
self.instance_commands:
Other templates are written when an instance/copy of the class is created
(stored in "self.instance_commands").
Example: The "write()" command.
2) Children
self.children:
Class definitions can be defined from within the definition of other
("parent") classes. These nested classes are referred to as "children".
These sub-classes are not "children" in the OOP sense of the word at
all (they do not have any of the traits of their "parents").
However in the source code I refer to them as "children" because here
they are implemented as "child" nodes (branches) in the tree-like
data-structure used to store class definitions (the static tree).
3) Categories
This is a new concept and is difficult to explain.
Recall that each class contains a list of templates containing raw text,
interspersed with variables (whose values will determined later).
In most cases, variables are assigned to integer values which are
automatically generated by incrementing a counter. Simply put,
"categories" are collections of variables which share the same counter.
Within a category, the goal is to assign a unique integer (or other
symbol) to each distinct variable in this category.
To avoid name-clashes, variable names have local "scope".
This scope is the "leaf_token"
Categories can be specific to a particular class (node), and any of the
classes (nodes) which are nested within it, but by default are global.
(This means they "belong" to the global (outermost) node by default.)
All the various categories which are defined within a particular
StaticObj are stored in self.categories.
Static variables (ie. variables with a '@' prefix) are stored this way.
"self.categories"
If a class contains a new category, it means that if any nested
classes defined within that class definition contain (static, '@')
variables whose categories match the category name, their values will
be determined by looking up the couter associated with this category
stored locally (here) in self.categories. All variables belonging
to this category are stored in "self.categories[category_name]".
"self.instance_categories"
Recall that creating a new copy (instance) of a class automatically
creates an InstanceObj in the instance-tree. InstanceObj's have a
".categories" attribute of their own, the contents of which are
copied from this StaticObj's "self.instance_categories" attribute.
Instantiating a new class also spawns the instantiation of any
sub-classes.
If any of these "instance children" contain variables whose category
names match a category stored in the parent InstanceObj's .categories
dictionary, then their values will be determined by that InstanceObj's
counter for that category name.
4) Parent:
A link to the parent StaticObj is stored in self.parent.
"""
__slots__=["name",
"parent",
"children",
"categories",
"commands",
"srcloc_begin",
"srcloc_end",
"deleted",
"class_parents",
"namespaces",
"instname_refs",
"instance_categories",
"instance_commands_push",
"instance_commands",
"instance_commands_pop"]
def __init__(self,
name='',
parent=None):
"""
The members/attributes of StaticObj are defined in the comment
for StaticObj above. """
# The following members are shared by both InstanceObj and StaticObj:
self.name = name
self.parent = parent #For traversing the global static template tree
self.children = OrderedDict() # Nested class definitions.
self.categories=OrderedDict() #<- new variable categories that are only defined
# in the context of this molecule's type definition
self.commands=[] # Commands to carry out (only once)
##vb##self.var_bindings=[] # List of variables assigned to this object.
self.srcloc_begin = None # Keep track of location in user files
self.srcloc_end = None # (useful for error message reporting)
self.deleted = False # Users can delete static objects?
# (why not?)
# The following members are not shared with InstanceObj:
self.class_parents = [] # classes we inherit traits from (this is
# similar to the parent/child relationship
# in an object-oriented-programming language)
self.namespaces = [] # A list of classes we also look in when searching
# for other static nodes or variables. (similar to
# class_parents, but only used for searches.)
self.instname_refs = {} # <-- used for debugging to ensure that
# two instances do not have the same name
self.instance_categories=OrderedDict()#<-new variable categories that are defined
#within the scope of this molecule's instance
self.instance_commands_push=[] #1)process these commands first by adding
# these commands to InstanceObj.commands
# (before you deal with class_parents)
self.instance_commands=[] #2) then add this to InstanceObj.commands
self.instance_commands_pop=[] #3) finally add these commands
def DeleteSelf(self):
for child in self.children.values():
child.DeleteSelf()
self.deleted = True
def IsDeleted(self):
return self.deleted
##vb##def AddVarBinding(self, var_binding):
##vb## if self.var_bindings is None:
##vb## self.var_bindings = [var_binding]
##vb## else:
##vb## self.var_bindings.append(var_binding)
def Parse(self, lex):
""" Parse() builds a static tree of StaticObjs by parsing text file.
-The "lex" argument is a file or input stream which has been converted
to a "TemplateLexer" object (similar to the python's built-in shlex lexer).
"""
#sys.stdout.write(' -- Parse() invoked --\n')
# Keep track of the location in the users' input files where this
# class object is first defined. (Keep in mind that the user might
# augment their original class definition, adding new content to an
# existing class. In that case self.srcloc_begin will have already
# been assigned. We don't want to overwrite it in that case.)
if self.srcloc_begin is None: # <-- not defined yet?
self.srcloc_begin = lex.GetSrcLoc()
while True:
cmd_token = lex.get_token()
#print('Parse(): token = \"'+cmd_token+'\", '+lex.error_leader())
if cmd_token == lex.eof:
#print('Parse(): EOF encountered\n')
break
if ((cmd_token == 'write') or
(cmd_token == 'write_once') or
(cmd_token == 'create_var') or
(cmd_token == 'create_vars')):
open_paren = lex.get_token()
#print('Parse(): open_paren=\"'+open_paren+'\"')
if open_paren=='{':
# ..then the user neglected to specify the "dest" file-name
# argument. In that case, supply the default, ''.
# (which is shorthand for the standard out in this case)
open_curly = open_paren[0]
open_paren = ''
close_paren = ''
tmpl_filename = ''
srcloc = lex.GetSrcLoc()
else:
tmpl_filename = lex.get_token()
if tmpl_filename == ')':
tmpl_filename = ''
close_paren = ')'
else:
close_paren = lex.get_token()
open_curly = lex.get_token()
srcloc = lex.GetSrcLoc()
if ((cmd_token == 'create_var') or
(cmd_token == 'create_vars')):
tmpl_filename = None
# This means: define the template without attaching
# a file name to it. (IE., don't write the contents
# of what's enclosed in the curly brackets { } to a file.)
if ((open_curly != '{') or
((open_paren == '') and (close_paren != '')) or
((open_paren == '(') and (close_paren != ')'))):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error in '+lex.error_leader()+'\n\n'
'Syntax error at the beginning of the \"'+cmd_token+'\" command.')
if tmpl_filename != None:
tmpl_filename = RemoveOuterQuotes(tmpl_filename, lex.quotes)
# ( The previous line is similar to:
# tmpl_filename = tmpl_filename.strip(lex.quotes) )
tmpl_contents = lex.ReadTemplate()
StaticObj.CleanupReadTemplate(tmpl_contents, lex)
#sys.stdout.write(' Parse() after ReadTemplate, tokens:\n\n')
#print(tmpl_contents)
#sys.stdout.write('\n----------------\n')
if cmd_token == 'write_once':
# Check for a particular bug:
# Ordinary instance variables (preceded by a '$')
# should never appear in a write_once() statement.
for entry in tmpl_contents:
if (isinstance(entry, VarRef) and
(entry.prefix[0]=='$')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno)+'\n'
' Illegal variable: \"'+entry.prefix+entry.descr_str+entry.suffix+'\"\n'
' All variables in a \"write_once()\" statement must be statically\n'
' defined, and hence they must begin with a \'@\' prefix character.\n'
' (not a \'$\' character).\n'
' Suggestion: Use the \"write()\" command instead.\n')
if cmd_token == 'write':
commands = self.instance_commands
elif cmd_token == 'write_once':
commands = self.commands
elif ((cmd_token == 'create_var') or
(cmd_token == 'create_vars')):
commands = self.instance_commands
else:
assert(False)
command = WriteFileCommand(tmpl_filename,
tmpl_contents,
srcloc)
commands.append(command)
# end of "if (cmd_token == 'write') or (cmd_token == 'write_once'):"
elif cmd_token == 'delete':
instobj_descr_str = lex.get_token()
instobj_srcloc = lex.GetSrcLoc()
delete_command = DeleteCommand(instobj_srcloc)
mod_command = ModCommand(delete_command,
instobj_descr_str)
self.instance_commands.append(mod_command)
elif cmd_token == 'using':
namespacecom_str = lex.get_token()
if namespacecom_str != 'namespace':
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' The \"'+cmd_token+'\" command must be followed by the \"namespace\" keyword.')
namespace_str = lex.get_token()
stnode = StrToNode(namespace_str,
self,
lex.GetSrcLoc())
self.namespaces.append(stnode)
elif cmd_token == 'category':
cat_name = lex.get_token()
cat_count_start = 1
cat_count_incr = 1
open_paren = lex.get_token()
if (open_paren == '('):
token = lex.get_token()
if token == ',':
token = lex.get_token()
if token != ')':
# Interpret token as an integer, float, or string
try:
cat_count_start = int(token)
except ValueError:
try:
cat_count_start = float(token)
except ValueError:
cat_count_start = RemoveOuterQuotes(token, '\'\"')
token = lex.get_token()
if token == ',':
token = lex.get_token()
if token != ')':
# Interpret token as an integer,float,or string
try:
cat_count_incr = int(token)
except ValueError:
try:
cat_count_incr = float(token)
except ValueError:
cat_count_incr = RemoveOuterQuotes(token, '\'\"')
token = lex.get_token()
if token != ')':
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+cmd_token+' '+cat_name+'...\" has too many arguments,\n'
' or lacks a close-paren \')\'.\n')
else:
lex.push_token(open_paren)
if (isinstance(cat_count_start, basestring) or
isinstance(cat_count_incr, basestring)):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+cmd_token+' '+cat_name+'('+
str(cat_count_start)+','+
str(cat_count_incr)+')\"\n'
' Only numeric counters are currently supported.\n')
# check for really stupid and unlikely errors:
if type(cat_count_start) is not type(cat_count_incr):
if ((isinstance(cat_count_start, int) or
isinstance(cat_count_start, float))
and
(isinstance(cat_count_incr, int) or
isinstance(cat_count_incr, float))):
cat_count_start = float(cat_count_start)
cat_count_incr = float(cat_count_incr)
else:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' Problem with \"'+cmd_token+'\" command.\n')
prefix = cat_name[0]
cat_name = cat_name[1:]
# Add this category to the list.
if prefix == '@':
self.categories[cat_name] = Category(cat_name)
self.categories[cat_name].counter=SimpleCounter(cat_count_start,
cat_count_incr)
elif prefix == '$':
self.instance_categories[cat_name] = Category(cat_name)
self.instance_categories[cat_name].counter=SimpleCounter(cat_count_start,
cat_count_incr)
else:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' category name = \"'+cat_name+'\" lacks a \'@\' or \'$\' prefix.\n'
' This one-character prefix indicates whether the variables in this\n'
' new category will be static or dynamic variables.\n')
elif (cmd_token == '}') or (cmd_token == ''):
# a '}' character means we have reached the end of our scope.
# Stop parsing and let the caller deal with the remaining text.
# (And a '' means we reached the end of the file... I think.)
break
#elif (cmd_token == 'include'):
# "include filename" loads a file (adds it to the file stack)
# The "TtreeShlex" class (from which "lex" inherits) handles
# "include" statements (ie. "source" statements) automatically.
else:
# Otherwise, 'cmd_token' is not a command at all.
# Instead it's the name of an object which needs to be
# defined or instantiated.
# First, let's figure out which.
# (small detail: The "class" keyword is optional
# and can be skipped.)
if cmd_token == 'class':
object_name = lex.get_token()
else:
object_name = cmd_token
next_symbol = lex.get_token()
#print('Parse(): next_token=\"'+next_symbol+'\"')
class_parents = []
if next_symbol == 'inherits':
# Then read in the list of classes which are parents of
# this class. (Multiple inheritance is allowed.)
# (We don't yet check to insure that these are valid class
# names. We'll do this later in LookupStaticRefs().)
syntax_err_inherits = False
while True:
next_symbol = lex.get_token()
if ((next_symbol == '{') or
(next_symbol == lex.eof)):
break
elif (next_symbol == '='):
syntax_err_inherits = True
break
else:
class_parents.append(StrToNode(next_symbol,
self,
lex.GetSrcLoc()))
if len(class_parents) == 0:
syntax_err_inherits = True
if syntax_err_inherits:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"inherits\" should be followed by one or more class names.\n')
if next_symbol == '{':
child_name = object_name
# Check to see if this class has already been defined.
# (IE. check if it present in the list of children.)
# If the name (child_name) matches another class (child),
# then the contents of the new class will be appended to
# the old. This way, class definitions can be augmented
# later. (This is the way "namespaces" work in C++.)
child = self.children.get(child_name)
# If found, we refer to it as "child".
# If not, then we create a new StaticObj named "child".
if child is None:
child = StaticObj(child_name, self)
self.children[child_name] = child
assert(child.name == child_name)
# Either way we invoke child.Parse(), to
# add contents (class commands) to child.
child.Parse(lex)
child.class_parents += class_parents
elif next_symbol == '=':
next_symbol = lex.get_token()
if next_symbol == 'new':
base_name = object_name
base_srcloc = lex.GetSrcLoc()
array_slice_str = ''
if base_name.find('/') != -1:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+ErrorLeader(base_srcloc.infile,
base_srcloc.lineno)+'\n'
' (You can not instantiate some other object\'s members.)\n'
' Invalid instance name: \"'+base_name+'\"\n')
elif base_name in self.instname_refs:
ref_srcloc = self.instname_refs[base_name]
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Duplicate class/array \"'+base_name+'\"\n'
' This occurs near:\n'
' '+ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno)+'\n'
' and also near:\n'
' '+ErrorLeader(base_srcloc.infile,
base_srcloc.lineno)+'\n')
else:
self.instname_refs[base_name] = base_srcloc
# Check for syntax allowing the user to instantiate
# PART of an array. For example, check for this syntax:
# "monomers[20-29] = new ...". This only fills in a
# portion of the array from: monomers[20]...monomers[29]
#
# We also have to deal with multidimensional syntax
# like this: "cells[3][2-3][1][4-7] = new..."
# Split the "cells[3][2-3][1][4-7]" string into
# "cells[3][", "][1][", and "]".
# Later, we will instantiate InstanceObjs with names:
# "cells[3][2][1][4]"
# "cells[3][2][1][5]"
# "cells[3][2][1][6]"
# "cells[3][2][1][7]"
# "cells[3][3][1][4]"
# "cells[3][3][1][5]"
# "cells[3][3][1][6]"
# "cells[3][3][1][7]"
p1 = base_name.find('[')
if p1 == -1:
p1 = len(base_name)
else:
p1 += 1
array_name_tkns = [ base_name[0:p1] ]
array_name_offsets = []
p2 = -1
p4 = p1
while p4 < len(base_name):
p3 = base_name.find(']', p1)
if p3 == -1:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Expected a \']\' character following:\n'
' \"'+base_name[0:p1]+'\", located near:\n'
' '+ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno)+'\n')
# Search for a '-', ':', or '*' character between []
# For example "monomers[20-29] = "
# If present, the user wants us to fill a range
# inside an array. This could be a multi-dimensional
# array, (eg "cells[3][2-6][4-11] = "), so we must
# figure out which entries in the array the user
# wants us to fill (in this case, "[2-6][4-11]")
p2 = base_name.find('-', p1)
if p2 == -1:
p2 = len(base_name)
if p2 > p3:
p2 = base_name.find(':', p1)
if p2 == -1:
p2 = len(base_name)
if p2 > p3:
p2 = base_name.find('*', p1)
if p2 == -1:
p2 = len(base_name)
p4 = p3 + 1
if p4 < len(base_name):
if base_name[p4] == '[':
p4 += 1 # skip over it
else:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Expected a \'[\' character following a \']\' character in\n'
' \"'+base_name[0:p2+1]+'\", located near:\n'
' '+ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno)+'\n')
if p2 > p3:
# Then no '-', ':', or '*' character was found
# between '[' and the subsequent ']' character
# In that case, ignore this token
token = base_name[p1:p4]
# append all this text to the previous token
if len(array_name_tkns) == 0:
array_name_tkns.append(token)
else:
array_name_tkns[-1] = array_name_tkns[-1]+token
array_slice_str = 'slice '
else:
assert((p1 < p2) and (p2 < p3))
index_offset_str = base_name[p1:p2]
if len(index_offset_str) == 0:
index_offset = 0
elif (not str.isdigit(index_offset_str)):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Expected a nonnegative integer preceding the \''+base_name[p2]+'\' character in:\n'
' \"'+base_name[0:p2+1]+'\", located near:\n'
' '+ErrorLeader(ref_srcloc.infile,
ref_srcloc.lineno)+'\n')
else:
index_offset = int(index_offset_str)
token=base_name[p3:p4]
array_name_tkns.append(token)
array_name_offsets.append(index_offset)
p1 = p4
# If the statobj_str token contains a ']' character
# then this means the user wants us to make multiple
# copies of this template. The number of copies
# to instantiate is enclosed in the [] characters
# (Example wat = new Water[3000] creates
# 3000 instantiations of the Water template
# named wat[0], wat[1], wat[2], ... wat[2999]).
# Note: Here '[' and ']' have a special meaning.
# So lex.get_token() should not treat them as
# ordinary word characters. To prevent this:
orig_wordterminators = lex.wordterminators
lex.wordterminators += '[],'
class_name_str = lex.get_token()
if ((class_name_str == lex.eof) or
(class_name_str == '}')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Class ends prematurely. (Incomplete \"new\" statement.)')
assert(len(class_name_str) > 0)
if (class_name_str[0] == '['):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' new '+class_name_str+'\n'
'Bracketed number should be preceded by a class name.')
class_names = []
weights = []
num_by_type = []
if class_name_str == 'random':
class_names, weights, num_by_type = self._ParseRandom(lex)
tmp_token = lex.get_token()
if len(tmp_token)>0:
if tmp_token[0]=='.':
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+tmp_token+'\" should not follow random()\n'
'\n'
' Coordinate transformations and other commands (such as \"'+tmp_token+'\")\n'
' should appear after each class name inside the random() statement,\n'
' not after it. For example, do not use:\n'
' \"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\"\n'
' Use this instead:\n'
' \"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\"\n')
lex.push_token(tmp_token)
else:
class_name, class_suffix, class_suffix_srcloc = \
self._ProcessClassName(class_name_str, lex)
array_size = []
array_suffixes = []
array_srclocs = []
# A general "new" statement could look like this:
# "m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)
# [3].trans(0,0,4.5)"
# So far we have processed "m = new Mol.scale(3)".
# Now, we need to deal with:
# "[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)"
while True:
new_token = lex.get_token()
#if ((new_token == '') or (new_token == lex.eof)):
# break
if new_token == '[':
number_str = lex.get_token()
close_bracket = lex.get_token()
if ((not str.isdigit(number_str)) or
(close_bracket != ']')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error in \"new\" statement near '+lex.error_leader()+'\n'
' A \'[\' character should be followed by a number and a \']\' character.')
array_size.append(int(number_str))
suffix = lex.get_token()
if ((suffix == '') or (suffix == lex.eof)):
array_suffixes.append('')
array_srclocs.append(base_srcloc)
break
if suffix[0] == '.':
lex.push_token(suffix[1:])
suffix_func = lex.GetParenExpr()
suffix = '.' + suffix_func
array_suffixes.append(suffix)
array_srclocs.append(lex.GetSrcLoc())
else:
array_suffixes.append('')
array_srclocs.append(base_srcloc)
lex.push_token(suffix)
if suffix != '[':
break
else:
lex.push_token(new_token)
break
srcloc_final = lex.GetSrcLoc()
lex.wordterminators = orig_wordterminators
assert(len(array_size) == len(array_suffixes))
if len(array_size) > 0:
if len(array_name_offsets) == 0:
assert(len(array_name_tkns) == 1)
array_name_offsets = [0] * len(array_size)
array_name_tkns[0] = array_name_tkns[0] + '['
for d in range(0, len(array_size)-1):
array_name_tkns.append('][')
array_name_tkns.append(']')
if len(array_name_offsets) != len(array_size):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error in \"new\" statement near/before '+lex.error_leader()+'\n'
' Array '+array_slice_str+'dimensionality on the left side of the \'=\' character ('+str(len(array_name_offsets))+')\n'
' does not match the array dimensionality on the right side ('+str(len(array_size))+').\n')
# If the user wants us to instantiate a
# multidimensional array of class instances
# then we must loop through this multidimensional
# array and create a new instance for each entry.
# For example fill a 3 dimensional volume
# with 1000 water molecules
# Example 1:
# solvent = new Water [10][10][10]
# (The coordinates must be read separately.)
# In this example array_size = [10,10,10]
# array_suffixes = ['','','']
# Example 2:
# solvent = new Water.transcm(0,0,0)
# [10].trans(0,0,4)
# [10].trans(0,4,0).rot(45,0,0,1)
# [10].trans(4,0,0)
# (This command generates a 10x10x10 simple
# cubic lattice of regularly spaced water
# molecules pointing in the same direction.)
# In this example array_size = [10,10,10]
# and
# class_suffix = 'transcm(0,0,0)'
# and
# array_suffixes = ['trans(0,0,4)',
# 'trans(0,4,0).rot(45,0,0,1)',
# 'trans(4,0,0)']
# Note that ttree ignores the "trans()"
# commands themselves; it stores them so that
# inheriting classes can attempt to process them.
D = len(array_size)
if D > 0:
i_elem = 0 #(used to look up selection_list[])
if len(num_by_type) > 0:
selection_list = []
for i in range(0, len(num_by_type)):
selection_list += [i]*num_by_type[i]
random.shuffle(selection_list)
num_elements = 1
for d in range(0,D):
num_elements *= array_size[d]
err_msg_str = str(array_size[0])
for d in range(1,D):
err_msg_str += '*'+str(array_size[d])
if num_elements != len(selection_list):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near or before '+lex.error_leader()+'\n'
' The sum of the numbers in the \"new random([],[])\" command ('+str(len(selection_list))+')\n'
' does not equal the number of elements in the array ('+err_msg_str+')\n')
digits = [0 for d in range(0, D)]
table_filled = False
pushed_commands = []
while (not table_filled):
instance_name = array_name_tkns[0]
for d in range(0, D):
i = digits[d]
instance_name+=str(i+
array_name_offsets[d])+\
array_name_tkns[d+1]
# Does the user want us to select
# a class at random?
if len(class_names) > 0:
if len(num_by_type) > 0:
class_name_str = class_names[selection_list[i_elem]]
else:
class_name_str = RandomSelect(class_names,
weights)
class_name, class_suffix, class_suffix_srcloc= \
self._ProcessClassName(class_name_str, lex)
if class_suffix != '':
class_suffix_command = \
PushRightCommand(class_suffix.lstrip('.'),
class_suffix_srcloc)
self.instance_commands.append(class_suffix_command)
command = \
InstantiateCommand(instance_name,
ClassReference(class_name,
base_srcloc),
base_srcloc)
self.instance_commands.append(command)
if class_suffix != '':
command = \
PopRightCommand(class_suffix_command,
srcloc_final)
self.instance_commands.append(command)
# Now go to the next entry in the table.
# The indices of this table are similar to
# a D-digit integer. We increment this d-digit number now.
d_carry = D-1
while True:
digits[d_carry] += 1
if digits[d_carry] >= array_size[d_carry]:
digits[d_carry] = 0
if array_suffixes[d_carry] != '':
for i in range(0, array_size[d_carry]-1):
partner = pushed_commands.pop()
command = PopRightCommand(partner,
srcloc_final)
self.instance_commands.append(command)
d_carry -= 1
else:
if array_suffixes[d_carry] != '':
command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),
array_srclocs[d_carry])
pushed_commands.append(command)
self.instance_commands.append(command)
break
if d_carry < 0:
table_filled = True
break
i_elem += 1 #(used to look up selection_list[])
pass
else:
if len(class_names) > 0:
assert(len(num_by_type) == 0)
#if len(num_by_type) > 0:
# class_name_str = class_names[selection_list[i_elem]]
#else:
# class_name_str = RandomSelect(class_names,
# weights)
class_name_str = RandomSelect(class_names,
weights)
class_name, class_suffix, class_suffix_srcloc= \
self._ProcessClassName(class_name_str, lex)
if class_suffix != '':
class_suffix_command = \
PushRightCommand(class_suffix.lstrip('.'),
class_suffix_srcloc)
self.instance_commands.append(class_suffix_command)
command = \
InstantiateCommand(base_name,
ClassReference(class_name,
base_srcloc),
base_srcloc)
self.instance_commands.append(command)
if class_suffix != '':
command = \
PopRightCommand(class_suffix_command,
srcloc_final)
self.instance_commands.append(command)
else:
# Now check for commands using this syntax:
#
# "MolNew = MolOld.rot(45,1,0,0).scale(100.0)"
# /|\ /|\ `-----------.------------'
# | | |
# child_name parent_name optional suffix
child_name = object_name
parent_name_str = next_symbol
child = StaticObj(child_name, self)
parent_name, suffix, suffix_srcloc = \
self._ProcessClassName(parent_name_str, lex)
child.class_parents.append(StrToNode(parent_name,
self,
lex.GetSrcLoc()))
if suffix != '':
# Assume the command is a StackableCommand. (This
# way it will enclose the commands of the parents.)
# Stackable commands come in (Push...Pop) pairs.
push_command = PushLeftCommand(suffix,
suffix_srcloc)
pop_command = PopLeftCommand(push_command,
suffix_srcloc)
push_mod_command = ModCommand(push_command, './')
pop_mod_command = ModCommand(pop_command, './')
child.instance_commands_push.append(push_mod_command)
child.instance_commands_pop.insert(0,pop_mod_command)
#sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\n')
#sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\n')
# Check to see if this class has already been defined.
if self.children.get(child_name) is not None:
if self.children[child_name].IsDeleted():
del self.children[child_name]
else:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' The name \"'+child_name+'\" is already in use.')
self.children[child_name] = child
else:
# Otherwise hopefully this is a post-instance command
# (a command applied to a class which has been instantiated)
# In that case, the object_name would be followed by
# a dot and a function-call containing a '(' paren (which
# would have ended up stored in the next_symbol variable).
open_paren_encountered = False
if (next_symbol == '('):
open_paren_encountered = True
lex.push_token(next_symbol) #put '(' back in the stream
i_dot = object_name.rfind('.')
i_slash = object_name.rfind('/')
dot_encountered = ((i_dot != -1) and
((i_slash == -1) or (i_slash < i_dot)))
if (open_paren_encountered and dot_encountered and
(object_name[:1] != '[')):
obj_descr_str, suffix, suffix_srcloc = \
self._ExtractSuffix(object_name, lex)
path_tokens = obj_descr_str.split('/')
i_last_ptkn, staticobj = FollowPath(path_tokens,
self,
lex.GetSrcLoc())
instobj_descr_str = './'+'/'.join(path_tokens[i_last_ptkn:])
# I still support the "object_name.delete()" syntax for
# backwards compatibility. (However newer input files
# use this equivalent syntax: "delete object_name")
if suffix == 'delete()':
delete_command = DeleteCommand(suffix_srcloc)
mod_command = ModCommand(delete_command,
instobj_descr_str)
staticobj.instance_commands.append(mod_command)
else:
push_command = PushLeftCommand(suffix,
suffix_srcloc,
'.')
pop_command = PopLeftCommand(push_command,
suffix_srcloc,
'.')
push_mod_command = ModCommand(push_command,
instobj_descr_str)
pop_mod_command = ModCommand(pop_command,
instobj_descr_str)
if instobj_descr_str != './':
#sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands\n')
staticobj.instance_commands.append(push_mod_command)
staticobj.instance_commands.append(pop_mod_command)
else:
#sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands_push\n')
# CONTINUEHERE: should I make these PushRight commands and
# append them in the opposite order?
# If so I also have to worry about the case above.
staticobj.instance_commands_push.append(push_mod_command)
staticobj.instance_commands_pop.insert(0,pop_mod_command)
else:
# Otherwise, the cmd_token is not any of these:
# "write", "write_once", "create_vars"
# "delete", or "category".
# ... and it is ALSO not any of these:
# the name of a class (StaticObj), or
# the name of an instance (InstanceObj)
# followed by either a '.' or "= new"
#
# In that case, it is a syntax error:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Syntax error at or before '+lex.error_leader()+'\n'
' \"'+object_name+' '+next_symbol+'\".')
# Keep track of the location in the user's input files
# where the definition of this object ends.
self.srcloc_end = lex.GetSrcLoc()
@staticmethod
def CleanupReadTemplate(tmpl_contents, lex):
#1) Remove any newlines at the beginning of the first text block
# in tmpl_contents. (Sometimes they cause ugly extra blank lines)
assert(len(tmpl_contents) > 0)
if isinstance(tmpl_contents[0], TextBlock):
first_token_strip = tmpl_contents[0].text.lstrip(' ')
if ((len(first_token_strip) > 0) and
(first_token_strip[0] in lex.newline)):
tmpl_contents[0].text = first_token_strip[1:]
tmpl_contents[0].srcloc.lineno += 1
#2) Remove any trailing '}' characters, and complain if absent.
# (The last token should be a TextBlock containing either '}' or an empty string.)
assert(isinstance(tmpl_contents[-1], TextBlock))
assert(tmpl_contents[-1].text in ['}',''])
if tmpl_contents[-1].text == '}':
del tmpl_contents[-1]
else:
tmpl_begin = None
if isinstance(tmpl_contents[0], TextBlock):
tmpl_begin = tmpl_contents[0].srcloc
elif isinstance(tmpl_contents[0], VarRef):
tmpl_begin = tmpl_contents[0].srcloc
else:
assert(False)
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n\n'
' Premature end to template.\n'
'(Missing terminator character, usually a \'}\'.) The\n'
'incomplete template begins near '+ErrorLeader(tmpl_begin.infile, tmpl_begin.lineno)+'\n')
#3) Finally, if there is nothing but whitespace between the
# last newline and the end, then strip that off too.
if isinstance(tmpl_contents[-1], TextBlock):
i = len(tmpl_contents[-1].text)-1
if i >= 0:
while ((i >= 0) and
(tmpl_contents[-1].text[i] in lex.whitespace) and
(tmpl_contents[-1].text[i] not in lex.newline)):
i -= 1
if (tmpl_contents[-1].text[i] in lex.newline):
tmpl_contents[-1].text = tmpl_contents[-1].text[0:i+1]
def LookupStaticRefs(self):
""" Whenever the user requests to instantiate a new copy of a class,
the name of that class is stored in self.instance_commands.
This name is stored as a string. After all of the classes have been
defined, then we go back through the tree and replace these names
with pointers to actual StaticObjs which correspond to those classes.
(This was deferred until all of the classes have been defined so
that users can refer to classes that they will define later on.)
"""
# Now do the same for any children which
# are created during instantiation:
for command in self.instance_commands:
# Does this command create/instantiate a new copy of a class?
if isinstance(command, InstantiateCommand):
# If so, figure out which statobj is referred to by statobj_str.
assert(isinstance(command.class_ref.statobj_str, basestring))
command.class_ref.statobj = StrToNode(command.class_ref.statobj_str,
self,
command.class_ref.srcloc)
# Now recursively resolve StaticObj pointers for the "children"
# (in this case, "children" refers to classes whose definitions
# are nested within this one).
for child in self.children.values():
child.LookupStaticRefs()
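# Sketch: after parsing, a command created by "m = new Molecule" initially stores
# only a class-name string (roughly '.../Molecule') in command.class_ref.statobj_str;
# the pass above replaces that string with a pointer to the actual StaticObj, which
# is why classes may be referenced before they are defined.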
def _ExtractSuffix(self, class_name_str, lex):
"""
This ugly function helps process "new" commands such as:
mola = new ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)
This function expects a string,
(such as "ForceFieldA/../MoleculeA.move(30,0,0).rot(45,0,0,1)")
It extracts the class name "ForceFieldA/../MoleculeA"
and suffix "move(30,0,0).rot(45,0,0,1)"
"""
# Dots in class names can appear for 2 reasons:
# 1) as part of a path like "../" describing the location
# where this class was defined relative to the caller.
# In that case it will be preceded or followed by
# either another dot '.', or a slash '/'
# 2) as part of a "suffix" which appears after the name
# containing instructions which modify how to
# instantiate that class.
# Case 1 is handled elsewhere. Case 2 is handled here.
i_dot = 0
while i_dot < len(class_name_str):
i_dot = class_name_str.find('.', i_dot)
if i_dot == -1:
break
# Is the '.' character followed by another '.', as in ".."?
# If so, it's part of a path such as "../Parent/Mol', (if
# so, it's not what we're looking for, so keep searching)
if i_dot < len(class_name_str)-1:
if class_name_str[i_dot+1] == '.':
i_dot += 1
#otherwise, check to see if it is followed by a '/'?
elif class_name_str[i_dot+1] != '/':
# if not, then it must be part of a function name
break;
class_suffix = ''
class_name = class_name_str
class_suffix_srcloc = None
if ((i_dot != -1) and
(i_dot < len(class_name_str))):
class_suffix = class_name_str[i_dot:]
class_name = class_name_str[:i_dot]
if class_name_str[-1] != ')':
# If it does not already contain the closing parenthesis,
class_suffix += lex.GetParenExpr()
class_suffix_srcloc = lex.GetSrcLoc()
#sys.stderr.write(' splitting class name into class_name.suffix\n'
# ' class_name=\"'+class_name+'\"\n'
# ' suffix=\"'+class_suffix+'\"\n')
return class_name, class_suffix.lstrip('.'), class_suffix_srcloc
def _ProcessClassName(self, class_name_str, lex):
"""
This function does some additional
processing (occasionally inserting ".../" before class_name).
"""
class_name, class_suffix, class_suffix_srcloc = \
self._ExtractSuffix(class_name_str, lex)
# ---- ellipsis hack ----
# (Note-to-self 2012-4-15)
# Most users expect ttree.py to behave like a
# standard programming language: If the class they are
# instantiating was not defined in this specific
# location, they expect ttree.py to search for
# it outwards, first in the parent's environment,
# and then in the parent's parent's environment,
# and so on, until the object is found.
# For example, most users expect this to work:
# class A{
# <definition_of_a_goes_here...>
# }
# class B{
# a = new A
# }
# Notice in the example above we did not have to specify where "A"
# was defined, because it is defined in the parent's
# environment (ie. immediately outside B's environment).
#
# One can obtain the equivalent behavior in ttree.py
# using ellipsis syntax: "a = new .../A" symbol.
# The ellipsis ".../" tells ttree.py to search upwards
# for the object to the right of it ("A")
# In order to make ttree.py behave the way
# most users are expecting, we artificially insert a
# ".../" before the class name here. (Later on, the
# code that processes the ".../" symbol will take
# care of finding A.)
if (len(class_name)>0) and (class_name[0] not in ('.','/','*','?')):
class_name = '.../' + class_name
return class_name, class_suffix, class_suffix_srcloc
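# Example (sketch, based on the two helpers above): given the made-up input string
# 'MoleculeA.move(30,0,0)', _ProcessClassName() should return roughly
# ('.../MoleculeA', 'move(30,0,0)', srcloc)
# i.e. the suffix is split off by _ExtractSuffix() and the '.../' search prefix is
# prepended to the class name (see the "ellipsis hack" comment above).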
def _ParseRandom(self, lex):
bracket1 = lex.get_token()
bracket2 = lex.get_token()
if ((bracket1 != '(') and (bracket1 != '[')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a \"([\" following \"random\".')
class_names = []
token = ''
prev_token = '['
while True:
token = lex.get_token()
if (token == '('):
lex.push_token(token)
token = lex.GetParenExpr()
if (prev_token not in (',','[','(')):
assert(len(class_names) > 0)
class_names[-1] = prev_token + token
prev_token = prev_token + token
else:
class_names.append(token)
prev_token = token
else:
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
if (prev_token in (',','[','(')):
class_names.append('')
break
if token != ',':
class_names.append(token)
elif (prev_token in (',','[','(')):
class_names.append('')
prev_token = token
token_comma = lex.get_token()
bracket1 = lex.get_token()
if ((token != ']') or
(token_comma != ',') or
(bracket1 != '[')):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a list of class names enclosed in [] brackets, followed by\n'
'a comma, and then a list of probabilities also enclosed in [] brackets.\n'
'(A random-seed following another comma is optional.)')
weights = []
while True:
token = lex.get_token()
if ((token == ']') or
(token == lex.eof) or
(token == '}') or
((token in lex.wordterminators) and
(token != ','))):
break
if token != ',':
try:
weight = float(token)
except ValueError:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected a list of numbers enclosed in [] brackets.')
if (weight < 0.0):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' Negative numbers are not allowed in \"random(\" argument list.\n')
weights.append(weight)
bracket2 = lex.get_token()
if ((token != ']') or
(bracket2 not in (')',','))):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Expected a \")\" or a \",\" following the list of numeric weights.')
if len(class_names) != len(weights):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
'Unequal number of entries in object list and probability list.\n')
# Are all the entries in the "weights" array integers?
# If they are, then treat them as molecule counts,
# not probabilities.
num_by_type = []
for i in range(0, len(weights)):
# are the weights all positive integers?
n = int(weights[i])
if n == weights[i]:
num_by_type.append(n)
if len(num_by_type) < len(weights):
num_by_type = []
tot_weight = sum(weights)
if (tot_weight <= 0.0):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' The numbers in the \"random(\" argument list can not all be zero.\n')
for i in range(0,len(weights)):
weights[i] /= tot_weight
if bracket2 == ',':
try:
token = lex.get_token()
seed = int(token)
random.seed(seed)
except ValueError:
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected an integer (a seed) following the list of weights.')
bracket2 = lex.get_token()
if (bracket2 != ')'):
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n'
' \"'+token+'\"\n'
'Expected a \")\".')
else:
random.seed()
return (class_names, weights, num_by_type)
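# Sketch of the syntax handled by _ParseRandom() (hypothetical class names):
# lipids = new random([DPPC,DLPC], [0.5,0.5], 12345) [100]
# -> class_names = ['DPPC','DLPC'], weights = [0.5,0.5], num_by_type = []
# lipids = new random([DPPC,DLPC], [30,70]) [100]
# -> integer weights are treated as per-type counts: num_by_type = [30,70]
# (and weights is normalized to [0.3,0.7])
# The optional trailing integer (12345 above) is used as a random seed.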
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# We want to append commands to the command_list in the same order
# that these commands appear in the user's input files.
# Unfortunately the commands may be interspersed with the creation of
# new StaticObjs which have their own commands which we have to explore
# recursively.
# Fortunately each child (StaticObj) has a srcloc_begin member, so we
# can infer the correct order of all the commands belonging to the
# children and correctly insert them into the correct place in between
# the commands of the parent.
srcloc2command_or_child = {}
for command in self.commands:
srcloc2command_or_child[command.srcloc] = command
for child in self.children.values():
srcloc = child.srcloc_begin
# special case: Some children do not have a srcloc because
# they were generated automatically. These children should
# not have any commands either so we can ignore them.
if srcloc != None:
srcloc2command_or_child[srcloc] = child
else:
assert(len(child.commands) == 0)
for srcloc in sorted(srcloc2command_or_child.keys()):
entry = srcloc2command_or_child[srcloc]
if isinstance(entry, StaticObj):
child = entry
child_commands = []
child.BuildCommandList(child_commands)
command_list += child_commands
else:
command_list.append(entry)
command_list.append(ScopeEnd(self, self.srcloc_end))
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i+1 < len(self.children):
out_str += str(child)+', '
else:
out_str += str(child)+')'
i += 1
return out_str
def RandomSelect(entries, weights):
""" Return an entry from a list at random using
a (normalized) list of probabilities. """
assert(len(entries) == len(weights))
x = random.random()
i = 0
tot_probability = 0.0
while i < len(weights)-1:
tot_probability += weights[i]
if x <= tot_probability:
break
i += 1
return entries[i]
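# Illustrative example (not in the original source):
#   RandomSelect(['A','B','C'], [0.2, 0.5, 0.3])
# returns 'A' with probability 0.2, 'B' with probability 0.5, and 'C' with
# probability 0.3, assuming the weights have already been normalized to sum
# to 1 (as the parsing code above does before calling this function).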
class InstanceObjBasic(object):
""" A simplified version of InstanceObj.
See the documentation/comments for InstanceObj for more details.
(Leaf nodes (variables) are typically stored as InstanceObjBasic objects
More general, non-leaf nodes are stored using InstanceObj objects.)
"""
__slots__=["name","parent"]
def __init__(self,
name = '',
parent = None):
self.parent = parent # the environment/object which created this object
# Example:
# Suppose this "molecule" is an amino acid monomer
# belonging to a protein. The "parent" refers to
# the InstanceObj for the protein. ".parent" is
# useful for traversing the global instance tree.
# (use InstanceObj.statobj.parent for
# traversing the global static tree)
self.name = name # A string uniquely identifying this object
# in its "parent" environment.
# (It is always the same for every instance
# of the parent object. It would save memory to
# get rid of this member. Andrew 2012/9/13)
#self.deleted = False
##vb##self.var_bindings=None # List of variables assigned to this object
##vb## # or None (None takes up less space than an
##vb## # empty list.)
##vb##def AddVarBinding(self, var_binding):
##vb## if self.var_bindings is None:
##vb## self.var_bindings = [var_binding]
##vb## else:
##vb## self.var_bindings.append(var_binding)
#def DeleteSelf(self):
# self.deleted = True
def DeleteSelf(self):
#self.Dealloc()
self.parent = self # This condition (normally never true)
# flags the node as "deleted". (Nodes are never
# actually deleted, just flagged.)
# I used to have a separate boolean member variable
# which was set True when deleted, but I started
# eliminating unnecessary data members to save space.
#def IsDeleted(self):
# return self.deleted
def IsDeleted(self):
# Return true if self.deleted == True or self.parent == self
# for this node (or for any ancestor node).
node = self
while node.parent != None:
if hasattr(node, 'deleted'):
if node.deleted:
return True
elif node.parent == node:
return True
node = node.parent
return False
#def Dealloc(self):
# pass
##vb##if self.var_bindings is None:
##vb## return
##vb##N = len(self.var_bindings)-1
##vb##for i in range(0,len(self.var_bindings)):
##vb## vb = self.var_bindings[N-i]
##vb## cat = vb.category
##vb## assert(self in cat.bindings)
##vb## del cat.bindings[self]
##vb## del self.var_bindings[N-i]
##vb##self.var_bindings = None
class InstanceObj(InstanceObjBasic):
""" InstanceObjs are used to store nodes in the global
"instance tree", the tree of all classes (molecules) which have
been instantiated. Recall that whenever a class is instantiated,
its members will be instantiated as well. Since these
members can also be classes, this relationship is hierarchical,
and can be represented as a tree.
"InstanceObjs" are the data type used to store the nodes in that tree."""
__slots__=["statobj",
"children",
"categories",
"commands",
"commands_push",
"commands_pop",
"srcloc_begin",
"srcloc_end",
"deleted"]
#"LookupMultiDescrStr",
##"Dealloc",
##"DeleteSelf",
##"IsDeleted",
##"UndeleteSelf",
##"DeleteProgeny",
#"BuildInstanceTree",
#"ProcessCommand",
#"ProcessContextNodes",
#"BuildCommandList"]
def __init__(self,
name = '',
parent = None):
InstanceObjBasic.__init__(self, name, parent)
self.statobj = None # The statobj node refered to by this instance
self.children = {} # A list of statobjs corresponding to
# constituent parts (members) of the
# current class instance.
# The typical example is to consider the
# multiple amino acids (child-molecules)
# which must be created in order to create a
# new protein (instance) to which they belong
# (which would be "self" in this example)
self.categories = {} # This member stores the instance
# variables' categories. Instance variables (ie. variables
# with a '$' prefix) are stored in a
# category belonging to node.categories
# where "node" is of type InstanceObj.
# (There is a long explanation of
# "categories" in the comments
# of class StaticObj.)
self.commands = [] # An ordered list of commands to carry out
# during instantiation
self.commands_push = [] # Stackable commands to carry out (first, before children)
self.commands_pop = [] # Stackable commands to carry out (last, after children)
self.srcloc_begin = None # Keep track of location in user files
self.srcloc_end = None # (useful for error message reporting)
self.deleted = False
def LookupMultiDescrStr(self,
multi_descr_str,
srcloc,
null_list_warning=False,
null_list_error=False):
"""
Post-Instance (PI) modifiers/commands are commands which modify
an instance of a class after it has already been instantiated.
Simple Example:
class A {
...
}
class B {
a = new A.command_1()
a.command_2()
}
In the example above "command_2()" is a ModCommand, and
"a" is the multi_descr_str (a string describing the corresponding InstanceObj).
The "command_2()" command will be retroactively pushed onto the
list of commands to execute once "a" is instantiated.
(This is somewhat counter-intuitive.)
When array brackets [] and wildcards are used, a single ModCommand
can modify many different instances, for example suppose:
a = new A [2][5][3]
then "a[1][2][*].command_3()" is equivalent to
a[1][2][0].command_3()
a[1][2][1].command_3()
a[1][2][2].command_3()
In this example "a[1][2][*]" is the multi_descr_str
"a[*][3][*].command_4()" is equivalent to
a[0][3][0].command_4()
a[0][3][1].command_4()
a[0][3][2].command_4()
a[1][3][0].command_4()
a[1][3][1].command_4()
a[1][3][2].command_4()
In this function, we interpret strings like "a" and "a[*][3][*]"
in the examples above, and figure out which InstanceObjs they refer to,
and push the corresponding command into that InstanceObjs instance
command stack retroactively.
In addition to [*], you can use [a-b] and [a:b] syntax. For example:
"a[0][1-2][0-1].command_3()" and
"a[0][1:3][0:2].command_3()" are both equivalent to:
a[0][1][0].command_3()
a[0][1][1].command_3()
a[0][2][0].command_3()
a[0][2][1].command_3()
"""
pattern_str = multi_descr_str
# Suppose pattern_str = 'a[1][*][3]/b[**][2]'
# We want to split this string into a list of string fragments
# which omits the '*' characters: [ 'a[1][', '][3]/b[', '][2]' ]
# However, we only want to do this when * is enclosed in [].
pattern_fragments = []
ranges_ab = []
i_close_prev = 0
i_close = 0
i_open = 0
while True:
i_open = pattern_str.find('[', i_open+1)
if i_open == -1:
pattern_fragments.append(pattern_str[i_close_prev:])
break
else:
i_close = pattern_str.find(']', i_open+1)
if i_close == -1:
pattern_fragments.append(pattern_str[i_close_prev:])
break
# If there is a '*' or a ':' character between
# the [] brackets, then split the string at '['
# (at i_open) and resume reading again at ']'
# (at i_close) (and create a new entry in the
# pattern_fragments[] and ranges_ab[] lists)
wildcard_here = True
range_ab = [0,-1]
for j in range(i_open+1, i_close):
if ((pattern_str[j] == ':') or
((pattern_str[j] == '-') and (j > i_open+1)) or
(pattern_str[j] == '*')):
i_wildcard = len(pattern_fragments)
range_a_str = pattern_str[i_open+1 : j]
range_b_str = pattern_str[j+1 : i_close]
if (range_a_str != ''):
if str.isdigit(range_a_str):
range_ab[0] = int(range_a_str)
else:
raise InputError('Error near '+
ErrorLeader(srcloc.infile,
srcloc.lineno)+'\n'
' Expected colon-separated integers.\n')
if (range_b_str != ''):
if str.isdigit(range_b_str):
range_ab[1] = int(range_b_str)
# special case: When [a-b] type syntax is
# used, it selects from a to b inclusive.
# (IE. b is not a strict upper bound.)
if pattern_str[j] == '-':
range_ab[1] += 1
else:
raise InputError('Error near '+
ErrorLeader(srcloc.infile,
srcloc.lineno)+'\n'
' Expected colon-separated integers.\n')
break
elif j == i_close-1:
wildcard_here = False
if wildcard_here:
pattern_fragments.append(pattern_str[i_close_prev:i_open+1])
ranges_ab.append(range_ab)
i_close_prev = i_close
assert(len(pattern_fragments)-1==len(ranges_ab))
# Now figure out which InstanceObj or InstanceObjs correspond to
# the name or set of names suggested by the multi_descr_str,
# (after wildcard characters have been substituted with integers).
instobj_list = []
if len(pattern_fragments) == 1:
# commenting out:
# instobj_list.append(StrToNode(pattern_str, self, srcloc))
#
# Line above will print an error message if the node is not found.
# However sometimes we don't want this. Use this code instead:
path_tokens = pattern_str.split('/')
i_last_ptkn, instobj = FollowPath(path_tokens,
self,
srcloc)
# If found add to instobj_list
if ((i_last_ptkn == len(path_tokens))
and (not instobj.IsDeleted())):
instobj_list.append(instobj)
else:
# num_counters equals the number of bracket-enclosed wildcards
num_counters= len(pattern_fragments)-1
multi_counters = [ranges_ab[i][0] for i in range(0, num_counters)]
all_matches_found = False
d_carry = 0
while d_carry < num_counters:
# Find the next InstanceObj in the set of InstanceObjs which
# satisfy the wild-card pattern in pattern_fragments.
while d_carry < num_counters:
candidate_descr_str = ''.join([pattern_fragments[i] +
str(multi_counters[i])
for i in range(0,num_counters)] \
+ \
[pattern_fragments[num_counters]])
#sys.stderr.write('DEBUG: /'+self.name+
# '.LookupMultiDescrStr()\n'
# ' looking up \"'+
# candidate_descr_str+'\"\n')
path_tokens = candidate_descr_str.split('/')
i_last_ptkn, instobj = FollowPath(path_tokens,
self,
srcloc)
# If there is an InstanceObj with that name,
# then add it to the list of InstanceObjs to
# which we will apply this modifier function,
# and increment the counters
# If found (and if the counter is within the range)...
if ((i_last_ptkn == len(path_tokens)) and
((ranges_ab[d_carry][1] == -1) or
(multi_counters[d_carry]<ranges_ab[d_carry][1]))):
# (make sure it has not yet been "deleted")
if (not instobj.IsDeleted()):
instobj_list.append(instobj)
d_carry = 0
multi_counters[0] += 1
#sys.stderr.write('DEBUG: InstanceObj found.\n')
break
# If there is no InstanceObj with that name,
# then perhaps it is because we have incremented
# the counter too high. If there are multiple
# counters, increment the next most significant
# counter, and reset this counter to 0.
# Keep looking
# (We only do this if the user neglected to explicitly
# specify an upper bound --> ranges_ab[d_carry][1] == -1)
elif ((ranges_ab[d_carry][1] == -1) or
(multi_counters[d_carry]>=ranges_ab[d_carry][1])):
#sys.stderr.write('DEBUG: InstanceObj not found.\n')
multi_counters[d_carry] = ranges_ab[d_carry][0]
d_carry += 1
if d_carry >= num_counters:
break
multi_counters[d_carry] += 1
else:
# Object was not found but we keep going. Skip
# to the next entry in the multi-dimensional list.
d_carry = 0
multi_counters[0] += 1
break
if (null_list_warning and (len(instobj_list) == 0)):
sys.stderr.write('WARNING('+g_module_name+'.LookupMultiDescrStr()):\n'
' Potential problem near '+
ErrorLeader(srcloc.infile,
srcloc.lineno)+'\n'
' No objects (yet) matching name \"'+pattern_str+'\".\n')
if (null_list_error and
(len(instobj_list) == 0)):
if len(pattern_fragments) == 1:
raise InputError('Error('+g_module_name+'.LookupMultiDescrStr()):\n'
' Syntax error near '+
ErrorLeader(srcloc.infile,
srcloc.lineno)+'\n'
' No objects matching name \"'+pattern_str+'\".')
else:
sys.stderr.write('WARNING('+g_module_name+'.LookupMultiDescrStr()):\n'
' Potential problem near '+
ErrorLeader(srcloc.infile,
srcloc.lineno)+'\n'
' No objects (yet) matching name \"'+pattern_str+'\".\n')
return instobj_list
def __str__(self):
out_str = self.name
if len(self.children) > 0:
out_str += '('
i = 0
for child in self.children.values():
if i+1 < len(self.children):
out_str += str(child)+', '
else:
out_str += str(child)+')'
i += 1
return out_str
def DeleteSelf(self):
self.deleted = True
# COMMENT1: Don't get rid of pointers to yourself. Knowing which
# objects you instantiated and destroyed might be useful
# in case you want to apply multiple delete [*] commands
# COMMENT2: Don't delete all the child nodes, and commands. These are
# needed later (so that text-templates containing references
# to these nodes don't cause moltemplate to crash.)
#def UndeleteSelf(self):
# self.deleted = False
#
#
#def DeleteProgeny(self):
# for child in self.children.values():
# if hasattr(child, 'DeleteProgeny'):
# child.DeleteProgeny()
# else:
# child.DeleteSelf()
# self.DeleteSelf();
def BuildInstanceTree(self,
statobj,
class_parents_in_use):
"""
This takes care of the details of copying relevant data from a StaticObj
into a newly-created InstanceObj. It allocates space for and performs
a deep-copy of any instance variables (and new instance categories), but
it performs a shallow copy of everything else (template text, etc..).
This is done recursively for every child that this class instantiates.
"""
if self.IsDeleted():
return
#sys.stderr.write(' DEBUG: '+self.name+
# '.BuildInstanceTree('+statobj.name+')\n')
#instance_refs = {}
# Keep track of which line in the file (and which file) we were
# in when we began parsing the class which defines this instance,
# as well as when we stopped parsing.
# (Don't do this if you are recursively searching class_parents because
# in that case you would be overwriting .statobj with the parent.)
if len(class_parents_in_use) == 0:
self.statobj = statobj
self.srcloc_begin = statobj.srcloc_begin
self.srcloc_end = statobj.srcloc_end
# Make copies of the class_parents' StaticObj data.
# First deal with the "self.instance_commands_push"
# These commands should be carried out before any of the commands
# in "self.instance_commands".
for command in statobj.instance_commands_push:
#self.commands.append(command)
self.ProcessCommand(command)
# Then deal with class parents
for class_parent in statobj.class_parents:
# Avoid the "Diamond of Death" multiple inheritance problem
if class_parent not in class_parents_in_use:
#sys.stderr.write(' DEBUG: '+self.name+'.class_parent = '+
# class_parent.name+'\n')
self.BuildInstanceTree(class_parent,
class_parents_in_use)
class_parents_in_use.add(class_parent)
# Now, deal with the data in THIS object and its children
assert((self.commands != None) and (self.categories != None))
# "instance_categories" contains a list of new "categories" (ie new
# types of variables) to create whenever this class is instantiated.
# (This is used whenever we create a local counter variable: Suppose we
# want to count the residues within a particular protein, when there
# are multiple copies of the same protein in the simulation.)
for cat_name, cat in statobj.instance_categories.items():
assert(len(cat.bindings) == 0)
self.categories[cat_name] = Category(cat_name)
self.categories[cat_name].counter = cat.counter.__copy__()
# Note: Later on we will generate leaf nodes corresponding to
# variables, and put references to them in this category.
# Deal with the "instance_commands",
for command in statobj.instance_commands:
#self.commands.append(command)
self.ProcessCommand(command)
# Finally deal with the "self.instance_commands_pop"
# These commands should be carried out after all of the commands
# in "self.instance_commands".
for command in statobj.instance_commands_pop:
#self.commands.append(command)
self.ProcessCommand(command)
def ProcessCommand(self, command):
if isinstance(command, ModCommand):
sys.stderr.write(' processing command \"'+str(command)+'\"\n')
mod_command = command
instobj_list = self.LookupMultiDescrStr(mod_command.multi_descr_str,
mod_command.command.srcloc)
if isinstance(mod_command.command, DeleteCommand):
# Delete any objects we have created so far
# whose name matches mod_command.multi_descr_str:
for instobj in instobj_list:
instobj.DeleteSelf()
#instobj.DeleteProgeny()
elif len(instobj_list) == 0:
raise InputError('Error('+g_module_name+'.ProcessCommand()):\n'
' Syntax error at or before '+
ErrorLeader(mod_command.command.srcloc.infile,
mod_command.command.srcloc.lineno)+'\n'
' No objects matching name \"'+
mod_command.multi_descr_str+'\"\n'
' (If the object is an array, include brackets. Eg. \"molecules[*][*][*]\")')
else:
for instobj in instobj_list:
assert(not isinstance(mod_command.command, DeleteCommand))
command = mod_command.command.__copy__()
self.ProcessContextNodes(command)
if isinstance(command, PushCommand):
instobj.commands_push.append(command)
elif isinstance(mod_command.command, PopCommand):
instobj.commands_pop.insert(0, command)
else:
# I don't know if any other types of commands will ever
# occur but I handle them below, just in case...
assert(not isinstance(command, InstantiateCommand))
instobj.commands.append(command.__copy__())
return # ends "if isinstance(command, ModCommand):"
# Otherwise:
command = command.__copy__()
self.ProcessContextNodes(command)
if isinstance(command, InstantiateCommand):
sys.stderr.write(' processing command \"'+str(command)+'\"\n')
self.commands.append(command) #<- useful later to keep track of the
# order that children were created
# check to make sure no child of that name was previously defined
prev_child = self.children.get(command.name)
if ((prev_child != None) and (not prev_child.IsDeleted())):
raise InputError('Error near '+
ErrorLeader(command.srcloc.infile,
command.srcloc.lineno)+'\n'
' Object \"'+command.name+'\" is already defined.\n')
child = InstanceObj(command.name, self)
command.instobj = child
if command.class_ref.statobj_str == '':
child.DeleteSelf()
# Why? This if-then check handles the case when the user
# wants to create an array of molecules with random vacancies.
# When this happens, some of the instance commands will
# contain instructions to create a copy of a molecule with
# an empty molecule-type-string (class_ref.statobj_str).
# Counter-intuitively, ...
# ...we DO want to create something here so that the user can
# safely loop over the array without generating an error.
# (Such as to delete elements, or move the remaining
# members in the array.) We just want to mark it as
# 'deleted'. (That's what "DeleteSelf()" does.)
else:
# This is the heart of "BuildInstanceTree()"
# (which implements object composition)
new_class_parents_in_use = set([])
child.BuildInstanceTree(command.class_ref.statobj,
new_class_parents_in_use)
self.children[child.name] = child
elif isinstance(command, WriteFileCommand):
#sys.stderr.write(' processing command \"'+str(command)+'\"\n')
self.commands.append(command)
for var_ref in command.tmpl_list:
# Process the VarRef entries in the tmpl_list,
# (and check they have the correct prefix: either '$' or '@')
# Ignore other entries (for example, ignore TextBlocks).
if (isinstance(var_ref, VarRef) and (var_ref.prefix[0] == '$')):
if (var_ref.descr_str[:4] == 'mol:'):
pass
var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
DescrToCatLeafNodes(var_ref.descr_str,
self,
var_ref.srcloc,
True)
categories = var_ref.nptr.cat_node.categories
# "categories" is a dictionary storing "Category" objects
# indexed by category names.
# Note to self: Always use the ".categories" member,
# (never the ".instance_categories" member.
# ".instance_categories" are only used temporarily before
# we instantiate, ie. before we build the tree of InstanceObjs.)
category = categories[var_ref.nptr.cat_name]
# "category" is a Category object containing a
# dictionary of VarBinding objects, and an internal counter.
var_bindings = category.bindings
# "var_bindings" is a dictionary storing "VarBinding"
# objects, indexed by leaf nodes. Each leaf node
# corresponds to a unique variable in this category.
# --- Now update "var_bindings" ---
# Search for the "VarBinding" object that
# corresponds to this leaf node.
# If not found, then create one.
if var_ref.nptr.leaf_node in var_bindings:
var_binding = var_bindings[var_ref.nptr.leaf_node]
# "var_binding" stores the information for a variable,
# including pointers to all of the places the variable
# is referenced, the variable's (full) name, and value.
#
# Keep track of all the places that variable is
# referenced by updating the ".refs" member
var_binding.refs.append(var_ref)
else:
# Not found, so we create a new binding.
var_binding = VarBinding()
# var_binding.refs contains a list of all the places
# this variable is referenced. Start with this var_ref:
var_binding.refs = [var_ref]
# keep track of the cat_node, cat_name, leaf_node:
var_binding.nptr = var_ref.nptr
# "var_binding.full_name" stores a unique string like
# '@/atom:Water/H' or '$/atom:water[1423]/H2',
# which contains the full path for the category and leaf
# nodes, and uniquely identifies this variable globally.
# Thus these strings correspond uniquely (ie. in a
# one-to-one fashion) with the nodes they represent.
var_binding.full_name = var_ref.prefix[0] + \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
# (These names can always be generated later when needed
# but it doesn't hurt to keep track of it here too.)
# Now add this binding to the other
# bindings in this category:
var_bindings[var_ref.nptr.leaf_node] = var_binding
##vb## var_ref.nptr.leaf_node.AddVarBinding(var_binding)
var_binding.category = category
# It's convenient to add a pointer in the opposite direction
# so that later if we find the var_ref, we can find its
# binding and visa-versa. (Ie. two-way pointers)
var_ref.binding = var_binding
assert(var_ref.nptr.leaf_node in var_bindings)
else:
# Otherwise, we don't know what this command is yet.
# Append it to the list of commands and process it/ignore it later.
self.commands.append(command)
def ProcessContextNodes(self, command):
if hasattr(command, 'context_node'):
# Lookup any nodes pointers to instobjs
if command.context_node != None:
if type(command.context_node) is str:
command.context_node = StrToNode(command.context_node,
self,
command.srcloc)
# (Otherwise, just leave it as None)
def BuildCommandList(self, command_list):
"""
Search the commands in the tree and make a linear list of commands
in the order they should be carried out.
"""
if self.IsDeleted():
return
if (len(self.commands) == 0):
assert(len(self.children) == 0)
# To save memory don't generate any commands
# for trivial (leaf) nodes
return
# Add a special note to the list of commands to indicate which object
# the commands refer to. (This might be useful one day.)
# Later we can loop through this command list and still be able to tell
# whether or not we are within the scope of a particular class or instance
# (by seeing if we are between a "ScopeBegin" and "ScopeEnd" pair).
command_list.append(ScopeBegin(self, self.srcloc_begin))
# Note:
# The previous version looped over all commands in this node, and then
# recursively invoke BuildCommandList() on all the children of this node
# We don't do that anymore because it does not take into account the
# order that various child objects were created/instantiated
# which potentially could occur in-between other commands. Instead,
# now we loop through the command_list and recursively visit child
# nodes only when we encounter them in the command list.
for command in self.commands_push:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
for command in self.commands:
if isinstance(command, InstantiateCommand):
#child = self.children[command.name]
# the above line does not work because you may have
# deleted that child after you created and then
# replaced it by somebody else. Store the node.
child = command.instobj
child.BuildCommandList(command_list)
else:
command_list.append(command)
for command in self.commands_pop:
assert(isinstance(command, InstantiateCommand) == False)
command_list.append(command)
command_list.append(ScopeEnd(self, self.srcloc_begin))
def AssignTemplateVarPtrs(tmpl_list, context_node):
"""
Now scan through all the variables within the templates defined
for this context_node (either static or dynamic depending on var_filter).
Each reference to a variable in the template has a descriptor which
indicates the variable's type, and in which molecule it is defined (ie
where it is located in the molecule instance tree or type definition tree).
(See comments for "class VarNPtr(object):" above for details.)
Eventually we want to assign a value to each variable.
This same variable (node) may appear multiple times in different templates.
So we also create a place to store this variable's value, and also assign
(two-way) pointers from the VarRef in the template, to this storage area so
that later on when we write out the contents of the template to a file, we
can substitute this variable with its value, in all the places it appears.
"""
for var_ref in tmpl_list:
# Process the VarRef entries in the tmpl_list,
# (and check they have the correct prefix: either '$' or '@')
# Ignore other entries (for example, ignore TextBlocks).
if (isinstance(var_ref, VarRef) and
((isinstance(context_node, StaticObj) and
(var_ref.prefix[0] == '@'))
or
(isinstance(context_node, InstanceObjBasic) and
(var_ref.prefix[0] == '$')))):
var_ref.nptr.cat_name, var_ref.nptr.cat_node, var_ref.nptr.leaf_node = \
DescrToCatLeafNodes(var_ref.descr_str,
context_node,
var_ref.srcloc,
True)
categories = var_ref.nptr.cat_node.categories
# "categories" is a dictionary storing "Category" objects
# indexed by category names.
# Note to self: Always use the ".categories" member,
# (never the ".instance_categories" member.
# ".instance_categories" are only used temporarily before
# we instantiate, ie. before we build the tree of InstanceObjs.)
category = categories[var_ref.nptr.cat_name]
# "category" is a Category object containing a
# dictionary of VarBinding objects, and an internal counter.
var_bindings = category.bindings
# "var_bindings" is a dictionary storing "VarBinding"
# objects, indexed by leaf nodes. Each leaf node
# corresponds to a unique variable in this category.
# --- Now update "var_bindings" ---
# Search for the "VarBinding" object that
# corresponds to this leaf node.
# If not found, then create one.
if var_ref.nptr.leaf_node in var_bindings:
var_binding = var_bindings[var_ref.nptr.leaf_node]
# "var_binding" stores the information for a variable,
# including pointers to all of the places the variable
# is referenced, the variable's (full) name, and value.
#
# Keep track of all the places that variable is
# referenced by updating the ".refs" member
var_binding.refs.append(var_ref)
else:
# Not found, so we create a new binding.
var_binding = VarBinding()
# var_binding.refs contains a list of all the places
# this variable is referenced. Start with this var_ref:
var_binding.refs = [var_ref]
# keep track of the cat_node, cat_name, leaf_node:
var_binding.nptr = var_ref.nptr
# "var_binding.full_name" stores a unique string like
# '@/atom:Water/H' or '$/atom:water[1423]/H2',
# which contains the full path for the category and leaf
# nodes, and uniquely identifies this variable globally.
# Thus these strings correspond uniquely (ie. in a
# one-to-one fashion) with the nodes they represent.
var_binding.full_name = var_ref.prefix[0] + \
CanonicalDescrStr(var_ref.nptr.cat_name,
var_ref.nptr.cat_node,
var_ref.nptr.leaf_node,
var_ref.srcloc)
# (These names can always be generated later when needed
# but it doesn't hurt to keep track of it here too.)
# Now add this binding to the other
# bindings in this category:
var_bindings[var_ref.nptr.leaf_node] = var_binding
##vb## var_ref.nptr.leaf_node.AddVarBinding(var_binding)
var_binding.category = category
# It's convenient to add a pointer in the opposite direction
# so that later if we find the var_ref, we can find its
# binding and visa-versa. (Ie. two-way pointers)
var_ref.binding = var_binding
assert(var_ref.nptr.leaf_node in var_bindings)
def AssignStaticVarPtrs(context_node, search_instance_commands = False):
#sys.stdout.write('AssignVarPtrs() invoked on node: \"'+NodeToStr(context_node)+'\"\n')
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
for command in commands:
if isinstance(command, WriteFileCommand):
AssignTemplateVarPtrs(command.tmpl_list, context_node)
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
AssignStaticVarPtrs(child, search_instance_commands)
def AssignVarOrderByCommand(command_list, prefix_filter):
"""
For each category in context_node, and each variable in that category,
set the order of each variable according to the position of the
write(), write_once(), or other command that created it.
Only variables with the correct prefix ('$' or '@') are affected.
"""
count = 0
for command in command_list:
if isinstance(command, WriteFileCommand):
tmpl_list = command.tmpl_list
for var_ref in tmpl_list:
if isinstance(var_ref, VarRef):
if var_ref.prefix in prefix_filter:
count += 1
if ((var_ref.binding.order is None) or
(var_ref.binding.order > count)):
var_ref.binding.order = count
#def AssignVarOrderByFile(command_list, prefix_filter):
# """
# For each category in context_node, and each variable in that category,
# set the order of each variable equal to the position of that variable
# in the user's input file.
#
# """
#
# for command in command_list:
# if isinstance(command, WriteFileCommand):
# tmpl_list = command.tmpl_list
# for var_ref in tmpl_list:
# if isinstance(var_ref, VarRef):
# if var_ref.prefix in prefix_filter:
# if ((var_ref.binding.order is None) or
# (var_ref.binding.order > var_ref.srcloc.order)):
# var_ref.binding.order = var_ref.srcloc.order
def AssignVarOrderByFile(context_node, prefix_filter, search_instance_commands=False):
"""
For each category in context_node, and each variable in that category,
set the order of each variable equal to the position of that variable
in the user's input file.
"""
commands = context_node.commands
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
# Concatenate (using .append() here would add the whole list as a
# single element, and those commands would then be silently skipped).
commands = commands + \
context_node.instance_commands_push + \
context_node.instance_commands + \
context_node.instance_commands_pop
for command in commands:
if isinstance(command, WriteFileCommand):
tmpl_list = command.tmpl_list
for var_ref in tmpl_list:
if (isinstance(var_ref, VarRef) and
(var_ref.prefix in prefix_filter)):
if ((var_ref.binding.order == -1) or
(var_ref.binding.order > var_ref.srcloc.order)):
var_ref.binding.order = var_ref.srcloc.order
for child in context_node.children.values():
AssignVarOrderByFile(child, prefix_filter, search_instance_commands)
def AutoAssignVals(cat_node,
sort_variables,
reserved_values = None,
ignore_prior_values = False):
"""
This function automatically assigns all the variables
belonging to all the categories in cat_node.categories.
Each category has its own internal counter. For every variable in that
category, query the counter (which usually returns an integer),
and assign the variable to it. Exceptions can be made if the integer
is reserved by some other variable, or if it has been already assigned.
Afterwards, we search the child nodes recursively
(in depth-first-search order).
sort_variables: Sorting the variables according to their "binding.order"
counters is optional.
"""
if (not hasattr(cat_node, 'categories')):
# (sometimes leaf nodes lack a 'categories' member, to save memory)
return
# Search the tree in a depth-first-search manner.
# For each node, examine the "categories" associated with that node
# (ie the list of variables whose counters lie within that node's scope).
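# Illustrative note (not in the original source): if a category (eg. "atom")
# contains three ordinary, undeleted variables and there are no reserved
# values, the loop below typically assigns them the strings '1', '2', '3'
# (the exact sequence depends on the category's counter object); any value
# already claimed in reserved_values is skipped over.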
for cat_name, cat in cat_node.categories.items():
# Loop through all the variables in this category.
if sort_variables:
# Sort the list of variables according to var_binding.order
# First, print a progress indicator (this could be slow)
prefix = '$'
# Is this parent_node an StaticObj? (..or inherit from StaticObj?)
if isinstance(cat_node, StaticObj):
prefix = '@'
sys.stderr.write(' sorting variables in category: '+prefix+
CanonicalCatName(cat_name, cat_node)+':\n')
var_bind_iter = iter(sorted(cat.bindings.items(),
key=operator.itemgetter(1)))
else:
# Just iterate through them in the order that they were added
# to the category list. (This happens to be the same order as
# we found it earlier when searching the tree.)
var_bind_iter = iter(cat.bindings.items())
for leaf_node,var_binding in var_bind_iter:
if ((var_binding.value is None) or ignore_prior_values):
if var_binding.nptr.leaf_node.name[:9] == '__query__':
# -- THE "COUNT" HACK --
# '__query__...' variables are not really variables.
# They are a mechanism to allow the user to query the
# category counter without incrementing it.
var_binding.value = str(cat.counter.query())
elif HasWildCard(var_binding.full_name):
# -- The wildcard hack ---
# Variables containing * or ? characters in their names
# are not allowed. These are not variables, but patterns
# to match with other variables. Represent them by the
# (full-path-expanded) string containing the * or ?.
var_binding.value = var_binding.full_name
else:
if (not var_binding.nptr.leaf_node.IsDeleted()):
# For each (regular) variable, query this category's counter
# (convert it to a string), and see if it is already in use
# (in this category). If not, then set this variable's value
# to the counter's value. Either way, increment the counter.
while True:
cat.counter.incr()
value = str(cat.counter.query())
if ((reserved_values is None) or
((cat, value) not in reserved_values)):
break
var_binding.value = value
# Recursively invoke AssignVarValues() on all child nodes
for child in cat_node.children.values():
AutoAssignVals(child,
sort_variables,
reserved_values,
ignore_prior_values)
# Did the user ask us to reformat the output string?
# This information is encoded in the variable's suffix.
def ExtractFormattingCommands(suffix):
if (len(suffix) <= 1):
return None, None
if suffix[-1] == '}': # Get rid of any trailing '}' characters
suffix = suffix[:-1]
if suffix[-1] != ')': # Format functions are always followed by parens
return None, None
else:
idot = suffix.find('.') # Format functions usually preceded by '.'
ioparen = suffix.find('(')
icparen = suffix.find(')')
format_fname = suffix[idot+1:ioparen]
args = suffix[ioparen+1:icparen]
args = args.split(',')
for i in range(0, len(args)):
args[i] = RemoveOuterQuotes(args[i].strip(), '\"\'')
return format_fname, args
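# Illustrative example (not in the original source): a variable suffix such as
#   '.ljust(8,"0")}'
# is parsed by ExtractFormattingCommands() into ('ljust', ['8', '0']);
# Render() below then applies value.ljust(8, '0') to the substituted value.
# A suffix that does not end in ')' yields (None, None) and is left alone.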
def Render(tmpl_list, substitute_vars=True):
"""
This function converts a TextBlock,VarRef list into a string.
It is invoked by WriteTemplatesValue() in order to print
out the templates stored at each node of the tree.
"""
out_str_list = []
i = 0
while i < len(tmpl_list):
entry = tmpl_list[i]
if isinstance(entry, VarRef):
var_ref = entry
var_bindings = var_ref.nptr.cat_node.categories[var_ref.nptr.cat_name].bindings
#if var_ref.nptr.leaf_node not in var_bindings:
#assert(var_ref.nptr.leaf_node in var_bindings)
if var_ref.nptr.leaf_node.IsDeleted():
raise InputError('Error near '+
ErrorLeader(var_ref.srcloc.infile,
var_ref.srcloc.lineno)+'\n'
' The variable you referred to does not exist:\n\n'
' '+var_ref.prefix+var_ref.descr_str+var_ref.suffix+'\n\n'
' (You probably deleted it or something it belonged to earlier.)\n')
else:
if substitute_vars:
value = var_bindings[var_ref.nptr.leaf_node].value
format_fname, args = ExtractFormattingCommands(var_ref.suffix)
if format_fname == 'ljust':
if len(args) == 1:
value = value.ljust(int(args[0]))
else:
value = value.ljust(int(args[0]), args[1])
elif format_fname == 'rjust':
if len(args) == 1:
value = value.rjust(int(args[0]))
else:
value = value.rjust(int(args[0]), args[1])
out_str_list.append(value)
else:
out_str_list.append(var_ref.prefix +
SafelyEncodeString(var_bindings[var_ref.nptr.leaf_node].full_name[1:]) +
var_ref.suffix)
else:
assert(isinstance(entry, TextBlock))
out_str_list.append(entry.text)
i += 1
return ''.join(out_str_list)
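# Illustrative example (not in the original source): for a template equivalent
# to the text 'atom $atom:H1 1.0', tmpl_list would hold a TextBlock for
# 'atom ', a VarRef for '$atom:H1', and a TextBlock for ' 1.0'.  With
# substitute_vars=True and the binding for that variable set to '5', Render()
# returns 'atom 5 1.0'; with substitute_vars=False the variable's full name is
# written out instead of its value.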
def MergeWriteCommands(command_list):
""" Multiple write commands often target the same file.
We can improve performance by merging the templates of all
commands that write to the same file before
carrying out the write commands.
"""
file_templates = defaultdict(list)
for command in command_list:
if isinstance(command, WriteFileCommand):
if command.filename != None:
file_templates[command.filename] += \
command.tmpl_list
return file_templates
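# Illustrative note (not in the original source): if command_list contains two
# WriteFileCommands that both target "system.data", their tmpl_lists are
# concatenated under file_templates["system.data"], so the file only needs to
# be opened (and appended to) once by WriteTemplatesValue() below.
# ("system.data" is a hypothetical file name used only for this example.)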
def WriteTemplatesValue(file_templates):
""" Carry out the write() and write_once() commands (which
write out the contents of the templates contained inside them).
"""
for filename, tmpl_list in file_templates.items():
if filename == '':
out_file = sys.stdout
else:
out_file = open(filename, 'a')
out_file.write(Render(tmpl_list, substitute_vars=True))
if filename != '':
out_file.close()
# Alternate (old method):
#for command in command_list:
# if isinstance(command, WriteFileCommand):
# if command.filename != None:
# if command.filename == '':
# out_file = sys.stdout
# else:
# out_file = open(command.filename, 'a')
#
# out_file.write(Render(command.tmpl_list))
#
# if command.filename != '':
# out_file.close()
def WriteTemplatesVarName(file_templates):
""" Carry out the write() and write_once() commands (which
write out the contents of the templates contained inside them).
However variables within the templates are represented by their
full name instead of their assigned value.
"""
for filename, tmpl_list in file_templates.items():
if filename != '':
out_file = open(filename + '.template', 'a')
out_file.write(Render(tmpl_list, substitute_vars=False))
out_file.close()
def EraseTemplateFiles(command_list):
filenames = set([])
for command in command_list:
if isinstance(command, WriteFileCommand):
if (command.filename != None) and (command.filename != ''):
if command.filename not in filenames:
filenames.add(command.filename)
# Opening the files (in mode 'w') and closing them again
# erases their contents.
out_file = open(command.filename, 'w')
out_file.close()
out_file = open(command.filename + '.template', 'w')
out_file.close()
#def ClearTemplates(file_templates):
# for filename in file_templates:
# if filename != '':
# out_file = open(filename, 'w')
# out_file.close()
# out_file = open(filename + '.template', 'w')
# out_file.close()
def WriteVarBindingsFile(node):
""" Write out a single file which contains a list of all
of the variables defined (regardless of which class they
were defined in). Next to each variable name is the value
(usually a number) that was assigned to that variable.
"""
if (not hasattr(node, 'categories')):
# (sometimes leaf nodes lack a 'categories' member, to save memory)
return
out = open('ttree_assignments.txt', 'a')
for cat_name in node.categories:
var_bindings = node.categories[cat_name].bindings
for nd, var_binding in var_bindings.items():
if nd.IsDeleted():
continue # In that case, skip this variable
#if type(node) is type(nd):
if ((isinstance(node, InstanceObjBasic) and isinstance(nd, InstanceObjBasic))
or
(isinstance(node, StaticObj) and isinstance(nd, StaticObj))):
# Now omit variables whose names contain "*" or "?"
# (these are actually not variables, but wildcard patterns)
if not HasWildCard(var_binding.full_name):
if len(var_binding.refs) > 0:
usage_example = ' #'+\
ErrorLeader(var_binding.refs[0].srcloc.infile, \
var_binding.refs[0].srcloc.lineno)
else:
usage_example = ''
out.write(SafelyEncodeString(var_binding.full_name) +' '+
SafelyEncodeString(var_binding.value)
+usage_example+'\n')
out.close()
for child in node.children.values():
WriteVarBindingsFile(child)
def CustomizeBindings(bindings,
g_objectdefs,
g_objects):
var_assignments = set()
for name,vlpair in bindings.items():
prefix = name[0]
var_descr_str = name[1:]
value = vlpair.val
dbg_loc = vlpair.loc
if prefix == '@':
var_binding = LookupVar(var_descr_str,
g_objectdefs,
dbg_loc)
elif prefix == '$':
var_binding = LookupVar(var_descr_str,
g_objects,
dbg_loc)
else:
# If the user neglected a prefix, this should have generated
# an error earlier on.
assert(False)
# Change the assignment:
var_binding.value = value
var_assignments.add((var_binding.category, value))
#sys.stderr.write(' CustomizeBindings: descr=' + var_descr_str +
# ', value=' + value + '\n')
return var_assignments
##############################################################
##################### BasicUI functions #####################
# These functions are examples of how to use the StaticObj
# and InstanceObj data structures above, and to read a ttree file.
# These are examples only. New programs based on ttree_lib.py
# will probably require their own settings and functions.
##############################################################
def BasicUIReadBindingsFile(bindings_so_far, filename):
try:
f = open(filename, 'r')
except IOError:
sys.stderr.write('Error('+g_filename+'):\n'' : unable to open file\n'
'\n'
' \"'+filename+'\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name, then this could\n'
' occur if you forgot to enclose your command-line-argument in quotes,\n'
' For example, use: \'$atom:wat[2]/H1 20\' or "\$atom:wat[2]/H1 20"\n'
' to set the variable $atom:wat[2]/H1 to 20.)\n')
sys.exit(1)
BasicUIReadBindingsStream(bindings_so_far, f, filename)
f.close()
def BasicUIReadBindingsText(bindings_so_far, text, source_name=''):
if sys.version > '3':
in_stream = io.StringIO(text)
else:
in_stream = cStringIO.StringIO(text)
return BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name)
class ValLocPair(object):
__slots__=["val","loc"]
def __init__(self,
val = None,
loc = None):
self.val = val
self.loc = loc
def BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name=''):
# EXAMPLE (simple version)
# The simple version of this function commented out below
# does not handle variable whose names or values
# contain strange or escaped characters, quotes or whitespace.
# But I kept it in for illustrative purposes:
#
#for line in f:
# line = line.strip()
# tokens = line.split()
# if len(tokens) == 2:
# var_name = tokens[0]
# var_value = tokens[1]
# var_assignments[var_name] = var_value
#f.close()
lex = TemplateLexer(in_stream, source_name)
tmpllist = lex.ReadTemplate()
i = 0
if isinstance(tmpllist[0], TextBlock):
i += 1
while i+1 < len(tmpllist):
# process one line at a time (2 entries per line)
var_ref = tmpllist[i]
text_block = tmpllist[i+1]
assert(isinstance(var_ref, VarRef))
if (not isinstance(text_block, TextBlock)):
raise InputError('Error('+g_filename+'):\n'
' This is not a valid name-value pair:\n'
' \"'+var_ref.prefix+var_ref.descr_str+' '+text_block.text.rstrip()+'\"\n'
' Each variable assignment should contain a variable name (beginning with\n'
' @ or $) followed by a space, and then a string you want to assign to it.\n'
' (Surrounding quotes are optional and will be removed.)\n')
# Variables in the ttree_assignments.txt file use "full-path" style.
# In other words, the full name of the variable, (including all
# path information) is stored var_ref.descr_str,
# and the first character of the prefix stores either a @ or $
var_name = var_ref.prefix[:1] + var_ref.descr_str
text = SplitQuotedString(text_block.text.strip())
var_value = EscCharStrToChar(RemoveOuterQuotes(text, '\'\"'))
bindings_so_far[var_name] = ValLocPair(var_value, lex.GetSrcLoc())
i += 2
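# Illustrative note (not in the original source): each line of a bindings file
# (or of a quoted "-a"/"-b" command-line argument) is a name-value pair such as
#   $atom:wat[2]/H1 20
#   @atom:Water/H 5
# The full variable name (including its '@' or '$' prefix) becomes the key in
# bindings_so_far, and the remaining text (outer quotes removed) is the value.
# ("@atom:Water/H" is a hypothetical variable name used only for this example.)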
class BasicUISettings(object):
"""
BasicUISettings() contains several run-time user customisations
for ttree. (These affect the order and values assigned to variables
in a ttree file).
This object, along with the other "UI" functions below are examples only.
(New programs based on ttree_lib.py will probably have their own settings
and functions.)
Members:
user_bindings
user_bindings_x
These are lists containing pairs of variable names,
and the string values they are bound to (which are typically numeric).
Values specified in the "user_bindings_x" list are "exclusive".
This means their values are reserved, so that later on, when other
variables (in the same category) are automatically assigned to values, care
care will be taken to avoid duplicating the values in user_bindings_x.
However variables in the "user_bindings" list are assigned without regard
to the values assigned to other variables, and may or may not be unique.
order_method
The order_method specifies the order in which values will be automatically
assigned to variables. (In the context of building molecular simulation
input files, this helps the user to ensure that the order of the atoms
created by the ttree file matches the order they appear in other files
created by other programs.)
"""
def __init__(self,
user_bindings_x=None,
user_bindings=None,
order_method='by_command',
lex=None):
if user_bindings_x:
self.user_bindings_x = user_bindings_x
else:
self.user_bindings_x = OrderedDict()
if user_bindings:
self.user_bindings = user_bindings
else:
self.user_bindings = OrderedDict()
self.order_method = order_method
self.lex = lex
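# Illustrative usage sketch (not in the original source): a calling script
# could fill in the settings manually instead of using BasicUIParseArgs(), eg:
#   settings = BasicUISettings(order_method='by_file')
#   settings.lex = TemplateLexer(open('system.lt', 'r'), 'system.lt')
# ('system.lt' is a hypothetical input file name used only for this example.)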
def BasicUIParseArgs(argv, settings):
"""
BasicUIParseArgs()
The following function contains part of the user interface for a
typical ttree-based program. This function processes an argument list
and extracts the common ttree user settings.
This function, along with the other "UI" functions below are examples only.
(New programs based on ttree_lib.py will probably have their own UI.)
"""
#argv = [arg for arg in orig_argv] # (make a deep copy of "orig_argv")
# This error message is used in multiple places:
bind_err_msg = 'should either be followed by a 2-column\n'+\
' file (containing variable-value pairs on each line).\n'+\
' --OR-- a quoted string (such as \"@atom:x 2\")\n'+\
' with the full variable name and its desired value.'
bind_err_msg_var = 'Missing value, or space needed separating variable\n'+\
' and value. (Remember to use quotes to surround the argument\n'+\
' containing the variable name, and its assigned value.)'
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-a':
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_filename+'):\n'
' Error in -a \"'+argv[i+1]+' argument.\"\n'
' The -a flag '+bind_err_msg)
if (argv[i+1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i+1].strip())
if len(tokens) < 2:
raise InputError('Error('+g_filename+'):\n'
' Error in -a \"'+argv[i+1]+'\" argument.\n'
' '+bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings_x,
argv[i+1],
'__command_line_argument__')
else:
BasicUIReadBindingsFile(settings.user_bindings_x,
argv[i+1])
#i += 2
del(argv[i:i+2])
elif argv[i] == '-b':
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_filename+'):\n'
' Error in -b \"'+argv[i+1]+' argument.\"\n'
' The -b flag '+bind_err_msg)
if (argv[i+1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i+1].strip())
if len(tokens) < 2:
raise InputError('Error('+g_filename+'):\n'
' Error in -b \"'+argv[i+1]+'\" argument.\n'
' '+bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings,
argv[i+1],
'__command_line_argument__')
else:
BasicUIReadBindingsFile(settings.user_bindings,
argv[i+1])
#i += 2
del(argv[i:i+2])
elif argv[i] == '-order-command':
settings.order_method = 'by_command'
#i += 1
del(argv[i:i+1])
elif argv[i] == '-order-file':
settings.order_method = 'by_file'
#i += 1
del(argv[i:i+1])
elif ((argv[i] == '-order-tree') or (argv[i] == '-order-dfs')):
settings.order_method = 'by_tree'
del(argv[i:i+1])
elif ((argv[i] == '-importpath') or
(argv[i] == '-import-path') or
(argv[i] == '-import_path')):
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_filename+'):\n'
' Error in \"'+argv[i]+'\" argument.\"\n'
' The \"'+argv[i]+'\" argument should be followed by the name of\n'
' an environment variable storing a path for including/importing files.\n')
TtreeShlex.custom_path = RemoveOuterQuotes(argv[i+1])
del(argv[i:i+2])
elif ((argv[i][0] == '-') and (__name__ == "__main__")):
#elif (__name__ == "__main__"):
raise InputError('Error('+g_filename+'):\n'
'Unrecognized command line argument \"'+argv[i]+'\"\n')
else:
i += 1
if __name__ == "__main__":
# Instantiate the lexer we will be using.
# (The lexer's __init__() function requires an opened file.
# Assuming __name__ == "__main__", then the name of that file should
# be the last remaining (unprocessed) argument in the argument list.
# Otherwise, the name of that file will be determined later by the
# python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error('+g_filename+'):\n'
' This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
settings.lex = TemplateLexer(open(argv[1], 'r'), argv[1]) # Parse text from file
except IOError:
sys.stderr.write('Error('+g_filename+'):\n'
' unable to open file\n'
' \"'+argv[1]+'\"\n'
' for reading.\n')
sys.exit(1)
del(argv[1:2])
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
raise InputError('Syntax Error ('+g_filename+'):\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' '+(' '.join(problem_args))+'\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
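# Illustrative command line (not in the original source) for the stand-alone
# program, using only flags handled above:
#   ttree.py -a "@atom:x 2" -b bindings.txt -order-file input.ttree
# "-a" supplies exclusive bindings, "-b" supplies ordinary bindings, the
# "-order-*" flags select how values are assigned, and the single remaining
# argument is the ttree file handed to the TemplateLexer.
# ("bindings.txt" and "input.ttree" are hypothetical file names.)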
def BasicUI(settings,
static_tree_root,
instance_tree_root,
static_commands,
instance_commands):
"""
BasicUI()
This function loads a ttree file and optional custom bindings for it,
creates a "static" tree (of defined ttree classes),
creates an "instance" tree (of instantiated ttree objects),
automatically assigns values to unbound variables,
and substitutes them into the text templates (i.e., renders the templates).
The actual writing of the templates to a file is not handled here.
"""
# Parsing and compiling is a multi-pass process.
# Step 1: Read in the StaticObj (class) definitions, without checking
# whether or not the instance_children refer to valid StaticObj types.
sys.stderr.write('parsing the class definitions...')
static_tree_root.Parse(settings.lex)
#gc.collect()
#sys.stderr.write('static = ' + str(static_tree_root) + '\n')
# Step 2: Now that the static tree has been constructed, lookup
# any references to classes (StaticObjs), contained within
# the instance_children or class_parents of each node in
# static_tree_root. Replace them with (pointers to)
# the StaticObjs they refer to (and check validity).
# (Note: Variables stored within the templates defined by write()
# and write_once() statements may also refer to StaticObjs in
# the tree, but we leave these references alone. We handle
# these assignments later using "AssignVarPtrs()" below.)
sys.stderr.write(' done\nlooking up classes...')
static_tree_root.LookupStaticRefs()
#gc.collect()
# Step 3: Now scan through all the (static) variables within the templates
# and replace the (static) variable references to pointers
# to nodes in the StaticObj tree:
sys.stderr.write(' done\nlooking up @variables...')
# Here we assign pointers for variables in "write_once(){text}" templates:
AssignStaticVarPtrs(static_tree_root, search_instance_commands=False)
# Here we assign pointers for variables in "write(){text}" templates:
AssignStaticVarPtrs(static_tree_root, search_instance_commands=True)
sys.stderr.write(' done\nconstructing the tree of class definitions...')
sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
#gc.collect()
# Step 4: Construct the instance tree (the tree of instantiated
# classes) from the static tree of type definitions.
sys.stderr.write('constructing the instance tree...\n')
class_parents_in_use = set([])
instance_tree_root.BuildInstanceTree(static_tree_root, class_parents_in_use)
#sys.stderr.write('done\n garbage collection...')
#gc.collect()
sys.stderr.write(' done\n')
#sys.stderr.write('instance_tree = ' + str(instance_tree_root) + '\n')
# Step 5: The commands must be carried out in a specific order.
# (for example, the "write()" and "new" commands).
# Search through the tree, and append commands to a command list.
# Then re-order the list in the order the commands should have
# been executed in. (We don't carry out the commands yet,
# we just store them and sort them.)
class_parents_in_use = set([])
static_tree_root.BuildCommandList(static_commands)
instance_tree_root.BuildCommandList(instance_commands)
#sys.stderr.write('static_commands = '+str(static_commands)+'\n')
#sys.stderr.write('instance_commands = '+str(instance_commands)+'\n')
# Step 6: We are about to assign numbers to the variables.
# We need to decide the order in which to assign them.
# By default static variables (@) are assigned in the order
# they appear in the file.
# And, by default instance variables ($)
# are assigned in the order they are created during instantiation.
#sys.stderr.write(' done\ndetermining variable count order...')
AssignVarOrderByFile(static_tree_root, '@', search_instance_commands=True)
AssignVarOrderByCommand(instance_commands, '$')
# Step 7: Assign the variables.
# (If the user requested any customized variable bindings,
# load those now.)
if len(settings.user_bindings_x) > 0:
reserved_values = CustomizeBindings(settings.user_bindings_x,
static_tree_root,
instance_tree_root)
else:
reserved_values = None
sys.stderr.write('sorting variables...\n')
AutoAssignVals(static_tree_root,
(settings.order_method != 'by_tree'),
reserved_values)
AutoAssignVals(instance_tree_root,
(settings.order_method != 'by_tree'),
reserved_values)
if len(settings.user_bindings) > 0:
CustomizeBindings(settings.user_bindings,
static_tree_root,
instance_tree_root)
sys.stderr.write(' done\n')
if __name__ == "__main__":
"""
This is a "main module" wrapper for invoking ttree.py
as a stand-alone program. This program:
1) reads a ttree file,
2) constructs a tree of class definitions (g_objectdefs),
3) constructs a tree of instantiated class objects (g_objects),
4) automatically assigns values to the variables,
5) and carries out the "write" commands to write the templates to a file (or files).
"""
####### Main Code Below: #######
g_program_name = g_filename
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
sys.stderr.write('\n(python version '+str(sys.version)+')\n')
try:
settings = BasicUISettings()
BasicUIParseArgs(sys.argv, settings)
# Data structures to store the class definitions and instances
g_objectdefs = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
g_objects = InstanceObj('', None) # The root of the instance tree
# has name '' (equivalent to '/')
# A list of commands to carry out
g_static_commands = []
g_instance_commands = []
BasicUI(settings,
g_objectdefs,
g_objects,
g_static_commands,
g_instance_commands)
# Now write the files
# (Finally carry out the "write()" and "write_once()" commands.)
# Optional: Multiple commands to write to the same file can be merged to
# reduce the number of times the file is opened and closed.
sys.stderr.write('writing templates...\n')
# Erase the files that will be written to:
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
g_static_commands = MergeWriteCommands(g_static_commands)
g_instance_commands = MergeWriteCommands(g_instance_commands)
# Write the files with the original variable names present
WriteTemplatesVarName(g_static_commands)
WriteTemplatesVarName(g_instance_commands)
# Write the files with the variable names substituted by values
WriteTemplatesValue(g_static_commands)
WriteTemplatesValue(g_instance_commands)
sys.stderr.write(' done\n')
# Step 11: Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n\n'+str(err)+'\n')
sys.exit(-1)
|
qipa/lammps
|
tools/moltemplate/src/ttree.py
|
Python
|
gpl-2.0
| 221,891
|
[
"LAMMPS",
"NAMD",
"VisIt"
] |
6d93a16237cefc8f682dc5962843c62d80c1871683f524f65e7b419fff671fcb
|
#! /usr/bin/env python
"""
Test NIFTI support in VTK by reading a file, writing it, and
then re-reading it to ensure that the contents are identical.
"""
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
from vtk.util.misc import vtkGetTempDir
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
import sys
import os
testfiles = [
["minimal.nii.gz", "out_minimal.nii.gz"],
["minimal.img.gz", "out_minimal.hdr"]
]
dispfile = "avg152T1_RL_nifti.nii.gz"
def TestDisplay(file1):
"""Display the output"""
inpath = os.path.join(str(VTK_DATA_ROOT), "Data", file1)
reader = vtk.vtkNIFTIImageReader()
reader.SetFileName(inpath)
reader.Update()
size = reader.GetOutput().GetDimensions()
center = reader.GetOutput().GetCenter()
spacing = reader.GetOutput().GetSpacing()
center1 = (center[0], center[1], center[2])
center2 = (center[0], center[1], center[2])
if size[2] % 2 == 1:
center1 = (center[0], center[1], center[2] + 0.5*spacing[2])
if size[0] % 2 == 1:
center2 = (center[0] + 0.5*spacing[0], center[1], center[2])
vrange = reader.GetOutput().GetScalarRange()
map1 = vtk.vtkImageSliceMapper()
map1.BorderOn()
map1.SliceAtFocalPointOn()
map1.SliceFacesCameraOn()
map1.SetInputConnection(reader.GetOutputPort())
map2 = vtk.vtkImageSliceMapper()
map2.BorderOn()
map2.SliceAtFocalPointOn()
map2.SliceFacesCameraOn()
map2.SetInputConnection(reader.GetOutputPort())
slice1 = vtk.vtkImageSlice()
slice1.SetMapper(map1)
slice1.GetProperty().SetColorWindow(vrange[1]-vrange[0])
slice1.GetProperty().SetColorLevel(0.5*(vrange[0]+vrange[1]))
slice2 = vtk.vtkImageSlice()
slice2.SetMapper(map2)
slice2.GetProperty().SetColorWindow(vrange[1]-vrange[0])
slice2.GetProperty().SetColorLevel(0.5*(vrange[0]+vrange[1]))
ratio = size[0]*1.0/(size[0]+size[2])
ren1 = vtk.vtkRenderer()
ren1.SetViewport(0,0,ratio,1.0)
ren2 = vtk.vtkRenderer()
ren2.SetViewport(ratio,0.0,1.0,1.0)
ren1.AddViewProp(slice1)
ren2.AddViewProp(slice2)
cam1 = ren1.GetActiveCamera()
cam1.ParallelProjectionOn()
cam1.SetParallelScale(0.5*spacing[1]*size[1])
cam1.SetFocalPoint(center1[0], center1[1], center1[2])
cam1.SetPosition(center1[0], center1[1], center1[2] - 100.0)
cam2 = ren2.GetActiveCamera()
cam2.ParallelProjectionOn()
cam2.SetParallelScale(0.5*spacing[1]*size[1])
cam2.SetFocalPoint(center2[0], center2[1], center2[2])
cam2.SetPosition(center2[0] + 100.0, center2[1], center2[2])
if "-I" in sys.argv:
style = vtk.vtkInteractorStyleImage()
style.SetInteractionModeToImageSlicing()
iren = vtk.vtkRenderWindowInteractor()
iren.SetInteractorStyle(style)
renwin = vtk.vtkRenderWindow()
renwin.SetSize(size[0] + size[2], size[1])
renwin.AddRenderer(ren1)
renwin.AddRenderer(ren2)
renwin.Render()
if "-I" in sys.argv:
renwin.SetInteractor(iren)
iren.Initialize()
iren.Start()
return renwin
def TestReadWriteRead(infile, outfile):
"""Read, write, and re-read a file, return difference."""
inpath = os.path.join(str(VTK_DATA_ROOT), "Data", infile)
outpath = os.path.join(str(VTK_TEMP_DIR), outfile)
# read a NIFTI file
reader = vtk.vtkNIFTIImageReader()
reader.SetFileName(inpath)
reader.TimeAsVectorOn()
reader.Update()
writer = vtk.vtkNIFTIImageWriter()
writer.SetInputConnection(reader.GetOutputPort())
writer.SetFileName(outpath)
# copy most information directly from the header
writer.SetNIFTIHeader(reader.GetNIFTIHeader())
# this information will override the reader's header
writer.SetQFac(reader.GetQFac())
writer.SetTimeDimension(reader.GetTimeDimension())
writer.SetQFormMatrix(reader.GetQFormMatrix())
writer.SetSFormMatrix(reader.GetSFormMatrix())
writer.Write()
reader2 = vtk.vtkNIFTIImageReader()
reader2.SetFileName(outpath)
reader2.TimeAsVectorOn()
reader2.Update()
diff = vtk.vtkImageMathematics()
diff.SetOperationToSubtract()
diff.SetInputConnection(0,reader.GetOutputPort())
diff.SetInputConnection(1,reader2.GetOutputPort())
diff.Update()
diffrange = diff.GetOutput().GetScalarRange()
differr = diffrange[0]**2 + diffrange[1]**2
return differr
for infile, outfile in testfiles:
err = TestReadWriteRead(infile, outfile)
if err:
sys.stderr.write(
"Input " + infile + " differs from outfile " + outfile)
sys.exit(1)
renWin = TestDisplay(dispfile)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/Image/Testing/Python/TestNIFTIReaderWriter.py
|
Python
|
gpl-3.0
| 4,659
|
[
"VTK"
] |
311c9e468657cba65f84ff60cdcfe8801525f17bad3bb2dffdbefaa44724fdb6
|
#!/usr/bin/env python
class codata2016:
def __init__(self):
self.mp = 1.672621898e-27 # kg
self.me = 9.10938356e-31 # kg
# end def
# end class
# HELPER FUNCTIONS
# ======================================
from lxml import etree
def xml_print(node):
print etree.tostring(node,pretty_print=True)
# end def
from copy import deepcopy
import numpy as np
def matrix_to_text(matrix):
text = "\n"+\
"\n".join([" ".join( map(str,pos) ) for pos in matrix])\
+"\n"
return text
# end def matrix_to_text
def find_dimers(axes,pos,sep_min=1.0,sep_max=1.4):
natom,ndim = pos.shape
import pymatgen as mg
elem = ['H'] * natom
struct = mg.Structure(axes,elem,pos,coords_are_cartesian=True)
dtable = struct.distance_matrix
xidx,yidx = np.where( (sep_min<dtable) & (dtable<sep_max) )
pairs = set()
for myx,myy in zip(xidx,yidx):
mypair1 = (myx,myy)
mypair2 = (myy,myx)
if (not mypair1 in pairs) and (not mypair2 in pairs):
pairs.add(mypair1)
# end if
# end for
return pairs
# end def find_dimers
def Hu_idx(axes,pos,sep_min=1.0,sep_max=1.4):
''' locate atoms to be set to spin up '''
pairs = find_dimers(axes,pos,sep_min,sep_max)
Hu_idx_set = set()
for pair in pairs:
Hu_idx_set.add(pair[0])
# end for pair
return Hu_idx_set
# end def Hu_idx
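# Illustrative usage sketch for the helpers above (added for clarity; this
# function is never called by the script, and the cell vectors and positions
# are made-up numbers):
def _example_find_dimers():
    toy_axes = np.eye(3) * 10.0             # a toy cubic cell
    toy_pos = np.array([[0.0, 0.0, 0.0],
                        [1.2, 0.0, 0.0]])   # two H atoms separated by 1.2
    pairs = find_dimers(toy_axes, toy_pos)  # e.g. set([(0, 1)])
    up_set = Hu_idx(toy_axes, toy_pos)      # e.g. set([0]), the spin-up protons
    return pairs, up_set
# end def _example_find_dimers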
# START EDIT QMCPACK INPUT XML
# ======================================
def change_ion0_to_wf_centers(ion0,axes):
# change ion0 particle set to be artificial centers around which
# to expand the wave function
# ======================================
ion0.attrib['name'] = 'wf_centers'
protons = ion0.find("group")
if 'mass' in protons.attrib:
protons.attrib.pop("mass")
protons.attrib.pop("size")
for child in protons:
if child.attrib["name"] != "position" and child.attrib["name"] != "charge":
protons.remove(child)
elif child.attrib["name"] == "charge":
child.text = child.text.replace("1","0")
# end if
# end for
# record and return the wf_centers for later use
# ======================================
centers_text = ion0.find("group/attrib").text
centers = [map(float,center.split()) for center in centers_text.split("\n")[1:-1]]
nions = len(centers)
ion0.find("group").attrib["size"] = str(nions)
# split centers to up and down groups
# ======================================
# get data
Hu_idx_set = Hu_idx(axes,np.array(centers))
Hu_centers = []
Hd_centers = []
for i in range(nions):
if i in Hu_idx_set:
Hu_centers.append(centers[i])
else:
Hd_centers.append(centers[i])
# end if
# end for i
from input_xml import InputXml
inp = InputXml()
# feed data to xml node
pos_node = protons.find('.//attrib[@name="position"]')
pos_node.text = inp.arr2text( np.array(Hu_centers) )
protons.set('name','Hu')
protons.set('size',str(nions/2))
dn_protons = deepcopy(protons)
dn_protons.set('name','Hd')
dn_protons.find('.//attrib[@name="position"]').text = inp.arr2text( np.array(Hd_centers) )
dn_protons.set('size',str(nions/2))
ion0.append(dn_protons)
return centers
# end def change_ion0_to_wf_centers
def edit_quantum_particleset(e_particleset,centers,rs,axes,ion_width):
# centers: wf_centers
nions = len(centers)
# locate even and odd sub-lattices
Hu_idx_set = Hu_idx(axes,np.array(centers))
# initialize electrons manually: remove random, add attrib positions
# ======================================
# do not initialize eletrons at random
# --------------------------------------
e_particleset.attrib.pop("random")
def gaussian_move(centers,sigma):
move = np.random.randn( *np.shape(centers) )*sigma
return np.copy(centers + move)
# end def
# sprinkle particles around ion positions
# --------------------------------------
electron_pos = gaussian_move(centers,0.2*rs)
# average <R^2>
sig2 = 1./(4.*ion_width) # not 3.* for each direction
ion_sig = np.sqrt(sig2)
proton_pos = gaussian_move(centers,ion_sig) # protons are slow, initialize well
Hu_centers = []
Hd_centers = []
eu_centers = []
ed_centers = []
for i in range(nions):
if i in Hu_idx_set:
Hu_centers.append(proton_pos[i])
eu_centers.append(electron_pos[i])
else:
Hd_centers.append(proton_pos[i])
ed_centers.append(electron_pos[i])
# end if
# end for i
# should be 2 groups in electronic wf, one u, one d
spin_groups = e_particleset.findall("group")
for i in range(2): # loop through u,d
# 0 for up spin, 1 for down spin
e_section = spin_groups[i]
# place u on Hu, d on Hd
if i==0:
epos = eu_centers
else: # i==1
epos = ed_centers
# end if
epos_text = "\n"+"\n".join([" ".join( map(str,pos) ) for pos in epos])+"\n"
new_section = etree.Element("attrib",
{"name":"position","datatype":"posArray","condition":"0"})
new_section.text = epos_text
e_section.append( new_section )
# end for i
# add protons to the particleset
# ======================================
# steal from electron section
unit = codata2016()
pu_section = deepcopy( e_section )
pu_section.attrib["name"] = "Hu"
pu_section.attrib['mass'] = str(unit.mp/unit.me)
pu_section.attrib['size'] = str(nions/2)
pu_section.xpath('.//parameter[@name="charge"]')[0].text = ' 1 '
pu_section.xpath('.//parameter[@name="mass"]')[0].text = ' %s ' % str(unit.mp/unit.me)
ppos_text = "\n"+"\n".join([" ".join( map(str,pos) ) for pos in Hu_centers])+"\n"
pu_section.find("attrib").text = ppos_text
e_particleset.append(pu_section)
p_section = deepcopy( e_section )
p_section.attrib["name"] = "Hd"
p_section.attrib['mass'] = str(unit.mp/unit.me)
p_section.attrib['size'] = str(nions/2)
p_section.xpath('.//parameter[@name="charge"]')[0].text = ' 1 '
p_section.xpath('.//parameter[@name="mass"]')[0].text = ' %s ' % str(unit.mp/unit.me)
ppos_text = "\n"+"\n".join([" ".join( map(str,pos) ) for pos in Hd_centers])+"\n"
p_section.find("attrib").text = ppos_text
e_particleset.append(p_section)
# end def
def edit_jastrows(wf):
# 1. grab Uep and remove ep jastrow
j1 = wf.xpath('//jastrow[@name="J1"]')[0]
Uep = j1.xpath(".//coefficients")[0].text
wf.remove(j1)
# 2. edit 2-body jastrow to add e-p correlation
j2 = wf.xpath('//jastrow[@name="J2"]')[0]
term = j2.find("correlation")
etypes = {0:"u",1:"d"}
for ie in range(2):
for hname in ["Hu","Hd"]:
eHterm = deepcopy(term)
etype = etypes[ie] # 0:u, 1:d
eHterm.attrib["speciesA"] = etype
eHterm.attrib["speciesB"] = hname
coeff = eHterm.find("coefficients")
coeff.attrib["id"] = etype+hname
coeff.text = Uep # initialize e-p Jastrow to clamped values
j2.append(eHterm)
# end for hname
# end for ie
# end def edit_jastrows
def edit_hamiltonian(ham):
# remove all interactions that requires the "ion0" particleset
for interaction in ham:
use_ion0 = False
for key in interaction.attrib.keys():
if interaction.attrib[key]=="ion0":
use_ion0 = True
# end if
# end for key
if use_ion0:
ham.remove(interaction)
# end if
# end for
# end def edit_hamiltonian
def edit_determinantset(wf,centers,ion_width,axes):
nions = len(centers)
# get electronic sposet_builder
ebuilder = wf.find('.//sposet_builder[@source="ion0"]')
if ebuilder is None:
raise RuntimeError('electronic sposet_builder with source="ion0" not found.')
# end if
# build single-particle orbitals around "wf_centers" instead of "ion0"
wf.find("sposet_builder").attrib['source'] = 'wf_centers'
# build electronic single-particle orbitals around "wf_centers" instead of "ion0"
ebuilder.set('source','wf_centers')
# use double precision
ebuilder.set('precision','double')
# start <sposet_builder> for protons
pbuilder = etree.Element('sposet_builder',attrib={
'type':'mo', # use MolecularOrbitalBuilder
'source':'wf_centers', # use static lattice sites for proton w.f.
'transform':'yes', # use numerical radial function and NGOBuilder
'name':'proton_builder'
}) # !!!! transformOpt flag forces cuspCorr="yes" and key="NMO"
# construct <basisset>
pbasis = etree.Element("basisset")
for hname in ['Hu','Hd']:
pao_basis = etree.Element('atomicBasisSet',attrib={
'elementType':hname, # center for atomic basis set
'angular':'cartesian', # Use Gamess-style order of angular functions
'type':'GTO', # use Gaussians in NGOBuilder::addRadialOrbital()
'normalized':'yes' # do not mess with my input coefficients
})
# build <grid>
bgrid = etree.Element('grid',{
'npts':'1001',
'rf':'100',
'ri':'1.e-6',
'type':'log'
})
# build <basisGroup>
bgroup = etree.Element('basisGroup',{
'l':'0',
'n':'1',
'rid':'R0'
})
bgroup.append(etree.Element('radfunc',{'contraction':'1.0','exponent':str(ion_width)}))
pao_basis.append(bgrid)
pao_basis.append(bgroup)
pbasis.append(pao_basis)
# end for hname
# finished construct </basisset>
pbuilder.append(pbasis)
# build <sposet>
## !!!! already done at the basis set level
## split into up and down protons
#Hu_idx_set = Hu_idx(axes,np.array(centers))
#identity = np.eye(nions)
#Hup_det = []
#Hdown_det= []
#for i in range(nions):
# if i in Hu_idx_set:
# Hup_det.append(identity[i])
# else:
# Hdown_det.append(identity[i])
# # end if
## end for i
pup_sposet = etree.Element('sposet',attrib={
'name':'spo_Hu',
'id':'proton_orbs_up',
'size':str(nions/2)
})
coeff = etree.Element("coefficient",
{"id":"HudetC","type":"constArray","size":str(nions/2)})
coeff.text = matrix_to_text( np.eye(nions)[:nions/2] )
pup_sposet.append(coeff)
pdn_sposet = etree.Element('sposet',attrib={
'name':'spo_Hd',
'id':'proton_orbs_down',
'size':str(nions/2)
})
coeff1 = etree.Element("coefficient",
{"id":"HddetC","type":"constArray","size":str(nions/2)})
coeff1.text = matrix_to_text( np.eye(nions)[nions/2:])
pdn_sposet.append(coeff1)
# done construct </sposet>
pbuilder.append(pup_sposet)
pbuilder.append(pdn_sposet)
# end </sposet_builder>
# build <determinant>
uHdet = etree.Element('determinant',{
'group':'Hu',
'id':'Hudet',
'size':str(nions/2),
'sposet':pup_sposet.get('name'),
'no_bftrans':'yes' # do not transform proton coordinates
})
dHdet = etree.Element('determinant',{
'group':'Hd',
'id':'Hddet',
'size':str(nions/2),
'sposet':pdn_sposet.get('name'),
'no_bftrans':'yes' # do not transform proton coordinates
})
# end </determinant>
slater = wf.find('determinantset/slaterdeterminant')
slater.append(uHdet)
slater.append(dHdet)
ebuilder_idx = wf.index(ebuilder)
wf.insert(ebuilder_idx+1,pbuilder) # insert proton_builder after electronic sposet_builder
# end def edit_determinantset
# Main Routine
# ======================================
def bo_to_nobo(bo_input_name,nobo_input_name,ion_width=9.0,rs=1.31):
parser = etree.XMLParser(remove_blank_text=True)
xml = etree.parse(bo_input_name,parser)
from input_xml import InputXml
inp = InputXml()
inp.read(bo_input_name)
axes = inp.lattice_vectors()
del inp
ion0 = xml.xpath('//particleset[@name="ion0"]')[0]
centers = change_ion0_to_wf_centers(ion0,axes)
# !! minimum atom spacing, special to cubic cell
alat = np.sort(np.unique(np.array(centers).flatten()))[1]
e_particleset = xml.xpath('//particleset[@name="e"]')[0]
edit_quantum_particleset(e_particleset,centers,rs,axes,ion_width)
wf = xml.xpath("//wavefunction")[0]
edit_jastrows(wf)
edit_determinantset(wf,centers,ion_width,axes)
ham = xml.xpath("//hamiltonian")[0]
edit_hamiltonian(ham)
xml.write(nobo_input_name,pretty_print=True)
# end def bo_to_nobo
if __name__ == "__main__":
import sys
#prefix = sys.argv[1]
#bo_to_nobo(prefix+"-dmc.in.xml",prefix+"-nobo.in.xml")
inp_xml = sys.argv[1]
out_xml = 'nobo-'+inp_xml
bo_to_nobo(inp_xml,out_xml)
# end __main__
|
Paul-St-Young/QMC
|
setup_unpolarized.py
|
Python
|
mit
| 12,997
|
[
"GAMESS",
"QMCPACK",
"pymatgen"
] |
fb1750cd0fb9c669f59da40e2fb88cb535590532e4879bd8447fda34fa3f17e4
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# Hydrogen Bonding Analysis
"""
Hydrogen Bond analysis --- :mod:`MDAnalysis.analysis.hbonds.hbond_analysis`
===========================================================================
:Author: David Caplan, Lukas Grossar, Oliver Beckstein
:Year: 2010-2012
:Copyright: GNU Public License v3
Given a :class:`~MDAnalysis.core.universe.Universe` (simulation
trajectory with 1 or more frames) measure all hydrogen bonds for each
frame between selections 1 and 2.
The :class:`HydrogenBondAnalysis` class is modeled after the `VMD
HBONDS plugin`_.
.. _`VMD HBONDS plugin`: http://www.ks.uiuc.edu/Research/vmd/plugins/hbonds/
Options:
- *update_selections* (``True``): update selections at each frame?
- *selection_1_type* ("both"): selection 1 is the: "donor", "acceptor", "both"
- donor-acceptor *distance* (Å): 3.0
- Angle *cutoff* (degrees): 120.0
- *forcefield* to switch between default values for different force fields
- *donors* and *acceptors* atom types (to add additional atom names)
.. _Analysis Output:
Output
------
The results are
- the **identities** of donor and acceptor heavy-atoms,
- the **distance** between the heavy atom acceptor atom and the hydrogen atom
that is bonded to the heavy atom donor,
- the **angle** donor-hydrogen-acceptor angle (180º is linear).
Hydrogen bond data are returned per frame, which is stored in
:attr:`HydrogenBondAnalysis.timeseries` (In the following description, ``#``
indicates comments that are not part of the output.)::
results = [
[ # frame 1
[ # hbond 1
<donor index (1-based)>, <acceptor index (1-based)>, <donor index (0-based)>,
<acceptor index (0-based)>, <donor string>, <acceptor string>,
<distance>, <angle>
],
[ # hbond 2
<donor index (1-based)>, <acceptor index (1-based)>, <donor index (0-based)>,
<acceptor index (0-based)>, <donor string>, <acceptor string>,
<distance>, <angle>
],
....
],
[ # frame 2
[ ... ], [ ... ], ...
],
...
]
.. Note::
For historic reasons, the *donor index* and *acceptor index* are 1-based
indices. To get the :attr:`Atom.index` (the 0-based index typically used in
MDAnalysis) simply subtract 1. For instance, to find an atom in
:attr:`Universe.atoms` by *index* from the output one would use
``u.atoms[index-1]``.
.. deprecated:: 0.15.0
This feature is being deprecated in favor of zero-based indices and is targeted
for removal in 0.16.0.
Using the :meth:`HydrogenBondAnalysis.generate_table` method one can reformat
the results as a flat "normalised" table that is easier to import into a
database for further processing. :meth:`HydrogenBondAnalysis.save_table` saves
the table to a pickled file. The table itself is a :class:`numpy.recarray`.
Detection of hydrogen bonds
---------------------------
Hydrogen bonds are recorded based on a geometric criterion:
1. The distance between acceptor and hydrogen is less than or equal to
*distance* (default is 3 Å).
2. The angle between donor-hydrogen-acceptor is greater than or equal to
*angle* (default is 120º).
The cut-off values *angle* and *distance* can be set as keywords to
:class:`HydrogenBondAnalysis`.
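For example (an illustrative sketch only; the selection strings and the 2.9 Å
value are not taken from this module, and ``u`` is assumed to be a
:class:`~MDAnalysis.core.universe.Universe`), stricter criteria can be
requested via these keywords::
  h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(
        u, 'protein', 'resname HOH', distance=2.9, angle=150.0)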
Donor and acceptor heavy atoms are detected from atom names. The current
defaults are appropriate for the CHARMM27 and GLYCAM06 force fields as defined
in Table `Default atom names for hydrogen bonding analysis`_.
Hydrogen atoms bonded to a donor are searched with one of two algorithms,
selected with the *detect_hydrogens* keyword.
*distance*
Searches for all hydrogens (name "H*" or name "[123]H" or type "H") in the
same residue as the donor atom within a cut-off distance of 1.2 Å.
*heuristic*
Looks at the next three atoms in the list of atoms following the donor and
selects any atom whose name matches (name "H*" or name "[123]H"). For
The *distance* search is more rigorous but slower and is set as the
default. Until release 0.7.6, only the heuristic search was implemented.
.. versionchanged:: 0.7.6
Distance search added (see
:meth:`HydrogenBondAnalysis._get_bonded_hydrogens_dist`) and heuristic
search improved (:meth:`HydrogenBondAnalysis._get_bonded_hydrogens_list`)
.. _Default atom names for hydrogen bonding analysis:
.. table:: Default heavy atom names for CHARMM27 force field.
=========== ============== =========== ====================================
group donor acceptor comments
=========== ============== =========== ====================================
main chain N O
water OH2, OW OH2, OW SPC, TIP3P, TIP4P (CHARMM27,Gromacs)
ARG NE, NH1, NH2
ASN ND2 OD1
ASP OD1, OD2
CYS SG
CYH SG possible false positives for CYS
GLN NE2 OE1
GLU OE1, OE2
HIS ND1, NE2 ND1, NE2 presence of H determines if donor
HSD ND1 NE2
HSE NE2 ND1
HSP ND1, NE2
LYS NZ
MET SD see e.g. [Gregoret1991]_
SER OG OG
THR OG1 OG1
TRP NE1
TYR OH OH
=========== ============== =========== ====================================
.. table:: Heavy atom types for GLYCAM06 force field.
=========== =========== ==================
element donor acceptor
=========== =========== ==================
N N,NT,N3 N,NT
O OH,OW O,O2,OH,OS,OW,OY
S SM
=========== =========== ==================
Donor and acceptor names for the CHARMM27 force field will also work for e.g.
OPLS/AA (tested in Gromacs). Residue names in the table are for information
only and are not taken into account when determining acceptors and donors.
This can potentially lead to some ambiguity in the assignment of
donors/acceptors for residues such as histidine or cytosine.
For more information about the naming convention in GLYCAM06 have a look at the
`Carbohydrate Naming Convention in Glycam`.
.. _`Carbohydrate Naming Convention in Glycam`:
http://glycam.ccrc.uga.edu/documents/FutureNomenclature.htm
The lists of donor and acceptor names can be extended by providing lists of
atom names in the *donors* and *acceptors* keywords to
:class:`HydrogenBondAnalysis`. If the lists are entirely inappropriate
(e.g. when analysing simulations done with a force field that uses very
different atom names) then one should either use the value "other" for *forcefield*
to set no default values, or derive a new class and set the default list oneself::
class HydrogenBondAnalysis_OtherFF(HydrogenBondAnalysis):
DEFAULT_DONORS = {"OtherFF": tuple(set([...]))}
DEFAULT_ACCEPTORS = {"OtherFF": tuple(set([...]))}
Then simply use the new class instead of the parent class and call it with
*forcefield* = "OtherFF". Please also consider contributing the list of heavy
atom names to MDAnalysis.
.. rubric:: References
.. [Gregoret1991] L.M. Gregoret, S.D. Rader, R.J. Fletterick, and
F.E. Cohen. Hydrogen bonds involving sulfur atoms in proteins. Proteins,
9(2):99–107, 1991. `10.1002/prot.340090204`_.
.. _`10.1002/prot.340090204`: http://dx.doi.org/10.1002/prot.340090204
How to perform HydrogenBondAnalysis
-----------------------------------
All protein-water hydrogen bonds can be analysed with ::
import MDAnalysis
import MDAnalysis.analysis.hbonds
u = MDAnalysis.Universe('topology', 'trajectory')
h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(u, 'protein', distance=3.0, angle=120.0)
h.run()
The results are stored as the attribute
:attr:`HydrogenBondAnalysis.timeseries`; see :ref:`Analysis Output`
for the format and further options.
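As a small follow-up sketch (not part of the original example above), the
stored results can be post-processed with the helper methods of the class::
  h.generate_table()          # flat numpy.recarray stored in h.table
  counts = h.count_by_time()  # recarray with fields 'time' and 'count'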
.. Note::
Due to the way :class:`HydrogenBondAnalysis` is implemented, it is
more efficient to have the second selection (*selection2*) be the
*larger* group, e.g. the water when looking at water-protein
H-bonds or the whole protein when looking at ligand-protein
interactions.
.. Note::
The topology supplied and the trajectory must reflect the same total number
of atoms.
.. TODO: how to analyse the ouput and notes on selection updating
Classes
-------
.. autoclass:: HydrogenBondAnalysis
:members:
.. attribute:: timesteps
List of the times of each timestep. This can be used together with
:attr:`~HydrogenBondAnalysis.timeseries` to find the specific time point
of a hydrogen bond existence, or see :attr:`~HydrogenBondAnalysis.table`.
.. attribute:: timeseries
Results of the hydrogen bond analysis, stored for each frame. In
the following description, # indicates comments that are not
part of the output::
results = [
[ # frame 1
[ # hbond 1
<donor index (1-based)>, <acceptor index (1-based)>, <donor index (0-based)>,
<acceptor index (0-based)>, <donor string>, <acceptor string>,
<distance>, <angle>
],
[ # hbond 2
<donor index (1-based)>, <acceptor index (1-based)>, <donor index (0-based)>,
<acceptor index (0-based)>, <donor string>, <acceptor string>,
<distance>, <angle>
],
....
],
[ # frame 2
[ ... ], [ ... ], ...
],
...
]
The time of each step is not stored with each hydrogen bond frame but in
:attr:`~HydrogenBondAnalysis.timesteps`.
.. Note::
The *index* is a 1-based index. To get the :attr:`Atom.index` (the
0-based index typically used in MDAnalysis) simply subtract 1. For
instance, to find an atom in :attr:`Universe.atoms` by *index* one
would use ``u.atoms[index-1]``.
.. attribute:: table
A normalised table of the data in
:attr:`HydrogenBondAnalysis.timeseries`, generated by
:meth:`HydrogenBondAnalysis.generate_table`. It is a
:class:`numpy.recarray` with the following columns:
0. "time"
1. "donor_idx"
2. "acceptor_idx"
3. "donor_index"
4. "acceptor_index"
5. "donor_resnm"
6. "donor_resid"
7. "donor_atom"
8. "acceptor_resnm"
9. "acceptor_resid"
10. "acceptor_atom"
11. "distance"
12. "angle"
It takes up more space than
:attr:`~HydrogenBondAnalysis.timeseries` but it is easier to
analyze and to import into databases (e.g. using recsql_).
.. Note::
The *index* is a 1-based index. To get the :attr:`Atom.index` (the
0-based index typically used in MDAnalysis) simply subtract 1. For
instance, to find an atom in :attr:`Universe.atoms` by *index* one
would use ``u.atoms[idx_zero]``. The 1-based index is deprecated and
targeted for removal in 0.16.0
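As an illustrative sketch (the 2.5 Å threshold is arbitrary), the columns of
this recarray support plain numpy-style filtering::
  h.generate_table()
  short_bonds = h.table[h.table.distance < 2.5]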
.. automethod:: _get_bonded_hydrogens
.. automethod:: _get_bonded_hydrogens_dist
.. automethod:: _get_bonded_hydrogens_list
.. deprecated:: 0.15.0
The donor and acceptor indices being 1-based is deprecated in favor of
a zero-based index. This can be accessed by "donor_index" or
"acceptor_index" removal of the 1-based indices is targeted
for version 0.16.0
"""
import six
from six.moves import range, zip, map, cPickle
from collections import defaultdict
import numpy as np
import warnings
import logging
from MDAnalysis import MissingDataWarning, NoDataError, SelectionError, SelectionWarning
from MDAnalysis.lib.util import parse_residue
from MDAnalysis.lib.mdamath import norm, angle
from MDAnalysis.lib.log import ProgressMeter, _set_verbose
from MDAnalysis.lib.NeighborSearch import AtomNeighborSearch
logger = logging.getLogger('MDAnalysis.analysis.hbonds')
class HydrogenBondAnalysis(object):
"""Perform a hydrogen bond analysis
The analysis of the trajectory is performed with the
:meth:`HydrogenBondAnalysis.run` method. The result is stored in
:attr:`HydrogenBondAnalysis.timeseries`. See
:meth:`~HydrogenBondAnalysis.run` for the format.
The default atom names are taken from the CHARMM 27 force field files, which
will also work for e.g. OPLS/AA in Gromacs, and GLYCAM06.
*Donors* (associated hydrogens are deduced from topology)
*CHARMM 27*
N of the main chain, water OH2/OW, ARG NE/NH1/NH2, ASN ND2, HIS ND1/NE2,
SER OG, TYR OH, CYS SG, THR OG1, GLN NE2, LYS NZ, TRP NE1
*GLYCAM06*
N,NT,N3,OH,OW
*Acceptors*
*CHARMM 27*
O of the main chain, water OH2/OW, ASN OD1, ASP OD1/OD2, CYH SG, GLN OE1,
GLU OE1/OE2, HIS ND1/NE2, MET SD, SER OG, THR OG1, TYR OH
*GLYCAM06*
N,NT,O,O2,OH,OS,OW,OY,P,S,SM
.. SeeAlso:: Table :ref:`Default atom names for hydrogen bonding analysis`
.. versionchanged:: 0.7.6
DEFAULT_DONORS/ACCEPTORS is now embedded in a dict to switch between
default values for different force fields.
"""
# use tuple(set()) here so that one can just copy&paste names from the
# table; set() takes care for removing duplicates. At the end the
# DEFAULT_DONORS and DEFAULT_ACCEPTORS should simply be tuples.
# : default heavy atom names whose hydrogens are treated as *donors*
# : (see :ref:`Default atom names for hydrogen bonding analysis`)
#: Use the keyword *donors* to add a list of additional donor names.
DEFAULT_DONORS = {
'CHARMM27': tuple(set([
'N', 'OH2', 'OW', 'NE', 'NH1', 'NH2', 'ND2', 'SG', 'NE2', 'ND1', 'NZ', 'OG', 'OG1', 'NE1', 'OH'])),
'GLYCAM06': tuple(set(['N', 'NT', 'N3', 'OH', 'OW'])),
'other': tuple(set([]))}
#: default atom names that are treated as hydrogen *acceptors*
#: (see :ref:`Default atom names for hydrogen bonding analysis`)
#: Use the keyword *acceptors* to add a list of additional acceptor names.
DEFAULT_ACCEPTORS = {
'CHARMM27': tuple(set([
'O', 'OH2', 'OW', 'OD1', 'OD2', 'SG', 'OE1', 'OE1', 'OE2', 'ND1', 'NE2', 'SD', 'OG', 'OG1', 'OH'])),
'GLYCAM06': tuple(set(['N', 'NT', 'O', 'O2', 'OH', 'OS', 'OW', 'OY', 'SM'])),
'other': tuple(set([]))}
#: A :class:`collections.defaultdict` of covalent radii of common donors
#: (used in :meth:`_get_bonded_hydrogens_list` to check if a hydrogen is
#: sufficiently close to its donor heavy atom). Values are stored for
#: N, O, P, and S. Any other heavy atoms are assumed to have hydrogens
#: covalently bound at a maximum distance of 1.5 Å.
r_cov = defaultdict(lambda: 1.5, # default value
N=1.31, O=1.31, P=1.58, S=1.55)
def __init__(self, universe, selection1='protein', selection2='all', selection1_type='both',
update_selection1=True, update_selection2=True, filter_first=True, distance_type='hydrogen',
distance=3.0, angle=120.0,
forcefield='CHARMM27', donors=None, acceptors=None,
start=None, stop=None, step=None,
debug=None, detect_hydrogens='distance', verbose=None):
"""Set up calculation of hydrogen bonds between two selections in a universe.
The timeseries is accessible as the attribute :attr:`HydrogenBondAnalysis.timeseries`.
Some initial checks are performed. If there are no atoms selected by
*selection1* or *selection2* or if no donor hydrogens or acceptor atoms
are found then a :exc:`SelectionError` is raised for any selection that
does *not* update (*update_selection1* and *update_selection2*
keywords). For selections that are set to update, only a warning is
logged because it is assumed that the selection might contain atoms at
a later frame (e.g. for distance based selections).
If no hydrogen bonds are detected or if the initial check fails, look
at the log output (enable with :func:`MDAnalysis.start_logging` and set
*verbose* = ``True``). It is likely that the default names for donors
and acceptors are not suitable (especially for non-standard
ligands). In this case, either change the *forcefield* or use
customized *donors* and/or *acceptors*.
.. Note::
In order to speed up processing, atoms are filtered by a coarse
distance criterion before a detailed hydrogen bonding analysis is
performed (*filter_first* = ``True``). If one of your selections is
e.g. the solvent then *update_selection1* (or *update_selection2*)
must also be ``True`` so that the list of candidate atoms is updated
at each step: this is now the default.
If your selections will essentially remain the same for all time
steps (i.e. residues are not moving farther than 3 x *distance*), for
instance, if no water or large conformational changes are involved
or if the optimization is disabled (*filter_first* = ``False``) then
you can improve performance by setting the *update_selection*
keywords to ``False``.
Parameters
----------
universe : universe
Universe object
selection1 : str (optional)
Selection string for first selection ['protein']
selection2 : str (optional)
Selection string for second selection ['all']
selection1_type : str (optional)
Selection 1 can be 'donor', 'acceptor' or 'both'. Note that the
value for *selection1_type* automatically determines how
*selection2* handles donors and acceptors: If *selection1* contains
'both' then *selection2* will also contain *both*. If *selection1*
is set to 'donor' then *selection2* is 'acceptor' (and vice versa).
['both'].
update_selection1 : bool (optional)
Update selection 1 at each frame? [``True``]
update_selection2 : bool (optional)
Update selection 2 at each frame? [``True``]
filter_first : bool (optional)
Filter selection 2 first to only atoms 3*distance away [``True``]
distance : float (optional)
Distance cutoff for hydrogen bonds; only interactions with a H-A distance
<= *distance* (and the appropriate D-H-A angle, see *angle*) are
recorded. (Note: *distance_type* can change this to the D-A distance.) [3.0]
angle : float (optional)
Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of
180º. A hydrogen bond is only recorded if the D-H-A angle is
>= *angle*. The default of 120º also finds fairly non-specific
hydrogen interactions and a possibly better value is 150º. [120.0]
forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional)
Name of the forcefield used. Switches between different
:attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and
:attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values.
Available values: "CHARMM27", "GLYCAM06", "other" ["CHARMM27"]
donors : sequence (optional)
Extra H donor atom types (in addition to those in
:attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence.
acceptors : sequence (optional)
Extra H acceptor atom types (in addition to those in
:attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a sequence.
start : int (optional)
starting frame-index for analysis, ``None`` is the first one, 0.
*start* and *stop* are 0-based frame indices and are used to slice
the trajectory (if supported) [``None``]
stop : int (optional)
last trajectory frame for analysis, ``None`` is the last one [``None``]
step : int (optional)
read every *step* between *start* and *stop*, ``None`` selects 1.
Note that not all trajectory readers support a step different from 1 [``None``]
debug : bool (optional)
If set to ``True`` enables per-frame debug logging. This is disabled
by default because it generates a very large amount of output in
the log file. (Note that a logger must have been started to see
the output, e.g. using :func:`MDAnalysis.start_logging`.)
detect_hydrogens : {"distance", "heuristic"} (optional)
Determine the algorithm to find hydrogens connected to donor
atoms. Can be "distance" (default; finds all hydrogens in the
donor's residue within a cutoff of the donor) or "heuristic"
(looks for the next few atoms in the atom list). "distance" should
always give the correct answer but "heuristic" is faster,
especially when the donor list is updated for each frame. ["distance"]
distance_type : {"hydrogen", "heavy"} (optional)
Measure hydrogen bond lengths between donor and acceptor heavy
atoms ("heavy") or between donor hydrogen and acceptor heavy
atom ("hydrogen"). If using "heavy" then one should set the *distance*
cutoff to a higher value such as 3.5 Å. ["hydrogen"]
Raises
------
:exc:`SelectionError`
is raised for each static selection without the required
donors and/or acceptors.
.. versionchanged:: 0.7.6
New *verbose* keyword (and per-frame debug logging disabled by
default).
New *detect_hydrogens* keyword to switch between two different
algorithms to detect hydrogens bonded to donor. "distance" is a new,
rigorous distance search within the residue of the donor atom,
"heuristic" is the previous list scan (improved with an additional
distance check).
New *forcefield* keyword to switch between different values of
DEFAULT_DONORS/ACCEPTORS to accommodate different force fields.
Also has an option "other" for no default values.
.. versionchanged:: 0.8
The new default for *update_selection1* and *update_selection2* is now
``True`` (see `Issue 138`_). Set to ``False`` if your selections only
need to be determined once (will increase performance).
.. versionchanged:: 0.9.0
New keyword *distance_type* to select between calculation between
heavy atoms or hydrogen-acceptor. It defaults to the previous
behavior (i.e. "hydrogen").
.. versionchanged:: 0.11.0
Initial checks for selections that potentially raise :exc:`SelectionError`.
.. deprecated:: 0.16
The *verbose* keyword argument is replaced by *debug*. Note that the
*verbose* keyword argument is now consistently used to toggle
progress meters throughout the library.
.. _`Issue 138`: https://github.com/MDAnalysis/mdanalysis/issues/138
"""
warnings.warn(
"The donor and acceptor indices being 1-based is deprecated in favor"
" of a zero-based index. These can be accessed by 'donor_index' or"
" 'acceptor_index', removal of the 1-based indices is targeted for"
" version 0.16.0", category=DeprecationWarning)
self._get_bonded_hydrogens_algorithms = {
"distance": self._get_bonded_hydrogens_dist, # 0.7.6 default
"heuristic": self._get_bonded_hydrogens_list, # pre 0.7.6
}
if not detect_hydrogens in self._get_bonded_hydrogens_algorithms:
raise ValueError("detect_hydrogens must be one of {0!r}".format(
self._get_bonded_hydrogens_algorithms.keys()))
self.detect_hydrogens = detect_hydrogens
self.u = universe
self.selection1 = selection1
self.selection2 = selection2
self.selection1_type = selection1_type
self.update_selection1 = update_selection1
self.update_selection2 = update_selection2
self.filter_first = filter_first
self.distance = distance
self.distance_type = distance_type # note: everything except 'heavy' will give the default behavior
self.angle = angle
self.traj_slice = slice(start, stop, step)
# set up the donors/acceptors lists
if donors is None:
donors = []
if acceptors is None:
acceptors = []
self.forcefield = forcefield
self.donors = tuple(set(self.DEFAULT_DONORS[forcefield]).union(donors))
self.acceptors = tuple(set(self.DEFAULT_ACCEPTORS[forcefield]).union(acceptors))
if not (self.selection1 and self.selection2):
raise ValueError('HydrogenBondAnalysis: invalid selections')
elif self.selection1_type not in ('both', 'donor', 'acceptor'):
raise ValueError('HydrogenBondAnalysis: Invalid selection type {0!s}'.format(self.selection1_type))
self.timeseries = None # final result
self.timesteps = None # time for each frame
self.table = None # placeholder for output table
self.debug = True # always enable debug output for initial selection update
self._update_selection_1()
self._update_selection_2()
# per-frame debugging output?
# This line must be changed at the end of the deprecation period for
# the *quiet* keyword argument. Then it must become:
# self.debug = debug
# In the signature, *verbose* must be removed and the default value
# for *debug* must be set to False.
# See the docstring for lib.log._set_verbose, the pull request #1150,
# and the issue #903.
self.debug = _set_verbose(debug, verbose, default=False,
was='verbose', now='debug')
self._log_parameters()
if self.selection1_type == 'donor':
self._sanity_check(1, 'donors')
self._sanity_check(2, 'acceptors')
elif self.selection1_type == 'acceptor':
self._sanity_check(1, 'acceptors')
self._sanity_check(2, 'donors')
else: # both
self._sanity_check(1, 'donors')
self._sanity_check(1, 'acceptors')
self._sanity_check(2, 'acceptors')
self._sanity_check(2, 'donors')
logger.info("HBond analysis: initial checks passed.")
def _sanity_check(self, selection, htype):
"""sanity check the selections 1 and 2
*selection* is 1 or 2, *htype* is "donors" or "acceptors"
If selections do not update and the required donor and acceptor
selections are empty then a :exc:`SelectionError` is immediately
raised.
If selections update dynamically then it is possible that the selection
will yield donors/acceptors at a later step and we only issue a
warning.
.. versionadded:: 0.11.0
"""
assert selection in (1, 2)
assert htype in ("donors", "acceptors")
# horrible data organization: _s1_donors, _s2_acceptors, etc, update_selection1, ...
atoms = getattr(self, "_s{0}_{1}".format(selection, htype))
update = getattr(self, "update_selection{0}".format(selection))
if not atoms:
errmsg = "No {1} found in selection {0}. " \
"You might have to specify a custom '{1}' keyword.".format(
selection, htype)
if not update:
logger.error(errmsg)
raise SelectionError(errmsg)
else:
errmsg += " Selection will update so continuing with fingers crossed."
warnings.warn(errmsg, category=SelectionWarning)
logger.warn(errmsg)
def _log_parameters(self):
"""Log important parameters to the logfile."""
logger.info("HBond analysis: selection1 = %r (update: %r)", self.selection1, self.update_selection1)
logger.info("HBond analysis: selection2 = %r (update: %r)", self.selection2, self.update_selection2)
logger.info("HBond analysis: criterion: donor %s atom and acceptor atom distance <= %.3f A", self.distance_type,
self.distance)
logger.info("HBond analysis: criterion: angle D-H-A >= %.3f degrees", self.angle)
logger.info("HBond analysis: force field %s to guess donor and acceptor names", self.forcefield)
logger.info("HBond analysis: bonded hydrogen detection algorithm: %r", self.detect_hydrogens)
def _get_bonded_hydrogens(self, atom, **kwargs):
"""Find hydrogens bonded to *atom*.
This method is typically not called by a user but it is documented to
facilitate understanding of the internals of
:class:`HydrogenBondAnalysis`.
:Returns: list of hydrogens (can be a
:class:`~MDAnalysis.core.groups.AtomGroup`) or empty list
``[]`` if none were found.
.. SeeAlso::
:meth:`_get_bonded_hydrogens_dist` and :meth:`_get_bonded_hydrogens_list`
.. versionchanged:: 0.7.6
Can switch algorithm by using the *detect_hydrogens* keyword to the
constructor. *kwargs* can be used to supply arguments for algorithm.
"""
return self._get_bonded_hydrogens_algorithms[self.detect_hydrogens](atom, **kwargs)
def _get_bonded_hydrogens_dist(self, atom):
"""Find hydrogens bonded within *cutoff* to *atom*.
* hydrogens are detected by either name ("H*", "[123]H*") or type
("H"); this is not fool-proof as the atom type is not always a
character but the name pattern should catch most typical occurrences.
* The distance from *atom* is calculated for all hydrogens in the
residue and only those within a cutoff are kept. The cutoff depends
on the heavy atom (more precisely, on its element, which is taken as
the first letter of its name ``atom.name[0]``) and is parameterized
in :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the
default of 1.5 Å is used.
The performance of this implementation could be improved once the
topology always contains bonded information; it currently uses the
selection parser with an "around" selection.
.. versionadded:: 0.7.6
"""
try:
return atom.residue.atoms.select_atoms(
"(name H* 1H* 2H* 3H* or type H) and around {0:f} name {1!s}"
"".format(self.r_cov[atom.name[0]], atom.name))
except NoDataError:
return []
def _get_bonded_hydrogens_list(self, atom, **kwargs):
"""Find "bonded" hydrogens to the donor *atom*.
At the moment this relies on the **assumption** that the
hydrogens are listed directly after the heavy atom in the
topology. If this is not the case then this function will
fail.
Hydrogens are detected by name ``H*``, ``[123]H*`` and they have to be
within a maximum distance from the heavy atom. The cutoff distance
depends on the heavy atom and is parameterized in
:attr:`HydrogenBondAnalysis.r_cov`.
.. versionchanged:: 0.7.6
Added detection of ``[123]H`` and additional check that a
selected hydrogen is bonded to the donor atom (i.e. its
distance to the donor is less than the covalent radius
stored in :attr:`HydrogenBondAnalysis.r_cov` or the default
1.5 Å).
Changed name to
:meth:`~HydrogenBondAnalysis._get_bonded_hydrogens_list`
and added *kwargs* so that it can be used instead of
:meth:`~HydrogenBondAnalysis._get_bonded_hydrogens_dist`.
"""
warnings.warn("_get_bonded_hydrogens_list() does not always find "
"all hydrogens; detect_hydrogens='distance' is safer.",
category=DeprecationWarning)
try:
hydrogens = [
a for a in self.u.atoms[atom.index + 1:atom.index + 4]
if a.name.startswith(('H', '1H', '2H', '3H')) \
and self.calc_eucl_distance(atom, a) < self.r_cov[atom.name[0]]]
except IndexError:
hydrogens = [] # weird corner case that atom is the last one in universe
return hydrogens
def _update_selection_1(self):
self._s1 = self.u.select_atoms(self.selection1)
self.logger_debug("Size of selection 1: {0} atoms".format(len(self._s1)))
if not self._s1:
logger.warn("Selection 1 '{0}' did not select any atoms.".format(str(self.selection1)[:80]))
self._s1_donors = {}
self._s1_donors_h = {}
self._s1_acceptors = {}
if self.selection1_type in ('donor', 'both'):
self._s1_donors = self._s1.select_atoms(
'name {0}'.format(' '.join(self.donors)))
self._s1_donors_h = {}
for i, d in enumerate(self._s1_donors):
tmp = self._get_bonded_hydrogens(d)
if tmp:
self._s1_donors_h[i] = tmp
self.logger_debug("Selection 1 donors: {0}".format(len(self._s1_donors)))
self.logger_debug("Selection 1 donor hydrogens: {0}".format(len(self._s1_donors_h)))
if self.selection1_type in ('acceptor', 'both'):
self._s1_acceptors = self._s1.select_atoms(
'name {0}'.format(' '.join(self.acceptors)))
self.logger_debug("Selection 1 acceptors: {0}".format(len(self._s1_acceptors)))
def _update_selection_2(self):
self._s2 = self.u.select_atoms(self.selection2)
if self.filter_first and self._s2:
self.logger_debug('Size of selection 2 before filtering:'
' {} atoms'.format(len(self._s2)))
ns_selection_2 = AtomNeighborSearch(self._s2)
self._s2 = ns_selection_2.search(self._s1, 3. * self.distance)
self.logger_debug('Size of selection 2: {0} atoms'.format(len(self._s2)))
if not self._s2:
logger.warn('Selection 2 "{0}" did not select any atoms.'.format(
str(self.selection2)[:80]))
self._s2_donors = {}
self._s2_donors_h = {}
self._s2_acceptors = {}
if self.selection1_type in ('donor', 'both'):
self._s2_acceptors = self._s2.select_atoms(
'name {0}'.format(' '.join(self.acceptors)))
self.logger_debug("Selection 2 acceptors: {0:d}".format(len(self._s2_acceptors)))
if self.selection1_type in ('acceptor', 'both'):
self._s2_donors = self._s2.select_atoms(
'name {0}'.format(' '.join(self.donors)))
self._s2_donors_h = {}
for i, d in enumerate(self._s2_donors):
tmp = self._get_bonded_hydrogens(d)
if tmp:
self._s2_donors_h[i] = tmp
self.logger_debug("Selection 2 donors: {0:d}".format(len(self._s2_donors)))
self.logger_debug("Selection 2 donor hydrogens: {0:d}".format(len(self._s2_donors_h)))
def logger_debug(self, *args):
if self.debug:
logger.debug(*args)
def run(self, **kwargs):
"""Analyze trajectory and produce timeseries.
Stores the hydrogen bond data per frame as
:attr:`HydrogenBondAnalysis.timeseries` (see there for output
format).
The method accepts a number of keywords, amongst them *verbose*
(default ``True``), which toggles the progress output (see
:class:`~MDAnalysis.lib.log.ProgressMeter`) and *debug* which can
be used to change the debug value provided to the class constructor.
.. SeeAlso:: :meth:`HydrogenBondAnalysis.generate_table` for processing
the data into a different format.
.. versionchanged:: 0.7.6
Results are not returned, only stored in
:attr:`~HydrogenBondAnalysis.timeseries` and duplicate hydrogen bonds
are removed from output (can be suppressed with *remove_duplicates* =
``False``)
.. versionchanged:: 0.11.0
Accept *quiet* keyword. Analysis will now proceed through frames even if
no donors or acceptors were found in a particular frame.
.. deprecated:: 0.15.0
The donor and acceptor indices being 1-based is deprecated in favor
of a zero-based index. This can be accessed by "donor_index" or
"acceptor_index" removal of the 1-based indices is targeted
for version 0.16.0
.. deprecated:: 0.16
The *quiet* keyword argument is deprecated in favor of the *verbose*
one. Previous use of *verbose* now corresponds to the new keyword
argument *debug*.
"""
logger.info("HBond analysis: starting")
logger.debug("HBond analysis: donors %r", self.donors)
logger.debug("HBond analysis: acceptors %r", self.acceptors)
remove_duplicates = kwargs.pop('remove_duplicates', True) # False: old behaviour
if not remove_duplicates:
logger.warn("Hidden feature remove_duplicates=False activated: you will probably get duplicate H-bonds.")
debug = kwargs.pop('debug', None)
if debug is not None and debug != self.debug:
self.debug = debug
logger.debug("Toggling debug to %r", self.debug)
if not self.debug:
logger.debug("HBond analysis: For full step-by-step debugging output use debug=True")
self.timeseries = []
self.timesteps = []
logger.info("checking trajectory...") # n_frames can take a while!
try:
frames = np.arange(self.u.trajectory.n_frames)[self.traj_slice]
except:
logger.error("Problem reading trajectory or trajectory slice incompatible.")
logger.exception()
raise
verbose = _set_verbose(verbose=kwargs.get('verbose', None),
quiet=kwargs.get('quiet', None),
default=True)
pm = ProgressMeter(len(frames),
format="HBonds frame {current_step:5d}: {step:5d}/{numsteps} [{percentage:5.1f}%]\r",
verbose=verbose)
try:
self.u.trajectory.time
def _get_timestep():
return self.u.trajectory.time
logger.debug("HBond analysis is recording time step")
except NotImplementedError:
# chained reader or xyz(?) cannot do time yet
def _get_timestep():
return self.u.trajectory.frame
logger.warn("HBond analysis is recording frame number instead of time step")
logger.info("Starting analysis (frame index start=%d stop=%d, step=%d)",
(self.traj_slice.start or 0),
(self.traj_slice.stop or self.u.trajectory.n_frames), self.traj_slice.step or 1)
for progress, ts in enumerate(self.u.trajectory[self.traj_slice]):
# all bonds for this timestep
frame_results = []
# dict of tuples (atomid, atomid) for quick check if
# we already have the bond (to avoid duplicates)
already_found = {}
frame = ts.frame
timestep = _get_timestep()
self.timesteps.append(timestep)
pm.echo(progress, current_step=frame)
self.logger_debug("Analyzing frame %(frame)d, timestep %(timestep)f ps", vars())
if self.update_selection1:
self._update_selection_1()
if self.update_selection2:
self._update_selection_2()
if self.selection1_type in ('donor', 'both') and self._s2_acceptors:
self.logger_debug("Selection 1 Donors <-> Acceptors")
ns_acceptors = AtomNeighborSearch(self._s2_acceptors)
for i, donor_h_set in self._s1_donors_h.items():
d = self._s1_donors[i]
for h in donor_h_set:
res = ns_acceptors.search(h, self.distance)
for a in res:
angle = self.calc_angle(d, h, a)
donor_atom = h if self.distance_type != 'heavy' else d
dist = self.calc_eucl_distance(donor_atom, a)
if angle >= self.angle and dist <= self.distance:
self.logger_debug(
"S1-D: {0!s} <-> S2-A: {1!s} {2:f} A, {3:f} DEG".format(h.index + 1, a.index + 1, dist, angle))
#self.logger_debug("S1-D: %r <-> S2-A: %r %f A, %f DEG" % (h, a, dist, angle))
frame_results.append(
[h.index + 1, a.index + 1, h.index, a.index,
'{0!s}{1!s}:{2!s}'.format(h.resname, repr(h.resid), h.name),
'{0!s}{1!s}:{2!s}'.format(a.resname, repr(a.resid), a.name),
dist, angle])
already_found[(h.index + 1, a.index + 1)] = True
if self.selection1_type in ('acceptor', 'both') and self._s1_acceptors:
self.logger_debug("Selection 1 Acceptors <-> Donors")
ns_acceptors = AtomNeighborSearch(self._s1_acceptors)
for i, donor_h_set in self._s2_donors_h.items():
d = self._s2_donors[i]
for h in donor_h_set:
res = ns_acceptors.search(h, self.distance)
for a in res:
if remove_duplicates and (
(h.index + 1, a.index + 1) in already_found
or (a.index + 1, h.index + 1) in already_found):
continue
angle = self.calc_angle(d, h, a)
donor_atom = h if self.distance_type != 'heavy' else d
dist = self.calc_eucl_distance(donor_atom, a)
if angle >= self.angle and dist <= self.distance:
self.logger_debug(
"S1-A: {0!s} <-> S2-D: {1!s} {2:f} A, {3:f} DEG".format(a.index + 1, h.index + 1, dist, angle))
#self.logger_debug("S1-A: %r <-> S2-D: %r %f A, %f DEG" % (a, h, dist, angle))
frame_results.append(
[h.index + 1, a.index + 1, h.index, a.index,
'{0!s}{1!s}:{2!s}'.format(h.resname, repr(h.resid), h.name),
'{0!s}{1!s}:{2!s}'.format(a.resname, repr(a.resid), a.name),
dist, angle])
self.timeseries.append(frame_results)
logger.info("HBond analysis: complete; timeseries with %d hbonds in %s.timeseries",
self.count_by_time().count.sum(), self.__class__.__name__)
@staticmethod
def calc_angle(d, h, a):
"""Calculate the angle (in degrees) between two atoms with H at apex."""
v1 = h.position - d.position
v2 = h.position - a.position
if np.all(v1 == v2):
return 0.0
return np.rad2deg(angle(v1, v2))
@staticmethod
def calc_eucl_distance(a1, a2):
"""Calculate the Euclidean distance between two atoms. """
return norm(a2.position - a1.position)
def generate_table(self):
"""Generate a normalised table of the results.
The table is stored as a :class:`numpy.recarray` in the
attribute :attr:`~HydrogenBondAnalysis.table` and can be used
with e.g. `recsql`_.
Columns:
0. "time"
1. "donor_idx"
2. "acceptor_idx"
3. "donor_index"
4. "acceptor_index"
4. "donor_resnm"
5. "donor_resid"
6. "donor_atom"
7. "acceptor_resnm"
8. "acceptor_resid"
9. "acceptor_atom"
10. "distance"
11. "angle"
.. _recsql: http://pypi.python.org/pypi/RecSQL
"""
if self.timeseries is None:
msg = "No timeseries computed, do run() first."
warnings.warn(msg, category=MissingDataWarning)
logger.warn(msg)
return
num_records = np.sum([len(hframe) for hframe in self.timeseries])
# build empty output table
dtype = [
("time", float), ("donor_idx", int), ("acceptor_idx", int),
("donor_index", int), ("acceptor_index", int),
("donor_resnm", "|U4"), ("donor_resid", int), ("donor_atom", "|U4"),
("acceptor_resnm", "|U4"), ("acceptor_resid", int), ("acceptor_atom", "|U4"),
("distance", float), ("angle", float)]
# according to Lukas' notes below, using a recarray at this stage is ineffective
# and speedups of ~x10 can be achieved by filling a standard array, like this:
out = np.empty((num_records,), dtype=dtype)
cursor = 0 # current row
for t, hframe in zip(self.timesteps, self.timeseries):
for (donor_idx, acceptor_idx, donor_index, acceptor_index, donor,
acceptor, distance, angle) in hframe:
out[cursor] = (t, donor_idx, acceptor_idx, donor_index, acceptor_index) + \
parse_residue(donor) + parse_residue(acceptor) + (distance, angle)
cursor += 1
assert cursor == num_records, "Internal Error: Not all HB records stored"
self.table = out.view(np.recarray)
logger.debug("HBond: Stored results as table with %(num_records)d entries.", vars())
def save_table(self, filename="hbond_table.pickle"):
"""Saves :attr:`~HydrogenBondAnalysis.table` to a pickled file.
Load with ::
import cPickle
table = cPickle.load(open(filename))
.. SeeAlso:: :mod:`cPickle` module and :class:`numpy.recarray`
"""
if self.table is None:
self.generate_table()
cPickle.dump(self.table, open(filename, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
def count_by_time(self):
"""Counts the number of hydrogen bonds per timestep.
:Returns: a :class:`numpy.recarray`
"""
if self.timeseries is None:
msg = "No timeseries computed, do run() first."
warnings.warn(msg, category=MissingDataWarning)
logger.warn(msg)
return
out = np.empty((len(self.timesteps),), dtype=[('time', float), ('count', int)])
for cursor, time_count in enumerate(zip(self.timesteps,
map(len, self.timeseries))):
out[cursor] = time_count
return out.view(np.recarray)
def count_by_type(self):
"""Counts the frequency of hydrogen bonds of a specific type.
Processes :attr:`HydrogenBondAnalysis.timeseries` and returns
a :class:`numpy.recarray` containing atom indices, residue
names, residue numbers (for donors and acceptors) and the
fraction of the total time during which the hydrogen bond was
detected.
:Returns: a :class:`numpy.recarray`
"""
if self.timeseries is None:
msg = "No timeseries computed, do run() first."
warnings.warn(msg, category=MissingDataWarning)
logger.warn(msg)
return
hbonds = defaultdict(int)
for hframe in self.timeseries:
for (donor_idx, acceptor_idx, donor_index, acceptor_index, donor,
acceptor, distance, angle) in hframe:
donor_resnm, donor_resid, donor_atom = parse_residue(donor)
acceptor_resnm, acceptor_resid, acceptor_atom = parse_residue(acceptor)
# generate unambiguous key for current hbond
# (the donor_heavy_atom placeholder '?' is added later)
# idx_zero is redundant for an unambiguous key, but included for
# consistency.
hb_key = (
donor_idx, acceptor_idx, donor_index, acceptor_index,
donor_resnm, donor_resid, "?", donor_atom,
acceptor_resnm, acceptor_resid, acceptor_atom)
hbonds[hb_key] += 1
# build empty output table
dtype = [
('donor_idx', int), ('acceptor_idx', int),
("donor_index", int), ("acceptor_index", int), ('donor_resnm', 'U4'),
('donor_resid', int), ('donor_heavy_atom', 'U4'), ('donor_atom', 'U4'),
('acceptor_resnm', 'U4'), ('acceptor_resid', int), ('acceptor_atom', 'U4'),
('frequency', float)
]
out = np.empty((len(hbonds),), dtype=dtype)
# float because of division later
tsteps = float(len(self.timesteps))
for cursor, (key, count) in enumerate(six.iteritems(hbonds)):
out[cursor] = key + (count / tsteps,)
# return array as recarray
# The recarray has not been used within the function, because accessing
# the elements of a recarray (3.65 us) is much slower than accessing those
# of an ndarray (287 ns).
r = out.view(np.recarray)
# patch in donor heavy atom names (replaces '?' in the key)
h2donor = self._donor_lookup_table_byindex()
r.donor_heavy_atom[:] = [h2donor[idx] for idx in r.donor_index]
return r
def timesteps_by_type(self):
"""Frames during which each hydrogen bond existed, sorted by hydrogen bond.
Processes :attr:`HydrogenBondAnalysis.timeseries` and returns
a :class:`numpy.recarray` containing atom indices, residue
names, residue numbers (for donors and acceptors) and a list
of timesteps at which the hydrogen bond was detected.
:Returns: a :class:`numpy.recarray`
"""
if self.timeseries is None:
msg = "No timeseries computed, do run() first."
warnings.warn(msg, category=MissingDataWarning)
logger.warn(msg)
return
hbonds = defaultdict(list)
for (t, hframe) in zip(self.timesteps, self.timeseries):
for (donor_idx, acceptor_idx, donor_index, acceptor_index, donor,
acceptor, distance, angle) in hframe:
donor_resnm, donor_resid, donor_atom = parse_residue(donor)
acceptor_resnm, acceptor_resid, acceptor_atom = parse_residue(acceptor)
# generate unambiguous key for current hbond
# (the donor_heavy_atom placeholder '?' is added later)
# idx_zero is redundant for key but added for consistency
hb_key = (
donor_idx, acceptor_idx,
donor_index, acceptor_index,
donor_resnm, donor_resid, "?", donor_atom,
acceptor_resnm, acceptor_resid, acceptor_atom)
hbonds[hb_key].append(t)
out_nrows = 0
# count number of timesteps per key to get length of output table
for ts_list in six.itervalues(hbonds):
out_nrows += len(ts_list)
# build empty output table
dtype = [
('donor_idx', int), ('acceptor_idx', int),('donor_index', int),
('acceptor_index', int), ('donor_resnm', 'U4'), ('donor_resid', int),
('donor_heavy_atom', 'U4'), ('donor_atom', 'U4'),('acceptor_resnm', 'U4'),
('acceptor_resid', int), ('acceptor_atom', 'U4'), ('time', float)]
out = np.empty((out_nrows,), dtype=dtype)
out_row = 0
for (key, times) in six.iteritems(hbonds):
for tstep in times:
out[out_row] = key + (tstep,)
out_row += 1
# return array as recarray
# The recarray has not been used within the function, because accessing
# the elements of a recarray (3.65 us) is much slower than accessing those
# of an ndarray (287 ns).
r = out.view(np.recarray)
# patch in donor heavy atom names (replaces '?' in the key)
h2donor = self._donor_lookup_table_byindex()
r.donor_heavy_atom[:] = [h2donor[idx - 1] for idx in r.donor_idx]
return r
def _donor_lookup_table_byres(self):
"""Look-up table to identify the donor heavy atom from resid and hydrogen name.
Assumptions:
* resids are unique
* hydrogen atom names are unique within a residue
* selections have not changed (because we are simply looking at the last content
of the donors and donor hydrogen lists)
Donors from *selection1* and *selection2* are merged.
Output dictionary ``h2donor`` can be used as::
heavy_atom_name = h2donor[resid][hydrogen_name]
"""
s1d = self._s1_donors # list of donor Atom instances
s1h = self._s1_donors_h # dict indexed by donor position in donor list, containing AtomGroups of H
s2d = self._s2_donors
s2h = self._s2_donors_h
def _make_dict(donors, hydrogens):
# two steps so that entry for one residue can be UPDATED for multiple donors
d = dict((donors[k].resid, {}) for k in range(len(donors)) if k in hydrogens)
for k in range(len(donors)):
if k in hydrogens:
d[donors[k].resid].update(dict((atom.name, donors[k].name) for atom in hydrogens[k]))
return d
h2donor = _make_dict(s2d, s2h) # 2 is typically the larger group
# merge (in principle h2donor.update(_make_dict(s1d, s1h)) should be sufficient
# with our assumptions, but the following is safer)
for resid, names in _make_dict(s1d, s1h).items():
if resid in h2donor:
h2donor[resid].update(names)
else:
h2donor[resid] = names
return h2donor
def _donor_lookup_table_byindex(self):
"""Look-up table to identify the donor heavy atom from hydrogen atom index.
Assumptions:
* selections have not changed (because we are simply looking at the last content
of the donors and donor hydrogen lists)
Donors from *selection1* and *selection2* are merged.
Output dictionary ``h2donor`` can be used as::
heavy_atom_name = h2donor[index]
.. Note::
*index* is the 0-based MDAnalysis index
(:attr:`MDAnalysis.core.groups.Atom.index`). The
tables generated by :class:`HydrogenBondAnalysis` contain
1-based indices and zero-based indices.
.. deprecated:: 0.15.0
The 1-based indices are deprecated in favor of the zero-based indices
given by "idx_zero".
"""
s1d = self._s1_donors # list of donor Atom instances
s1h = self._s1_donors_h # dict indexed by donor position in donor list, containing AtomGroups of H
s2d = self._s2_donors
s2h = self._s2_donors_h
def _make_dict(donors, hydrogens):
#return dict(flatten_1([(atom.id, donors[k].name) for atom in hydrogens[k]] for k in range(len(donors))
# if k in hydrogens))
x = []
for k in range(len(donors)):
if k in hydrogens:
x.extend([(atom.index, donors[k].name) for atom in hydrogens[k]])
return dict(x)
h2donor = _make_dict(s2d, s2h) # 2 is typically the larger group
h2donor.update(_make_dict(s1d, s1h))
return h2donor
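# --- Editor's usage sketch (not part of the original module) ---------------------
# A minimal end-to-end example of the analysis class defined above. The file names
# "topol.psf" and "traj.dcd" are placeholders, the water selection "resname TIP3"
# assumes CHARMM-style naming, and the distance/angle cutoffs shown are the
# documented defaults; adjust all of these for your own system.
if __name__ == "__main__":
    import MDAnalysis
    u = MDAnalysis.Universe("topol.psf", "traj.dcd")
    h = HydrogenBondAnalysis(u, "protein", "resname TIP3",
                             distance=3.0, angle=120.0)
    h.run()                      # detect hydrogen bonds in every frame
    h.generate_table()           # flat recarray stored in h.table (see generate_table above)
    h.save_table("hbond_table.pickle")
    print(h.count_by_time())     # number of hydrogen bonds per frame
    print(h.count_by_type())     # per-bond occupancy over the trajectory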
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/hbonds/hbond_analysis.py
|
Python
|
gpl-2.0
| 57,367
|
[
"CHARMM",
"Gromacs",
"MDAnalysis",
"VMD"
] |
9ffce0c1143f95c611730e4161e46c6147a7dc9d0a76d1df7d6066bda9f05ffb
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from customxmlhandler import CustomXMLBuilder, CustomXMLParser
from mediaitem import CustomMediaItem
from customtab import CustomTab
|
marmyshev/transitions
|
openlp/plugins/custom/lib/__init__.py
|
Python
|
gpl-2.0
| 2,240
|
[
"Brian"
] |
008e5b86134658413f6593b2e049454a225bcc3bc53bc29d034e55e174b93137
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for filmenoi
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "filmenoi"
__category__ = "F,S"
__type__ = "generic"
__title__ = "Filme-noi.com"
__language__ = "ES"
__creationdate__ = "20131223"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[filmenoi.py] mainlist")
item.url="http://www.filme-net.com/";
return novedades(item)
def novedades(item):
logger.info("[filmenoi.py] novedades")
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
# this pattern is for the Cinemaxx.rs site
#patron = '<ul class="pm-ul-browse-videos thumbnails" id="pm-grid">[^<]+'
#patron = '<li>[^<]+'
#patron += '<div class="pm-li-video">[^<]+'
#patron += '.*?<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</li>'
# this pattern is for the Filme-noi.com site
#patron = '<div class="home_posts_thumbnail">[^<]+'
#patron += '<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</div>'
patron = '<div class="home_posts_thumbnail">[^<]+'
patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
thumbnail = scrapertools.find_single_match(scrapedthumbnail,"(http\://www.filme-net.com/wp-content/uploads/.*?.jpg)")
scrapedplot = ""
#if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
if (DEBUG): logger.info("url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], title=["+scrapedtitle+"]") # Fails to extract the images because they contain spaces
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
patron = "<a href='([^']+)'>\&rsaquo\;</a>" # Broken: the next-page link is not added
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
#if DEBUG: scrapertools.printMatches(item.url)
for match in matches:
scrapedtitle = "> Inainte"
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,match)
scrapedthumbnail = ""
if (DEBUG): logger.info("url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], title=["+scrapedtitle+"]")
itemlist.append( Item(channel=__channel__, action="novedades", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def findvideos(item):
logger.info("[filmenoi.py] findvideos")
data = scrapertools.cache_page(item.url)
itemlist=[]
#<a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="http://unsoloclic.info/wp-content/uploads/2012/11/billonuploads2.png" alt="" width="380" height="50" /></a></p>
#<a href="http://1bd02d49.linkbucks.com"><img class="colorbox-57103" title="Freakeshare" alt="" src="http://unsoloclic.info/wp-content/uploads/2013/01/freakshare.png" width="390" height="55" /></a></p>
patron = '<p+<iframe href="(http.//[a-z0-9]+.video.mail.r[^"]+)[^>]+><img.*?title="([^"]+)".*?src="([^"]+)"' # looking for this pattern, but it fails when the second link is not in the same <p> as the first server
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for url,servertag,serverthumb in matches:
itemlist.append( Item(channel=__channel__, action="play", server="linkbucks", title=servertag+" [linkbucks]" , url=url , thumbnail=serverthumb , plot=item.plot , folder=False) )
from servers import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
if videoitem.server!="linkbucks":
videoitem.channel=__channel__
videoitem.action="play"
videoitem.folder=False
videoitem.title = "["+videoitem.server+"]"
return itemlist
def play(item):
logger.info("[filmenoi.py] play")
itemlist=[]
if item.server=="linkbucks":
logger.info("Es linkbucks")
# Work out the real link
from servers import linkbucks
location = linkbucks.get_long_url(item.url)
logger.info("location="+location)
# Extract the URL that skips the adf.ly ad
if location.startswith("http://adf"):
# Work out the real link
from servers import adfly
location = adfly.get_long_url(location)
logger.info("location="+location)
from servers import servertools
itemlist=servertools.find_video_items(data=location)
for videoitem in itemlist:
videoitem.channel=__channel__
videoitem.folder=False
else:
itemlist.append(item)
return itemlist
# Automatic channel verification: this function must return "True" if the channel is OK.
def test():
# mainlist
novedades_items = mainlist(Item())
# Consider the channel OK if any of the "Novedades" videos returns mirrors
bien = False
for singleitem in novedades_items:
mirrors_items = findvideos( item=singleitem )
for mirror_item in mirrors_items:
video_items = play(mirror_item)
if len(video_items)>0:
return True
return False
|
Zanzibar82/pelisalacarta
|
python/main-classic/channels/filmenoi.py
|
Python
|
gpl-3.0
| 5,883
|
[
"ADF"
] |
e0bdb60bf88b178ef7cdf07fbd7af77a7294193b5057e1bdc35db8a3d03b1eea
|
# -*- coding: iso-8859-1 -*-
'''Module for processing the data read from the output files of quantum chemical
software. '''
'''
orbkit
Gunter Hermann, Vincent Pohl, Lukas Eugen Marsoner Steinkasserer, Axel Schild, and Jean Christophe Tremblay
Institut fuer Chemie und Biochemie, Freie Universitaet Berlin, 14195 Berlin, Germany
This file is part of orbkit.
orbkit is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or any later version.
orbkit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with orbkit. If not, see <http://www.gnu.org/licenses/>.
'''
#from scipy.constants import value as physical_constants
import numpy
from os import path
from copy import copy,deepcopy
from .display import display
from .units import u_to_me, aa_to_a0
from .tools import get_atom_symbol, standard_mass
from .orbitals import AOClass, MOClass
class QCinfo:
'''Class managing all information from the output
files of quantum chemical software.
See :ref:`Central Variables` in the manual for details.
'''
def __init__(self, data=None):
self.geo_info = []
self.geo_spec = []
if data:
self.geo_spec = data['geo_spec']
self.geo_info = data['geo_info']
self.format_geo()
if isinstance(data['ao_spec'], numpy.ndarray):
ao_spec = data['ao_spec'][numpy.newaxis][0]
else:
ao_spec = data['ao_spec']
if isinstance(data['mo_spec'], numpy.ndarray):
mo_spec = data['mo_spec'][numpy.newaxis][0]
else:
mo_spec = data['mo_spec']
self.ao_spec = AOClass(restart=ao_spec)
self.mo_spec = MOClass(restart=mo_spec)
else:
self.geo_info = []
self.geo_spec = []
self.ao_spec = AOClass()
self.mo_spec = MOClass()
def __eq__(self, other):
if not isinstance(other, QCinfo):
raise TypeError('Comparing of QCinfo to non-QCinfo object not defined')
same = [
self.comp_geo_info(other.geo_info),
numpy.allclose(self.geo_spec, other.geo_spec),
self.ao_spec == other.ao_spec,
self.mo_spec == other.mo_spec
]
return all(same)
def update(self):
self.ao_spec.update()
self.mo_spec.update()
def comp_geo_info(self, geo2):
same = True
for atom1, atom2 in zip(self.geo_info, geo2):
if not len(atom1) == len(atom2):
raise ValueError('Atom objects are of different length!')
for i in range(len(atom1)):
if atom1[i] != atom2[i]:
same = False
return same
def copy(self):
from copy import deepcopy
qcinfo = deepcopy(self)
qcinfo.update()
return qcinfo
def format_geo(self, is_angstrom=False):
'''Converts geo_info and geo_spec to a universal format.
**Parameters:**
is_angstrom : bool, optional
If True, input is assumed to be in Angstrom and positions are converted to Bohr radii.
'''
for i in self.geo_info:
i[0] = get_atom_symbol(i[0])
i[2] = float(i[-1])
self.geo_info = numpy.array(self.geo_info)
self.geo_spec = numpy.array(self.geo_spec,dtype=float)
if is_angstrom:
self.geo_spec *= aa_to_a0
def get_com(self,nuc_list=None):
'''Computes the center of mass.
'''
com = numpy.zeros(3)
total_mass = 0.
if nuc_list is None:
nuc_list = list(range(len(self.geo_spec))) # iterate over all nuclei
for ii in nuc_list:
nuc_mass = standard_mass(self.geo_info[ii][0])
com += numpy.multiply(self.geo_spec[ii],nuc_mass)
total_mass += nuc_mass
com = com/total_mass
return com
def get_charge(self, nuclear=True, electron=True):
'''Computes total charge of the system.
'''
charge = 0.
if electron:
charge -= sum(self.mo_spec.get_occ())
if nuclear:
for ii in range(len(self.geo_info)):
charge += float(self.geo_info[ii][2])
return charge
def get_coc(self):
'''Computes the center of charge.
'''
coc = numpy.zeros(3)
for ii in range(len(self.geo_info)):
nuc_charge = float(self.geo_info[ii][2])
coc += numpy.multiply(self.geo_spec[ii],nuc_charge)
coc = coc / self.get_charge(nuclear=True)
return coc
def get_bc(self,matrix=None,is_vector=False):
'''Calculates Barycenter for scalar field
'''
# Initialize variable
self.bc = numpy.zeros(3)
# Calculation of barycenter
from orbkit import grid
if not is_vector:
grid.grid2vector()
xyz = grid.tolist()
for i in range(3):
self.bc[i] = (matrix.reshape((-1,))*xyz[i]).sum()
self.bc /= matrix.sum()
if not is_vector:
grid.vector2grid(*grid.N_)
return self.bc
def select_spin(self,restricted,spin=None):
'''For an unrestricted calculation, the name of the MO
('sym' keyword in ``qc.mo_spec``) is modified, e.g., 3.1_b for MO 3.1 with
beta spin and 3.1_a for MO 3.1 with alpha spin.
For restricted calculation, the 'spin' keyword from ``qc.mo_spec`` is
removed.
**Parameters:**
restricted : bool
If True, removes the 'spin' keyword from ``qc.mo_spec``.
spin : {None, 'alpha', or 'beta'}, optional
If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.
'''
# Only molecular orbitals of one spin requested?
if spin is not None:
for i in range(len(self.mo_spec))[::-1]:
if self.mo_spec[i]['spin'] != spin:
del self.mo_spec[i]
if restricted:
# Closed shell calculation
for mo in self.mo_spec:
del mo['spin']
else:
# Rename MOs according to spin
for mo in self.mo_spec:
mo['sym'] += '_%s' % mo['spin'][0]
if not isinstance(self.mo_spec, MOClass):
self.mo_spec = MOClass(self.mo_spec)
self.mo_spec.get_spinstate()
def todict(self):
'''Returns the dictionary that is used to save QCinfo instance
'''
data = {}
data['ao_spec'] = self.ao_spec.todict()
data['mo_spec'] = self.mo_spec.todict()
data['geo_spec'] = self.geo_spec
data['geo_info'] = self.geo_info
data['parent_class_name'] = self.__module__ + '.' + self.__class__.__name__
return data
def get_ase_atoms(self,bbox=None,**kwargs):
'''Create an ASE atoms object.
(cf. https://wiki.fysik.dtu.dk/ase/ase/atoms.html )
**Parameters:**
bbox : list of floats (bbox=[xmin,xmax,ymin,ymax,zmin,zmax]), optional
If not None, sets the unit cell to the grid boundaries and moves the
molecule in its center.
**Returns:**
atoms : Atoms object
See https://wiki.fysik.dtu.dk/ase/ase/atoms.html for details
.. Note::
ASE has to be in the PYTHONPATH
'''
from ase import Atoms
from ase.units import Bohr
atoms = Atoms("".join(self.geo_info[:,0]),
positions=self.geo_spec*Bohr,
**kwargs)
if bbox is not None:
if len(bbox) != 6:
raise ValueError("bbox has to have 6 elements")
bbox = numpy.array(bbox)
atoms.translate(-bbox[::2]*Bohr)
atoms.cell = numpy.eye(3) * (bbox[1::2] - bbox[::2])*Bohr
return atoms
# Synonym
atoms = get_ase_atoms
def view(self,select=slice(None,None,None),bbox=None,**kwargs):
'''Opens ase-gui with the atoms of the QCinfo class.
(cf. https://wiki.fysik.dtu.dk/ase/ase/visualize/visualize.html )
**Parameters:**
select : slice or (array of int), default: all atoms
Specifies the atoms to be shown.
bbox : list of floats (bbox=[xmin,xmax,ymin,ymax,zmin,zmax]), optional
If not None, sets the unit cell to the grid boundaries and moves the
molecule in its center.
.. Note::
ASE has to be in the PYTHONPATH
'''
from ase import visualize
visualize.view(self.get_ase_atoms(bbox=bbox,**kwargs)[select])
@property
def nuclear_repulsion(self):
'''Calculates nuclear repulsion energy.'''
from numpy.linalg import norm
Vnn = 0
Natoms = self.geo_spec.shape[0]
for a in range(Natoms):
Za = float(self.geo_info[a,2])
Ra = self.geo_spec[a,:].astype(float)
for b in range(a+1, Natoms):
Zb = float(self.geo_info[b,2])
Rb = self.geo_spec[b,:].astype(float)
Vnn += Za*Zb / norm(Ra-Rb)
return Vnn
class CIinfo():
'''Class managing all information from the output
files of quantum chemical software for CI calculations.
The CI related features are in ongoing development.
'''
def __init__(self,method='ci'):
self.method = method
self.info = None
self.coeffs = []
self.occ = []
self.moocc = None
def __str__(self):
string = '%s' % self.method.upper()
if self.info is not None:
string += ' State %(state)s' % self.info
if 'spin' in self.info.keys() and self.info['spin'] != 'Unknown':
string += ' (%(spin)s)' % self.info
if numpy.shape(self.coeffs) != (0,):
string += ':\tNorm = %0.8f (%d Coefficients)' %(self.get_norm(),
len(self.coeffs))
return string
def __eq__(self, other):
try:
return self.__dict__ == other.__dict__
except ValueError:
return False
def get_norm(self):
return sum(self.coeffs**2)
def renormalize(self):
self.coeffs /= self.get_norm()
def apply_threshold(self,threshold,keep_length=False):
i = numpy.abs(self.coeffs) > threshold
if keep_length:
self.coeffs[numpy.invert(i)] = 0.0
else:
if self.info['state'] == '0':
self.coeffs = self.coeffs[i]
self.occ = self.occ[:1]
else:
self.coeffs = self.coeffs[i]
self.occ = self.occ[i]
def copy(self):
ciinfo = deepcopy(self)
return ciinfo
def get_moocc(self):
if self.moocc is None:
raise ValueError('ci.set_moocc(qc) has to be called first! (ci.moocc is not initialized)')
return self.moocc
def set_moocc(self,moocc):
assert (moocc.dtype == numpy.intc), 'moocc has to be numpy.intc'
self.moocc = moocc
def hdf5_save(self,fid='out.h5',group='/ci:0',mode='w'):
from orbkit.output import hdf5_open,hdf5_append
from copy import copy
for hdf5_file in hdf5_open(fid,mode=mode):
dct = copy(self.todict())
dct['info'] = numpy.array(dct['info'].items(),dtype=str)
hdf5_append(dct,hdf5_file,name=group)
def hdf5_read(self,fid='out.h5',group='/ci:0'):
from orbkit.output import hdf5_open,hdf52dict
for hdf5_file in hdf5_open(fid,mode='r'):
for key in self.__dict__.keys():
try:
self.__dict__[key] = hdf52dict('%s/%s' % (group,key),hdf5_file)
except KeyError:
self.__dict__[key] = hdf5_file['%s' % group].attrs[key]
self.__dict__['info'] = dict(self.__dict__['info'])
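# --- Editor's usage sketch (not part of the original module) ---------------------
# Builds a minimal QCinfo by hand for a water molecule (illustrative coordinates in
# Angstrom) and queries a few geometry-based properties. No AO/MO data is attached,
# so only nuclear quantities are used; the atom entries follow the layout consumed
# by format_geo() above ([symbol, index, nuclear charge]). Run it in a context where
# the orbkit package is importable (this file uses relative imports).
if __name__ == "__main__":
    qc = QCinfo()
    qc.geo_info = [['O', '1', '8.0'], ['H', '2', '1.0'], ['H', '3', '1.0']]
    qc.geo_spec = [[0.000,  0.000,  0.117],
                   [0.000,  0.757, -0.469],
                   [0.000, -0.757, -0.469]]
    qc.format_geo(is_angstrom=True)         # convert positions to Bohr radii
    print(qc.get_com())                     # center of mass
    print(qc.get_charge(electron=False))    # total nuclear charge: 10.0
    print(qc.nuclear_repulsion)             # nuclear repulsion energy in Hartree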
|
orbkit/orbkit
|
orbkit/qcinfo.py
|
Python
|
lgpl-3.0
| 11,118
|
[
"ASE"
] |
a4e2add350338731cb9f4aab58c35abc880b9497faf526a74a7da6a236e865c7
|
#
# packages.py: package management - mainly package installation
#
# Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Erik Troan <ewt@redhat.com>
# Matt Wilson <msw@redhat.com>
# Michael Fulbright <msf@redhat.com>
# Jeremy Katz <katzj@redhat.com>
#
import itertools
import glob
import iutil
import isys
import os
import time
import sys
import string
import language
import shutil
import traceback
from flags import flags
from product import *
from constants import *
from upgrade import bindMountDevDirectory
from storage.errors import *
import logging
log = logging.getLogger("anaconda")
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
def doPostAction(anaconda):
anaconda.instClass.postAction(anaconda)
def firstbootConfiguration(anaconda):
if anaconda.firstboot == FIRSTBOOT_RECONFIG:
f = open(ROOT_PATH + '/etc/reconfigSys', 'w+')
f.close()
elif anaconda.firstboot == FIRSTBOOT_SKIP:
f = open(ROOT_PATH + '/etc/sysconfig/firstboot', 'w+')
f.write('RUN_FIRSTBOOT=NO')
f.close()
return
def writeKSConfiguration(anaconda):
log.info("Writing autokickstart file")
fn = ROOT_PATH + "/root/anaconda-ks.cfg"
anaconda.writeKS(fn)
def copyAnacondaLogs(anaconda):
log.info("Copying anaconda logs")
if not os.path.isdir (ROOT_PATH + '/var/log/anaconda'):
os.mkdir(ROOT_PATH + '/var/log/anaconda')
for (fn, dest) in (("/tmp/anaconda.log", "anaconda.log"),
("/tmp/syslog", "anaconda.syslog"),
("/tmp/X.log", "anaconda.xlog"),
("/tmp/program.log", "anaconda.program.log"),
("/tmp/storage.log", "anaconda.storage.log"),
("/tmp/ifcfg.log", "anaconda.ifcfg.log"),
("/tmp/yum.log", "anaconda.yum.log")):
if os.access(fn, os.R_OK):
try:
shutil.copyfile(fn, "%s/var/log/anaconda/%s" %(ROOT_PATH, dest))
os.chmod("%s/var/log/anaconda/%s" %(ROOT_PATH, dest), 0600)
except:
pass
def turnOnFilesystems(anaconda):
if anaconda.dir == DISPATCH_BACK:
rc = anaconda.intf.messageWindow(_("Warning"),
_("Filesystems have already been activated. You "
"cannot go back past this point.\n\nWould you like to "
"continue with the installation?"),
type="custom", custom_icon=["error","error"],
custom_buttons=[_("_Exit installer"), _("_Continue")])
if rc == 0:
sys.exit(0)
return DISPATCH_FORWARD
if not anaconda.upgrade:
if (flags.livecdInstall and
not flags.imageInstall and
not anaconda.storage.fsset.active):
# turn off any swaps that we didn't turn on
# needed for live installs
iutil.execWithRedirect("swapoff", ["-a"],
stdout = "/dev/tty5", stderr="/dev/tty5")
anaconda.storage.devicetree.teardownAll()
upgrade_migrate = False
if anaconda.upgrade:
for d in anaconda.storage.migratableDevices:
if d.format.migrate:
upgrade_migrate = True
title = None
message = None
details = None
try:
anaconda.storage.doIt()
except FSResizeError as (msg, device):
title = _("Resizing Failed")
message = _("There was an error encountered while "
"resizing the device %s.") % (device,)
if os.path.exists("/tmp/resize.out"):
details = open("/tmp/resize.out", "r").read()
else:
details = "%s" %(msg,)
except FSMigrateError as (msg, device):
title = _("Migration Failed")
message = _("An error was encountered while "
"migrating filesystem on device %s.") % (device,)
details = msg
except Exception as e:
raise
if title:
rc = anaconda.intf.detailedMessageWindow(title, message, details,
type = "custom",
custom_buttons = [_("_File Bug"), _("_Exit installer")])
if rc == 0:
raise
elif rc == 1:
sys.exit(1)
if not anaconda.upgrade:
anaconda.storage.turnOnSwap()
anaconda.storage.mountFilesystems(raiseErrors=False,
readOnly=False,
skipRoot=anaconda.backend.skipFormatRoot)
else:
if upgrade_migrate:
# we should write out a new fstab with the migrated fstype
shutil.copyfile("%s/etc/fstab" % ROOT_PATH,
"%s/etc/fstab.anaconda" % ROOT_PATH)
anaconda.storage.fsset.write()
# and make sure /dev is mounted so we can read the bootloader
bindMountDevDirectory(ROOT_PATH)
def setupTimezone(anaconda):
# we don't need this on an upgrade or going backwards
if anaconda.upgrade or flags.imageInstall or anaconda.dir == DISPATCH_BACK:
return
os.environ["TZ"] = anaconda.timezone.tz
tzfile = "/usr/share/zoneinfo/" + anaconda.timezone.tz
tzlocalfile = "/etc/localtime"
if not os.access(tzfile, os.R_OK):
log.error("unable to set timezone")
else:
try:
os.remove(tzlocalfile)
except OSError:
pass
try:
shutil.copyfile(tzfile, tzlocalfile)
except OSError as e:
log.error("Error copying timezone (from %s): %s" %(tzfile, e.strerror))
if iutil.isS390():
return
args = [ "--hctosys" ]
if anaconda.timezone.utc:
args.append("-u")
try:
iutil.execWithRedirect("/sbin/hwclock", args, stdin = None,
stdout = "/dev/tty5", stderr = "/dev/tty5")
except RuntimeError:
log.error("Failed to set clock")
# FIXME: this is a huge gross hack. hard coded list of files
# created by anaconda so that we can not be killed by selinux
def setFileCons(anaconda):
def contextCB(arg, directory, files):
for file in files:
path = os.path.join(directory, file)
if not os.access(path, os.R_OK):
log.warning("%s doesn't exist" % path)
continue
# If the path begins with rootPath, matchPathCon will never match
# anything because policy doesn't contain that path.
if path.startswith(ROOT_PATH):
path = path.replace(ROOT_PATH, "")
ret = isys.resetFileContext(path, ROOT_PATH)
if flags.selinux:
log.info("setting SELinux contexts for anaconda created files")
# Add "/mnt/sysimage" to the front of every path so the glob works.
# Then run glob on each element of the list and flatten it into a
# single list we can run contextCB across.
files = itertools.chain(*map(lambda f: glob.glob("%s%s" % (ROOT_PATH, f)),
relabelFiles))
contextCB(None, "", files)
for dir in relabelDirs + ["/dev/%s" % vg.name for vg in anaconda.storage.vgs]:
# Add "/mnt/sysimage" for similar reasons to above.
dir = "%s%s" % (ROOT_PATH, dir)
os.path.walk(dir, contextCB, None)
# os.path.walk won't include the directory we start walking at,
# so that needs its context set separately.
contextCB(None, "", [dir])
return
# FIXME: using rpm directly here is kind of lame, but in the yum backend
# we don't want to use the metadata as the info we need would require
# the filelists. and since we only ever call this after an install is
# done, we can be guaranteed this will work. put here because it's also
# used for livecd installs
def rpmKernelVersionList():
import rpm
def get_version(header):
for f in header['filenames']:
if f.startswith('/boot/vmlinuz-'):
return f[14:]
elif f.startswith('/boot/efi/EFI/redhat/vmlinuz-'):
return f[29:]
return ""
def get_tag(header):
if header['name'] == "kernel":
return "base"
elif header['name'].startswith("kernel-"):
return header['name'][7:]
return ""
versions = []
iutil.resetRpmDb()
ts = rpm.TransactionSet(ROOT_PATH)
mi = ts.dbMatch('provides', 'kernel')
for h in mi:
v = get_version(h)
tag = get_tag(h)
if v == "" or tag == "":
log.warning("Unable to determine kernel type/version for %s-%s-%s.%s" %(h['name'], h['version'], h['release'], h['arch']))
continue
# rpm really shouldn't return the same kernel more than once... but
# sometimes it does (#467822)
if (v, h['arch'], tag) in versions:
continue
versions.append( (v, h['arch'], tag) )
return versions
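# Editor's note (added comment): get_version() above relies on fixed prefix lengths:
# len('/boot/vmlinuz-') == 14 and len('/boot/efi/EFI/redhat/vmlinuz-') == 29, so e.g.
# '/boot/vmlinuz-2.6.32-431.el6.x86_64'[14:] yields '2.6.32-431.el6.x86_64' (the
# version string is illustrative).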
def rpmSetupGraphicalSystem(anaconda):
import rpm
iutil.resetRpmDb()
ts = rpm.TransactionSet(ROOT_PATH)
# Only add "rhgb quiet" on non-s390, non-serial installs
if iutil.isConsoleOnVirtualTerminal() and \
(ts.dbMatch('provides', 'rhgb').count() or \
ts.dbMatch('provides', 'plymouth').count()):
anaconda.bootloader.boot_args.update(["rhgb", "quiet"])
if ts.dbMatch('provides', 'service(graphical-login)').count() and \
ts.dbMatch('provides', 'xorg-x11-server-Xorg').count() and \
anaconda.displayMode == 'g' and not flags.usevnc:
anaconda.desktop.setDefaultRunLevel(5)
#Recreate initrd for use when driver disks add modules
def recreateInitrd (kernelTag, instRoot):
log.info("recreating initrd for %s" % (kernelTag,))
iutil.execWithRedirect("/sbin/new-kernel-pkg",
[ "--mkinitrd", "--dracut", "--depmod", "--install", kernelTag ],
stdout = "/dev/null", stderr = "/dev/null",
root = instRoot)
def betaNagScreen(anaconda):
publicBetas = { "Red Hat Linux": "Red Hat Linux Public Beta",
"Red Hat Enterprise Linux": "Red Hat Enterprise Linux Public Beta",
"Fedora Core": "Fedora Core",
"Fedora": "Fedora" }
if anaconda.dir == DISPATCH_BACK:
return DISPATCH_DEFAULT
fileagainst = None
for (key, val) in publicBetas.items():
if productName.startswith(key):
fileagainst = val
if fileagainst is None:
fileagainst = "%s Beta" %(productName,)
while 1:
rc = anaconda.intf.messageWindow(_("Warning"),
_("Warning! This is pre-release software!\n\n"
"Thank you for downloading this "
"pre-release of %(productName)s.\n\n"
"This is not a final "
"release and is not intended for use "
"on production systems. The purpose of "
"this release is to collect feedback "
"from testers, and it is not suitable "
"for day to day usage.\n\n"
"To report feedback, please visit:\n\n"
" %(bugzillaUrl)s\n\n"
"and file a report against '%(fileagainst)s'.\n")
% {'productName': productName,
'bugzillaUrl': bugzillaUrl,
'fileagainst': fileagainst},
type="custom", custom_icon="warning",
custom_buttons=[_("_Exit"), _("_Install Anyway")])
if not rc:
msg = _("Your system will now be rebooted...")
buttons = [_("_Back"), _("_Reboot")]
rc = anaconda.intf.messageWindow( _("Warning! This is pre-release software!"),
msg,
type="custom", custom_icon="warning",
custom_buttons=buttons)
if rc:
sys.exit(0)
else:
break
def doReIPL(anaconda):
if not iutil.isS390() or anaconda.dir == DISPATCH_BACK:
return DISPATCH_DEFAULT
anaconda.reIPLMessage = iutil.reIPL(anaconda, os.getppid())
return DISPATCH_FORWARD
|
mattias-ohlsson/anaconda
|
pyanaconda/packages.py
|
Python
|
gpl-2.0
| 12,756
|
[
"VisIt"
] |
7a3408312e13b254e92fa4a3ba2142f8a0deaab88aad02258053f9dc2af688d2
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import linalg
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=2)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must "
"be smaller than or equal to 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples)
# Build the polytope
C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in range(n_clusters):
C[k, :] *= generator.rand()
for f in range(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in range(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
# Shift the cluster to a vertex
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in range(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in range(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
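# Editor's sketch (not part of the original module): a seeded call illustrating the
# returned shapes; the parameter values are arbitrary but satisfy the constraints
# checked above (informative + redundant + repeated <= n_features, and
# 2 ** n_informative >= n_classes * n_clusters_per_class).
#
#     >>> X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
#     ...                            n_redundant=1, random_state=0)
#     >>> X.shape, y.shape
#     ((20, 5), (20,))
#     >>> sorted(set(y))
#     [0.0, 1.0]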
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50,
allow_unlabeled=True, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : list of tuples
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
if not c in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
return np.array(X, dtype=np.float64), Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples / 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples / 2), np.ones(n_samples / 2)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if not noise is None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples / 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out), np.ones(n_samples_in)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if not noise is None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
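# Editor's sketch (not part of the original module): quick visualization of the two
# toy datasets above; assumes matplotlib is available, which this module does not
# itself require.
#
#     >>> import matplotlib.pyplot as plt
#     >>> X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
#     >>> plt.scatter(X[:, 0], X[:, 1], c=y)                      # doctest: +SKIP
#     >>> X2, y2 = make_circles(n_samples=200, factor=0.5, noise=0.05)
#     >>> plt.scatter(X2[:, 0] + 3, X2[:, 1], c=y2)               # doctest: +SKIP
#     >>> plt.show()                                              # doctest: +SKIP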
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman #1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
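# Editor's sketch (not part of the original module): because `u` and `v` have
# orthonormal columns, the singular values of the returned matrix should match
# the documented profile up to floating-point error.
# >>> X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
# ...                          tail_strength=0.5, random_state=0)
# >>> s = np.linalg.svd(X, compute_uv=False)
# >>> i = np.arange(30, dtype=np.float64)
# >>> profile = 0.5 * np.exp(-(i / 5.) ** 2) + 0.5 * np.exp(-0.1 * i / 5.)
# >>> bool(np.allclose(s, profile))
# True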
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
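# Editor's sketch (not part of the original module): verify the factorization
# Y = D X and the exact per-column sparsity of the code.
# >>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
# ...                                    n_features=6, n_nonzero_coefs=3,
# ...                                    random_state=0)
# >>> Y.shape, D.shape, X.shape
# ((6, 5), (6, 8), (8, 5))
# >>> bool(np.allclose(Y, np.dot(D, X)))
# True
# >>> (X != 0).sum(axis=0)
# array([3, 3, 3, 3, 3])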
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
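# Editor's sketch (not part of the original module): the result should be
# symmetric with strictly positive eigenvalues.
# >>> X = make_spd_matrix(n_dim=4, random_state=0)
# >>> bool(np.allclose(X, X.T))
# True
# >>> bool(np.all(np.linalg.eigvalsh(X) > 0))
# True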
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
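# Editor's sketch (not part of the original module): prec = chol.T * chol with
# an invertible triangular chol, so it is symmetric positive definite by
# construction; a larger alpha gives a sparser Cholesky factor.
# >>> prec = make_sparse_spd_matrix(dim=5, alpha=0.9, random_state=0)
# >>> bool(np.allclose(prec, prec.T))
# True
# >>> bool(np.all(np.linalg.eigvalsh(prec) > 0))
# True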
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perpsective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
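# Editor's sketch (not part of the original module): with noise=0 the first
# coordinate is exactly t * cos(t), i.e. t parametrizes the roll.
# >>> X, t = make_swiss_roll(n_samples=300, noise=0.0, random_state=0)
# >>> X.shape, t.shape
# ((300, 3), (300,))
# >>> bool(np.allclose(X[:, 0], t * np.cos(t)))
# True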
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
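# Editor's sketch (not part of the original module): with shuffle=False the
# samples come back sorted by distance from the mean, and the classes are
# equal-sized quantile shells.
# >>> X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
# ...                                shuffle=False, random_state=0)
# >>> np.bincount(y)
# array([100, 100, 100])
# >>> d = np.sum(X ** 2, axis=1)
# >>> bool(np.all(np.diff(d) >= 0))
# True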
|
maxlikely/scikit-learn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 43,666
|
[
"Gaussian"
] |
8187f99c381454a0bcd56743d6f9cca25b92870fd83e352b31ab9af47061defc
|
#! /usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
def collect_version_input_from_fallback(meta_file='metadata.py'):
"""From *meta_file*, collect lines matching ``_version_{key} = {value}``
and return as dictionary.
"""
cwd = os.path.dirname(os.path.abspath(__file__))
res = dict(re.findall(r"__version_([a-z_]+)\s*=\s*'([^']+)'", open(cwd + '/' + meta_file).read()))
res.pop('_')
return res
def is_git_repo(cwd='./', dot_git_qualifies=False, no_git_cmd_result=False):
"""Returns boolean as to whether *cwd* is under git control. When no ``git``
command available in environment, *no_git_cmd_result* returned. If within
the .git directory of a git repository, *dot_git_qualifies* returned.
"""
command = 'git rev-parse --is-inside-work-tree'
try:
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
except EnvironmentError as e:
# most likely, git command not available
return no_git_cmd_result
(out, err) = process.communicate()
if process.returncode != 0:
# fatal: Not a git repository (or any of the parent directories): .git
return False
if out.strip() == 'true':
# in a git repo and not within .git dir
return True
if out.strip() == 'false':
# in a git repo in .git dir
return dot_git_qualifies
def collect_version_input_from_git():
"""Returns a dictionary filled with ``git describe`` results, clean/dirty
flag, and branch status. *cwd* should already be confirmed as a git
repository; this doesn't catch returncodes or EnvironmentErrors because the
raised errors are preferred to an incomplete return dictionary.
"""
cwd = os.path.dirname(os.path.abspath(__file__))
res = {}
# * only want annotated tags, so not --all
# * in case *no* tags (impossible in Psi4), --always gets at least hash
# * get commits & hash info even if on tag using --long
command = 'git describe --abbrev=7 --long --always HEAD'
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
fields = str(out).rstrip().split('-')
if len(fields) == 3:
# normal: 0.1-62-ga68d223
res['latest_annotated_v_tag'] = fields[0][1:] # drop the "v"; tag mismatch caught later
res['commits_since_tag'] = fields[1]
res['seven_char_hash'] = fields[2][1:] # drop the "g" git identifier
else:
# no tag present: a68d223
res['latest_annotated_v_tag'] = ''
res['commits_since_tag'] = ''
res['seven_char_hash'] = fields[0] # no prepended "g"
command = 'git diff-index --name-only HEAD'
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
res['is_clean'] = False if str(out).rstrip() else True
command = 'git rev-parse --abbrev-ref HEAD' # returns HEAD when detached
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
res['branch_name'] = str(out).rstrip()
return res
def reconcile_and_compute_version_output(quiet=False):
res = collect_version_input_from_fallback(meta_file='metadata.py')
meta_latest_annotated_v_tag, _, meta_seven_char_hash = res['long'].partition('+')
# this is the tag format (PEP440 compliant) that our machinery is expecting.
# let's catch any deviations with Travis before it can corrupt versioning.
sane_tag = re.compile("""^(?P<tag>(?P<forwardseries>\d+\.\d+(?P<patch>\.[1-9]+)?)(?(patch)|(?P<prere>((a)|(b)|(rc))\d+)?))$""")
mobj = sane_tag.match(meta_latest_annotated_v_tag)
if mobj:
# some versioning machinery (looking at you, CMake) does strictly
# numerical comparisons such as M.m.p.t and thus can't handle
# prereleases and dev snapshots. We compute a Most Recent Ancestral
# Release tag (e.g., 1.0 or 1.12.1) for a backward release series.
backwardseries = mobj.group('forwardseries')
if mobj.group('prere'):
tmp = backwardseries.split('.')
bumpdown = str(int(tmp[-1]) - 1)
if bumpdown == '-1':
print("""Unavoidable snag. Probably "2.0". Can't predict backward series from present prerelease.""")
sys.exit()
else:
tmp[-1] = bumpdown
backwardseries = '.'.join(tmp)
else:
print("""Tag in {} is malformed: {}""".format(
'metadata.py', meta_latest_annotated_v_tag))
sys.exit()
cwd = os.path.dirname(os.path.abspath(__file__))
if is_git_repo(cwd=cwd):
res.update(collect_version_input_from_git())
# establish the default response
project_release = False
project_prerelease = False
project_version = 'undefined'
project_version_long = 'undefined+' + res['seven_char_hash']
if res['latest_annotated_v_tag'] == meta_latest_annotated_v_tag:
trial_version_long_release = res['latest_annotated_v_tag'] + '+' + res['seven_char_hash']
trial_version_devel = res['upcoming_annotated_v_tag'] + '.dev' + res['commits_since_tag']
trial_version_long_devel = trial_version_devel + '+' + res['seven_char_hash']
if int(res['commits_since_tag']) == 0:
if trial_version_long_release == res['long']:
print("""Amazing, this can't actually happen that git hash stored at git commit.""")
sys.exit()
else:
if meta_seven_char_hash == 'zzzzzzz':
if not quiet:
print("""Defining {} version: {} (recorded and computed)""".format(
'prerelease' if mobj.group('prere') else 'release', trial_version_long_release))
project_release = res['is_clean'] and not mobj.group('prere')
project_prerelease = res['is_clean'] and mobj.group('prere')
project_version = meta_latest_annotated_v_tag
project_version_long = trial_version_long_release
else:
print("""Undefining version for irreconcilable hashes: {} (computed) vs {} (recorded)""".format(
trial_version_long_release, res['long']))
else:
if res['branch_name'].endswith('.x'):
print("""Undefining version as development snapshots not allowed on maintenance branch: {} (rejected computed)""".format(
trial_version_long_devel))
# TODO prob should be undef unless on master
else:
if not quiet:
print("""Defining development snapshot version: {} (computed)""".format(
trial_version_long_devel))
project_version = trial_version_devel
project_version_long = trial_version_long_devel
else:
print("""Undefining version for irreconcilable tags: {} (computed) vs {} (recorded)""".format(
res['latest_annotated_v_tag'], meta_latest_annotated_v_tag))
else:
print("""Blindly (no git) accepting release version: {} (recorded)""".format(
res['long']))
# assumes that zip only comes from [pre]release. GitHub hides others, but they're there.
project_release = not bool(mobj.group('prere'))
project_prerelease = bool(mobj.group('prere'))
project_version = meta_latest_annotated_v_tag
project_version_long = res['long']
res['is_clean'] = True
res['branch_name'] = ''
def mapped_cmake_version(last_release, is_release):
"""CMake expects MAJOR.MINOR.PATCH.TWEAK. The ancestral *last_release*
is padded into the first three roles. If not *is_release*, the tweak role
collects all postrelease states (prereleases and devel snapshots) into
dummy 999 that at least gets them sorted correctly between releases and
allows EXACT CMake version comparisons. Returns, for example, 1.1.0.0 for
release 1.1, 1.3.4.0 for maintenance release 1.3.4, and 1.0.0.999 for
prerelease 1.1a1 or snapshot 1.1.dev600
"""
cm = last_release.split('.')
cm += ['0'] * (4 - len(cm))
if not is_release:
cm[-1] = '999'
cm = '.'.join(cm)
return cm
return {'__version__': project_version,
'__version_long': project_version_long,
'__version_is_clean': res['is_clean'],
'__version_branch_name': res['branch_name'],
'__version_last_release': backwardseries,
'__version_cmake': mapped_cmake_version(backwardseries, project_release),
'__version_release': project_release,
'__version_prerelease': project_prerelease}
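# Editor's note (not part of the original script): the nested helper
# mapped_cmake_version pads the ancestral release into CMake's
# MAJOR.MINOR.PATCH.TWEAK scheme; illustrative mappings, following the
# docstring above:
#   mapped_cmake_version('1.1',   is_release=True)  -> '1.1.0.0'
#   mapped_cmake_version('1.3.4', is_release=True)  -> '1.3.4.0'
#   mapped_cmake_version('1.0',   is_release=False) -> '1.0.0.999'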
def write_new_metafile(versdata, outfile='metadata.out.py'):
formatter_fn = """
def version_formatter(formatstring='{version}'):
if formatstring == 'all':
formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
release = 'release' if (__version_release == 'True') else ('prerelease' if (__version_prerelease == 'True') else '')
ans = formatstring.format(version=__version__,
versionlong=__version_long,
githash=__version_long[len(__version__)+1:],
clean='' if __version_is_clean == 'True' else 'dirty',
branch=__version_branch_name,
lastrel=__version_last_release,
cmake=__version_cmake,
release=release)
return ans
"""
main_fn = """
if __name__ == '__main__':
print(version_formatter(formatstring='all'))
"""
with open(os.path.abspath(outfile), 'w') as handle:
for k in sorted(versdata):
handle.write("""{} = '{}'\n""".format(k, versdata[k]))
handle.write(formatter_fn)
handle.write(main_fn)
def write_new_cmake_metafile(versdata, outfile='metadata.out.cmake'):
main_fn = """
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
${{WTO}}/${{PN}}ConfigVersion.cmake
VERSION {ver}
COMPATIBILITY AnyNewerVersion)
"""
with open(os.path.abspath(outfile), 'w') as handle:
handle.write(main_fn.format(ver=versdata['__version_cmake']))
def version_formatter(versdata, formatstring="""{version}"""):
"""Return version information string with data from *versdata* when
supplied with *formatstring* suitable for ``formatstring.format()``.
Use plaintext and any placeholders among: version, versionlong, githash,
branch, clean, release, lastrel, cmake. For example, '{branch}@{githash}'
returns something like 'fix200@1234567'.
"""
if formatstring == 'all':
formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
release = 'release' if versdata['__version_release'] else ('prerelease' if versdata['__version_prerelease'] else '')
ans = formatstring.format(version=versdata['__version__'],
versionlong=versdata['__version_long'],
githash=versdata['__version_long'][len(versdata['__version__']) + 1:],
clean='' if versdata['__version_is_clean'] else 'dirty',
branch=versdata['__version_branch_name'],
lastrel=versdata['__version_last_release'],
cmake=versdata['__version_cmake'],
release=release)
return ans
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to extract Psi4 version from source. Use psi4.version_formatter(fmt_string) after build.')
parser.add_argument('--metaout', default='metadata.out.py', help='file to which the computed version info written')
parser.add_argument('--cmakeout', default='metadata.out.cmake', help='file to which the CMake ConfigVersion generator written')
parser.add_argument('--format', default='all', help='string like "{version} {githash}" to be filled in and returned')
parser.add_argument('--formatonly', action='store_true', help='print only the format string, not the detection info')
args = parser.parse_args()
ans = reconcile_and_compute_version_output(quiet=args.formatonly)
write_new_metafile(ans, args.metaout)
write_new_cmake_metafile(ans, args.cmakeout)
ans2 = version_formatter(ans, formatstring=args.format)
print(ans2)
|
susilehtola/psi4
|
psi4/versioner.py
|
Python
|
lgpl-3.0
| 14,574
|
[
"Psi4"
] |
9f9127345ad664d4cc4ea906ab2f30ca9a61e249e63d5eee5a8e3c9d7c3e5104
|
#!/usr/bin/env python3
from bisect import bisect_left
import matplotlib
from utils import TANGO, filename
from matplotlib import pyplot, mlab
from numpy import linspace
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['savefig.dpi'] = 300
def gaussian(x, mu, sigma):
return mlab.normpdf(x, mu, sigma)
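# Editor's note (assumption, not part of the original script): mlab.normpdf was
# deprecated and later removed from matplotlib (gone as of 3.1); on newer
# installs an equivalent drop-in is the explicit normal density, e.g.:
# def gaussian(x, mu, sigma):
#     return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
# (this would need `import numpy as np`, which the original file does not do).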
def gaussian_plt():
pyplot.clf()
x = linspace(-10, 10, num = 200)
y = gaussian(x, 0, 1)
NDEV = 1.5
nlo = bisect_left(x, -NDEV)
nhi = bisect_left(x, +NDEV)
xlo, ylo = x[:nlo], y[:nlo]
xmi, ymi = x[nlo-1:nhi+1], y[nlo-1:nhi+1]
xhi, yhi = x[nhi:], y[nhi:]
ax = pyplot.axes(frameon = False)
ax.set_xlim((-5,5))
pyplot.plot(xmi, ymi, color = TANGO["green"][2])
pyplot.fill_between(xmi, 0, ymi, color = TANGO["green"][1])
for (xx, yy) in ((xlo, ylo), (xhi, yhi)):
pyplot.plot(xx, yy, color = TANGO["red"][1])
pyplot.fill_between(xx, 0, yy, color = TANGO["red"][0])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
pyplot.tight_layout()
batch_mode, fname = filename("gaussians-preview.pdf")
if batch_mode:
gaussian_plt()
pyplot.savefig(fname, transparent = True)
|
cpitclaudel/dBoost
|
graphics/gaussians-preview.pdf.py
|
Python
|
gpl-3.0
| 1,197
|
[
"Gaussian"
] |
9a01a92bbf0064c88b8b410a52a09ca3885592ddecd6408d35d05045a8be4ad6
|
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
import urllib2
from urllib2 import HTTPError, URLError
# the original referenced an undefined ``utils.warnings``; the standard
# library ``warnings`` module is used below instead
import warnings
class LookupModule(LookupBase):
def run(self, terms, inject=None, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
try:
r = urllib2.Request(term)
response = urllib2.urlopen(r)
except HTTPError as e:
# HTTPError is a subclass of URLError and must be caught first
warnings.warn("Received HTTP error for %s : %s" % (term, str(e)))
continue
except URLError as e:
warnings.warn("Failed lookup url for %s : %s" % (term, str(e)))
continue
for line in response.read().splitlines():
ret.append(line)
return ret
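# Editor's sketch (not part of the original plugin): typical playbook usage,
# shown as a comment so the module stays importable; the URL below is
# illustrative only.
#
#   - debug: msg="{{ item }}"
#     with_url: "https://example.com/hosts.txt"
#
# Each line of the fetched body becomes one item of the returned list.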
|
majidaldo/ansible
|
v2/ansible/plugins/lookup/url.py
|
Python
|
gpl-3.0
| 1,545
|
[
"Brian"
] |
09d400ad8971e1e350fcebbda37a13be3716817165aec0c0a39375ebd2ada82b
|