| text (string, 12-1.05M chars) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
########################################################################
# $Id$
########################################################################
"""
Matcher class. It matches Agent Site capabilities to job requirements.
It also provides an XMLRPC interface to the Matcher
"""
__RCSID__ = "$Id$"
import time
from types import StringType, DictType, StringTypes
import threading
from DIRAC.ConfigurationSystem.Client.Helpers import Registry, Operations
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Security import Properties
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
DEBUG = 0
gMutex = threading.Semaphore()
gTaskQueues = {}
gJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
gPilotAgentsDB = False
def initializeMatcherHandler( serviceInfo ):
""" Matcher Service initialization
"""
global gJobDB
global gJobLoggingDB
global gTaskQueueDB
global gPilotAgentsDB
# Create JobDB object and initialize its tables.
gJobDB = JobDB( checkTables = True )
# Create JobLoggingDB object and initialize its tables.
gJobLoggingDB = JobLoggingDB( checkTables = True )
# Create PilotAgentsDB object and initialize its tables.
gPilotAgentsDB = PilotAgentsDB( checkTables = True )
gTaskQueueDB = TaskQueueDB()
gMonitor.registerActivity( 'matchTime', "Job matching time",
'Matching', "secs" , gMonitor.OP_MEAN, 300 )
gMonitor.registerActivity( 'matchesDone', "Job Match Request",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'matchesOK', "Matched jobs",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'numTQs', "Number of Task Queues",
'Matching', "tqsk queues" , gMonitor.OP_MEAN, 300 )
gTaskQueueDB.recalculateTQSharesForAll()
gThreadScheduler.addPeriodicTask( 120, gTaskQueueDB.recalculateTQSharesForAll )
gThreadScheduler.addPeriodicTask( 60, sendNumTaskQueues )
sendNumTaskQueues()
return S_OK()
def sendNumTaskQueues():
result = gTaskQueueDB.getNumTaskQueues()
if result[ 'OK' ]:
gMonitor.addMark( 'numTQs', result[ 'Value' ] )
else:
gLogger.error( "Cannot get the number of task queues", result[ 'Message' ] )
class Limiter:
__csDictCache = DictCache()
__condCache = DictCache()
__delayMem = {}
def __init__( self, opsHelper ):
""" Constructor
"""
self.__runningLimitSection = "JobScheduling/RunningLimit"
self.__matchingDelaySection = "JobScheduling/MatchingDelay"
self.__opsHelper = opsHelper
def checkJobLimit( self ):
return self.__opsHelper.getValue( "JobScheduling/CheckJobLimits", True )
def checkMatchingDelay( self ):
return self.__opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True )
def getNegativeCond( self ):
""" Get negative condition for ALL sites
"""
orCond = Limiter.__condCache.get( "GLOBAL" )
if orCond:
return orCond
negCond = {}
#Run Limit
result = self.__opsHelper.getSections( self.__runningLimitSection )
sites = []
if result[ 'OK' ]:
sites = result[ 'Value' ]
for siteName in sites:
result = self.__getRunningCondition( siteName )
if not result[ 'OK' ]:
continue
data = result[ 'Value' ]
if data:
negCond[ siteName ] = data
#Delay limit
result = self.__opsHelper.getSections( self.__matchingDelaySection )
sites = []
if result[ 'OK' ]:
sites = result[ 'Value' ]
for siteName in sites:
result = self.__getDelayCondition( siteName )
if not result[ 'OK' ]:
continue
data = result[ 'Value' ]
if not data:
continue
if siteName in negCond:
negCond[ siteName ] = self.__mergeCond( negCond[ siteName ], data )
else:
negCond[ siteName ] = data
orCond = []
for siteName in negCond:
negCond[ siteName ][ 'Site' ] = siteName
orCond.append( negCond[ siteName ] )
Limiter.__condCache.add( "GLOBAL", 10, orCond )
return orCond
def getNegativeCondForSite( self, siteName ):
""" Generate a negative query based on the limits set on the site
"""
# Check if Limits are imposed onto the site
negativeCond = {}
if self.checkJobLimit():
result = self.__getRunningCondition( siteName )
if result['OK']:
negativeCond = result['Value']
gLogger.verbose( 'Negative conditions for site %s after checking limits are: %s' % ( siteName, str( negativeCond ) ) )
if self.checkMatchingDelay():
result = self.__getDelayCondition( siteName )
if result['OK']:
delayCond = result['Value']
gLogger.verbose( 'Negative conditions for site %s after delay checking are: %s' % ( siteName, str( delayCond ) ) )
negativeCond = self.__mergeCond( negativeCond, delayCond )
if negativeCond:
gLogger.info( 'Negative conditions for site %s are: %s' % ( siteName, str( negativeCond ) ) )
return negativeCond
def __mergeCond( self, negCond, addCond ):
""" Merge two negative dicts
"""
#Merge both negative dicts
for attr in addCond:
if attr not in negCond:
negCond[ attr ] = []
for value in addCond[ attr ]:
if value not in negCond[ attr ]:
negCond[ attr ].append( value )
return negCond
def __extractCSData( self, section ):
""" Extract limiting information from the CS in the form:
{ 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
"""
stuffDict = Limiter.__csDictCache.get( section )
if stuffDict:
return S_OK( stuffDict )
result = self.__opsHelper.getSections( section )
if not result['OK']:
return result
attribs = result['Value']
stuffDict = {}
for attName in attribs:
result = self.__opsHelper.getOptionsDict( "%s/%s" % ( section, attName ) )
if not result[ 'OK' ]:
return result
attLimits = result[ 'Value' ]
try:
attLimits = dict( [ ( k, int( attLimits[k] ) ) for k in attLimits ] )
except Exception, excp:
errMsg = "%s/%s has to contain numbers: %s" % ( section, attName, str( excp ) )
gLogger.error( errMsg )
return S_ERROR( errMsg )
stuffDict[ attName ] = attLimits
Limiter.__csDictCache.add( section, 300, stuffDict )
return S_OK( stuffDict )
def __getRunningCondition( self, siteName ):
""" Get extra conditions allowing site throttling
"""
siteSection = "%s/%s" % ( self.__runningLimitSection, siteName )
result = self.__extractCSData( siteSection )
if not result['OK']:
return result
limitsDict = result[ 'Value' ]
#limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not limitsDict:
return S_OK( {} )
# Check if the site is exceeding the given limits
negCond = {}
for attName in limitsDict:
if attName not in gJobDB.jobAttributeNames:
gLogger.error( "Attribute %s does not exist. Check the job limits" % attName )
continue
cK = "Running:%s:%s" % ( siteName, attName )
data = self.__condCache.get( cK )
if not data:
result = gJobDB.getCounters( 'Jobs', [ attName ], { 'Site' : siteName, 'Status' : [ 'Running', 'Matched', 'Stalled' ] } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
data = dict( [ ( k[0][ attName ], k[1] ) for k in data ] )
self.__condCache.add( cK, 10, data )
for attValue in limitsDict[ attName ]:
limit = limitsDict[ attName ][ attValue ]
running = data.get( attValue, 0 )
if running >= limit:
gLogger.verbose( 'Job Limit imposed at %s on %s/%s=%d,'
' %d jobs already deployed' % ( siteName, attName, attValue, limit, running ) )
if attName not in negCond:
negCond[ attName ] = []
negCond[ attName ].append( attValue )
#negCond is something like : {'JobType': ['Merge']}
return S_OK( negCond )
def updateDelayCounters( self, siteName, jid ):
#Get the info from the CS
siteSection = "%s/%s" % ( self.__matchingDelaySection, siteName )
result = self.__extractCSData( siteSection )
if not result['OK']:
return result
delayDict = result[ 'Value' ]
#delayDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not delayDict:
return S_OK()
attNames = []
for attName in delayDict:
if attName not in gJobDB.jobAttributeNames:
gLogger.error( "Attribute %s does not exist in the JobDB. Please fix it!" % attName )
else:
attNames.append( attName )
result = gJobDB.getJobAttributes( jid, attNames )
if not result[ 'OK' ]:
gLogger.error( "While retrieving attributes coming from %s: %s" % ( siteSection, result[ 'Message' ] ) )
return result
atts = result[ 'Value' ]
#Create the DictCache if not there
if siteName not in Limiter.__delayMem:
Limiter.__delayMem[ siteName ] = DictCache()
#Update the counters
delayCounter = Limiter.__delayMem[ siteName ]
for attName in atts:
attValue = atts[ attName ]
if attValue in delayDict[ attName ]:
delayTime = delayDict[ attName ][ attValue ]
gLogger.notice( "Adding delay for %s/%s=%s of %s secs" % ( siteName, attName,
attValue, delayTime ) )
delayCounter.add( ( attName, attValue ), delayTime )
return S_OK()
def __getDelayCondition( self, siteName ):
""" Get extra conditions allowing matching delay
"""
if siteName not in Limiter.__delayMem:
return S_OK( {} )
lastRun = Limiter.__delayMem[ siteName ].getKeys()
negCond = {}
for attName, attValue in lastRun:
if attName not in negCond:
negCond[ attName ] = []
negCond[ attName ].append( attValue )
return S_OK( negCond )
#####
#
# End of Limiter
#
#####
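#####
#
# Illustrative usage sketch of the Limiter (added commentary, not part of the
# original service code). The VO, setup, site name and CS option below are
# assumptions chosen only to show how CS limits become negative match conditions:
#
#   opsHelper = Operations.Operations( vo = 'somevo', setup = 'SomeSetup' )
#   limiter = Limiter( opsHelper )
#   # With a CS option such as JobScheduling/RunningLimit/LCG.Example.org/JobType/Merge = 20
#   # and 20 or more 'Merge' jobs already Running/Matched/Stalled at that site,
#   # limiter.getNegativeCondForSite( 'LCG.Example.org' ) returns something like
#   #   { 'JobType' : [ 'Merge' ] }
#   # which matchAndGetJob() then uses to exclude the corresponding task queues.
#
#####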
class MatcherHandler( RequestHandler ):
__opsCache = {}
def initialize( self ):
self.__opsHelper = self.__getOpsHelper()
self.__limiter = Limiter( self.__opsHelper )
self.__siteStatus = SiteStatus()
def __getOpsHelper( self, setup = False, vo = False ):
if not setup:
setup = self.srv_getClientSetup()
if not vo:
vo = Registry.getVOForGroup( self.getRemoteCredentials()[ 'group' ] )
cKey = ( vo, setup )
if cKey not in MatcherHandler.__opsCache:
MatcherHandler.__opsCache[ cKey ] = Operations.Operations( vo = vo, setup = setup )
return MatcherHandler.__opsCache[ cKey ]
def __processResourceDescription( self, resourceDescription ):
# Check and form the resource description dictionary
resourceDict = {}
if type( resourceDescription ) in StringTypes:
classAdAgent = ClassAd( resourceDescription )
if not classAdAgent.isOK():
return S_ERROR( 'Illegal Resource JDL' )
gLogger.verbose( classAdAgent.asJDL() )
for name in gTaskQueueDB.getSingleValueTQDefFields():
if classAdAgent.lookupAttribute( name ):
if name == 'CPUTime':
resourceDict[name] = classAdAgent.getAttributeInt( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
for name in gTaskQueueDB.getMultiValueMatchFields():
if classAdAgent.lookupAttribute( name ):
if name == 'SubmitPool':
resourceDict[name] = classAdAgent.getListFromExpression( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
# Check if a JobID is requested
if classAdAgent.lookupAttribute( 'JobID' ):
resourceDict['JobID'] = classAdAgent.getAttributeInt( 'JobID' )
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization' ):
if classAdAgent.lookupAttribute( k ):
resourceDict[ k ] = classAdAgent.getAttributeString( k )
else:
for name in gTaskQueueDB.getSingleValueTQDefFields():
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
for name in gTaskQueueDB.getMultiValueMatchFields():
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
if resourceDescription.has_key( 'JobID' ):
resourceDict['JobID'] = resourceDescription['JobID']
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization',
'PilotReference', 'PilotInfoReportedFlag', 'PilotBenchmark' ):
if k in resourceDescription:
resourceDict[ k ] = resourceDescription[ k ]
return resourceDict
def selectJob( self, resourceDescription ):
""" Main job selection function to find the highest priority job
matching the resource capacity
"""
startTime = time.time()
resourceDict = self.__processResourceDescription( resourceDescription )
credDict = self.getRemoteCredentials()
#Check credentials if not generic pilot
if Properties.GENERIC_PILOT in credDict[ 'properties' ]:
#You can only match groups in the same VO
vo = Registry.getVOForGroup( credDict[ 'group' ] )
result = Registry.getGroupsForVO( vo )
if result[ 'OK' ]:
resourceDict[ 'OwnerGroup' ] = result[ 'Value' ]
else:
#If it's a private pilot, the DN has to be the same
if Properties.PILOT in credDict[ 'properties' ]:
gLogger.notice( "Setting the resource DN to the credentials DN" )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
#If it's job sharing, the group has to be the same and we just check that the DN (if any)
# belongs to the same group
elif Properties.JOB_SHARING in credDict[ 'properties' ]:
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
gLogger.notice( "Setting the resource group to the credentials group" )
if 'OwnerDN' in resourceDict and resourceDict[ 'OwnerDN' ] != credDict[ 'DN' ]:
ownerDN = resourceDict[ 'OwnerDN' ]
result = Registry.getGroupsForDN( resourceDict[ 'OwnerDN' ] )
if not result[ 'OK' ] or credDict[ 'group' ] not in result[ 'Value' ]:
#DN is not in the same group! bad boy.
gLogger.notice( "You cannot request jobs from DN %s. It does not belong to your group!" % ownerDN )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
#Nothing special, group and DN have to be the same
else:
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
# Check the pilot DIRAC version
if self.__opsHelper.getValue( "Pilot/CheckVersion", True ):
if 'ReleaseVersion' not in resourceDict:
if not 'DIRACVersion' in resourceDict:
return S_ERROR( 'Version check requested and not provided by Pilot' )
else:
pilotVersion = resourceDict['DIRACVersion']
else:
pilotVersion = resourceDict['ReleaseVersion']
validVersions = self.__opsHelper.getValue( "Pilot/Version", [] )
if validVersions and pilotVersion not in validVersions:
return S_ERROR( 'Pilot version %s does not match any valid production version ( %s )' % \
( pilotVersion, ",".join( validVersions ) ) )
#Check project if requested
validProject = self.__opsHelper.getValue( "Pilot/Project", "" )
if validProject:
if 'ReleaseProject' not in resourceDict:
return S_ERROR( "Version check requested but expected project %s not received" % validProject )
if resourceDict[ 'ReleaseProject' ] != validProject:
return S_ERROR( "Version check requested but expected project %s != received %s" % ( validProject,
resourceDict[ 'ReleaseProject' ] ) )
# Update pilot information
pilotInfoReported = False
pilotReference = resourceDict.get( 'PilotReference', '' )
if pilotReference:
if "PilotInfoReportedFlag" in resourceDict and not resourceDict['PilotInfoReportedFlag']:
gridCE = resourceDict.get( 'GridCE', 'Unknown' )
site = resourceDict.get( 'Site', 'Unknown' )
benchmark = resourceDict.get( 'PilotBenchmark', 0.0 )
gLogger.verbose('Reporting pilot info for %s: gridCE=%s, site=%s, benchmark=%f' % (pilotReference,gridCE,site,benchmark) )
result = gPilotAgentsDB.setPilotStatus( pilotReference, status = 'Running',
gridSite = site,
destination = gridCE,
benchmark = benchmark )
if result['OK']:
pilotInfoReported = True
#Check the site mask
if not 'Site' in resourceDict:
return S_ERROR( 'Missing Site Name in Resource JDL' )
# Get common site mask and check the agent site
result = self.__siteStatus.getUsableSites( 'ComputingAccess' )
if not result['OK']:
return S_ERROR( 'Internal error: can not get site mask' )
usableSites = result['Value']
siteName = resourceDict['Site']
if siteName not in usableSites:
# if 'GridCE' not in resourceDict:
# return S_ERROR( 'Site not in mask and GridCE not specified' )
# Even if the site is banned, if it defines a CE, it must be able to check it
# del resourceDict['Site']
# Banned site can only take Test jobs
resourceDict['JobType'] = 'Test'
resourceDict['Setup'] = self.serviceInfoDict['clientSetup']
gLogger.verbose( "Resource description:" )
for key in resourceDict:
gLogger.verbose( "%s : %s" % ( key.rjust( 20 ), resourceDict[ key ] ) )
negativeCond = self.__limiter.getNegativeCondForSite( siteName )
result = gTaskQueueDB.matchAndGetJob( resourceDict, negativeCond = negativeCond )
if DEBUG:
print result
if not result['OK']:
return result
result = result['Value']
if not result['matchFound']:
return S_ERROR( 'No match found' )
jobID = result['jobId']
resAtt = gJobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup', 'Status'] )
if not resAtt['OK']:
return S_ERROR( 'Could not retrieve job attributes' )
if not resAtt['Value']:
return S_ERROR( 'No attributes returned for job' )
if not resAtt['Value']['Status'] == 'Waiting':
gLogger.error( 'Job matched by the TQ is not in Waiting state', str( jobID ) )
result = gTaskQueueDB.deleteJob( jobID )
if not result[ 'OK' ]:
return result
return S_ERROR( "Job %s is not in Waiting state" % str( jobID ) )
attNames = ['Status','MinorStatus','ApplicationStatus','Site']
attValues = ['Matched','Assigned','Unknown',siteName]
result = gJobDB.setJobAttributes( jobID, attNames, attValues )
# result = gJobDB.setJobStatus( jobID, status = 'Matched', minor = 'Assigned' )
result = gJobLoggingDB.addLoggingRecord( jobID,
status = 'Matched',
minor = 'Assigned',
source = 'Matcher' )
result = gJobDB.getJobJDL( jobID )
if not result['OK']:
return S_ERROR( 'Failed to get the job JDL' )
resultDict = {}
resultDict['JDL'] = result['Value']
resultDict['JobID'] = jobID
matchTime = time.time() - startTime
gLogger.info( "Match time: [%s]" % str( matchTime ) )
gMonitor.addMark( "matchTime", matchTime )
# Get some extra stuff into the response returned
resOpt = gJobDB.getJobOptParameters( jobID )
if resOpt['OK']:
for key, value in resOpt['Value'].items():
resultDict[key] = value
resAtt = gJobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
if not resAtt['OK']:
return S_ERROR( 'Could not retrieve job attributes' )
if not resAtt['Value']:
return S_ERROR( 'No attributes returned for job' )
if self.__opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True ):
self.__limiter.updateDelayCounters( siteName, jobID )
# Report pilot-job association
if pilotReference:
result = gPilotAgentsDB.setCurrentJobID( pilotReference, jobID )
result = gPilotAgentsDB.setJobForPilot( jobID, pilotReference, updateStatus=False )
resultDict['DN'] = resAtt['Value']['OwnerDN']
resultDict['Group'] = resAtt['Value']['OwnerGroup']
resultDict['PilotInfoReportedFlag'] = pilotInfoReported
return S_OK( resultDict )
##############################################################################
types_requestJob = [ [StringType, DictType] ]
def export_requestJob( self, resourceDescription ):
""" Serve a job to the request of an agent which is the highest priority
one matching the agent's site capacity
"""
result = self.selectJob( resourceDescription )
gMonitor.addMark( "matchesDone" )
if result[ 'OK' ]:
gMonitor.addMark( "matchesOK" )
return result
##############################################################################
types_getActiveTaskQueues = []
def export_getActiveTaskQueues( self ):
""" Return all task queues
"""
return gTaskQueueDB.retrieveTaskQueues()
##############################################################################
types_getMatchingTaskQueues = [ DictType ]
def export_getMatchingTaskQueues( self, resourceDict ):
""" Return all task queues
"""
if 'Site' in resourceDict and type( resourceDict[ 'Site' ] ) in StringTypes:
negativeCond = self.__limiter.getNegativeCondForSite( resourceDict[ 'Site' ] )
else:
negativeCond = self.__limiter.getNegativeCond()
return gTaskQueueDB.retrieveTaskQueuesThatMatch( resourceDict, negativeCond = negativeCond )
##############################################################################
types_matchAndGetTaskQueue = [ DictType ]
def export_matchAndGetTaskQueue( self, resourceDict ):
""" Return matching task queues
"""
return gTaskQueueDB.matchAndGetTaskQueue( resourceDict )
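##############################################################################
# Illustrative client-side sketch (added commentary, not part of this handler).
# The resource description values are assumptions for the example; the service
# path follows the usual DIRAC naming for this handler:
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   matcher = RPCClient( "WorkloadManagement/Matcher" )
#   resourceDescription = { 'Site' : 'LCG.Example.org', 'CPUTime' : 86400,
#                           'PilotReference' : 'https://example.org/pilot/123' }
#   result = matcher.requestJob( resourceDescription )
#   if result[ 'OK' ]:
#     jobJDL = result[ 'Value' ][ 'JDL' ]
##############################################################################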
| sposs/DIRAC | WorkloadManagementSystem/Service/MatcherHandler.py | Python | gpl-3.0 | 23,052 | ["DIRAC"] | 704763f10b8b65bfe6b97525cf900e8771a042bf64e1d3901a73ea62a2bdddf9 |
# CIS312 Final Project
# Created by Seth Bagdanov 2/7/2017
# Calculates and displays a recipe based on the batch size
# Python v3.6
#
# REFERENCES -----
# PRICING: http://grocery.walmart.com/, https://www.dutchvalleyfoods.com/
# RECIPE: http://www.bettycrocker.com/recipes/ultimate-chocolate-chip-cookies/77c14e03-d8b0-4844-846d-f19304f61c57
# save instruction string for future use
ingredientString = "\nYour batch ingredients are as follows: "
# Welcome message
print("CIS312 COOKIE RECIPE CALCULATOR --------------------")
# Set minimum for regular batch to become bulk batch
bulkBatchSizeMin = 192
# Set base quantity variables for standard batch
standardBatch = 24 # base recipe size
numGSaltedButter = 150.0 / standardBatch
numGBrownSugar = 80.0 / standardBatch
numGWhiteSugar = 80.0 / standardBatch
numTspVanilla = 2.0 / standardBatch
numEaEgg = 1.0 / standardBatch
numGWhiteFlour = 225.0 / standardBatch
numTspBakingSoda = 0.25 / standardBatch
numTspSalt = 0.25 / standardBatch
numGChocolateChip = 200 / standardBatch
# Set regular pricing variables for items
unitCostSaltedButter = 0.006158940397
unitCostBrownSugar = 0.002560706402
unitCostWhiteSugar = 0.001719955899
unitCostVanilla = 0.406666666667
unitCostEgg = 0.131666666667
unitCostWhiteFlour = 0.001128747795
unitCostBakingSoda = 0.002202643172
unitCostSalt = 0.005384615385
unitCostChocolateChip = 0.008058823529
# Set bulk pricing variables for items
unitBulkCostSaltedButter = 0.006158940397
unitBulkCostBrownSugar = 0.002560706402
unitBulkCostWhiteSugar = 0.001719955899
unitBulkCostVanilla = 0.034192708333
unitBulkCostEgg = 0.131666666667
unitBulkCostWhiteFlour = 0.001128747795
unitBulkCostBakingSoda = 0.001902149950
unitBulkCostSalt = 0.000997372088
unitBulkCostChocolateChip = 0.005703804300
# Take input from user as to how many cookies
batchSize = int(input('Please enter the batch quantity: '))
# Determine batch size, regular or bulk
# Size is under min, proceed with regular pricing
if 0 < batchSize < bulkBatchSizeMin:
print(ingredientString)
print(" COST QUANTITY UNIT ITEM") # table header
# Salted Butter
print("{:10.2f}".format(unitCostSaltedButter * batchSize * numGSaltedButter)
, "{:10.2f}".format(numGSaltedButter * batchSize)
, " g "
, " Salted Butter")
# Brown Sugar
print("{:10.2f}".format(unitCostBrownSugar * batchSize * numGBrownSugar)
, "{:10.2f}".format(numGBrownSugar * batchSize)
, " g "
, " Brown Sugar")
# White Sugar
print("{:10.2f}".format(unitCostWhiteSugar * batchSize * numGWhiteSugar)
, "{:10.2f}".format(numGWhiteSugar * batchSize)
, " g "
, " White Sugar")
# Vanilla
print("{:10.2f}".format(unitCostVanilla * batchSize * numTspVanilla)
, "{:10.2f}".format(numTspVanilla * batchSize)
, " tsp"
, " Vanilla")
# Egg
print("{:10.2f}".format(unitCostEgg * batchSize * numEaEgg)
, "{:10.0f}".format(numEaEgg * batchSize)
, " ea "
, " Eggs")
# White Flour
print("{:10.2f}".format(unitCostWhiteFlour * batchSize * numGWhiteFlour)
, "{:10.2f}".format(numGWhiteFlour * batchSize)
, " g "
, " White Flour")
# Baking Soda
print("{:10.2f}".format(unitCostBakingSoda * batchSize * numTspBakingSoda)
, "{:10.2f}".format(numTspBakingSoda * batchSize)
, " tsp"
, " Baking Soda")
# Salt
print("{:10.2f}".format(unitCostSalt * batchSize * numTspSalt)
, "{:10.2f}".format(numTspSalt * batchSize)
, " tsp"
, " Salt")
# Chocolate Chip
print("{:10.2f}".format(unitCostChocolateChip * batchSize * numGChocolateChip)
, "{:10.2f}".format(numGChocolateChip * batchSize)
, " g "
, " Chocolate Chips")
# TOTAL COST
print("=" * 44) # separator
print("$"
, "{:8.2f}".format(unitCostSaltedButter * batchSize * numGSaltedButter
+ unitCostBrownSugar * batchSize * numGBrownSugar
+ unitCostWhiteSugar * batchSize * numGWhiteSugar
+ unitCostVanilla * batchSize * numTspVanilla
+ unitCostEgg * batchSize * numEaEgg
+ unitCostWhiteFlour * batchSize * numGWhiteFlour
+ unitCostBakingSoda * batchSize * numTspBakingSoda
+ unitCostSalt * batchSize * numTspSalt
+ unitCostChocolateChip * batchSize * numGChocolateChip)
, " TOTAL COST OF BATCH")
# Size is over minimum, use bulk pricing
elif batchSize >= bulkBatchSizeMin:
print(ingredientString)
print(" COST QUANTITY UNIT ITEM") # table header
# Salted Butter
print("{:10.2f}".format(unitBulkCostSaltedButter * batchSize * numGSaltedButter)
, "{:10.2f}".format(numGSaltedButter * batchSize)
, " g "
, " Salted Butter")
# Brown Sugar
print("{:10.2f}".format(unitBulkCostBrownSugar * batchSize * numGBrownSugar)
, "{:10.2f}".format(numGBrownSugar * batchSize)
, " g "
, " Brown Sugar")
# White Sugar
print("{:10.2f}".format(unitBulkCostWhiteSugar * batchSize * numGWhiteSugar)
, "{:10.2f}".format(numGWhiteSugar * batchSize)
, " g "
, " White Sugar")
# Vanilla
print("{:10.2f}".format(unitBulkCostVanilla * batchSize * numTspVanilla)
, "{:10.2f}".format(numTspVanilla * batchSize)
, " tsp"
, " Vanilla")
# Egg
print("{:10.2f}".format(unitBulkCostEgg * batchSize * numEaEgg)
, "{:10.0f}".format(numEaEgg * batchSize)
, " ea "
, " Eggs")
# White Flour
print("{:10.2f}".format(unitBulkCostWhiteFlour * batchSize * numGWhiteFlour)
, "{:10.2f}".format(numGWhiteFlour * batchSize)
, " g "
, " White Flour")
# Baking Soda
print("{:10.2f}".format(unitBulkCostBakingSoda * batchSize * numTspBakingSoda)
, "{:10.2f}".format(numTspBakingSoda * batchSize)
, " tsp"
, " Baking Soda")
# Salt
print("{:10.2f}".format(unitBulkCostSalt * batchSize * numTspSalt)
, "{:10.2f}".format(numTspSalt * batchSize)
, " tsp"
, " Salt")
# Chocolate Chip
print("{:10.2f}".format(unitBulkCostChocolateChip * batchSize * numGChocolateChip)
, "{:10.2f}".format(numGChocolateChip * batchSize)
, " g "
, " Chocolate Chips")
# TOTAL COST
print("=" * 44) # separator
print("$"
, "{:8.2f}".format(unitBulkCostSaltedButter * batchSize * numGSaltedButter
+ unitBulkCostBrownSugar * batchSize * numGBrownSugar
+ unitBulkCostWhiteSugar * batchSize * numGWhiteSugar
+ unitBulkCostVanilla * batchSize * numTspVanilla
+ unitBulkCostEgg * batchSize * numEaEgg
+ unitBulkCostWhiteFlour * batchSize * numGWhiteFlour
+ unitBulkCostBakingSoda * batchSize * numTspBakingSoda
+ unitBulkCostSalt * batchSize * numTspSalt
+ unitBulkCostChocolateChip * batchSize * numGChocolateChip)
, " TOTAL COST OF BATCH")
# batchSize entered was out of range, error!
else:
print("**ERROR! You made a mistake in your entry!")
# Display recipe directions ---------------------------------------------------
if batchSize > 0: # only display if batchSize was valid
# print cookie ascii
print("""\n _,._
__.o` o`”-.
.-O o `"-.o O )_,._
( o O o )--.-"`O o"-.
'--------' ( o O o)
`----------`""")
print("1.\tHeat oven to 375ºF.") # step 1
print("2.\tMix sugars, butter, vanilla and egg in large bowl. Stir in flour, "
"\n\t\tbaking soda and salt (dough will be stiff)."
"\n\t\tStir in nuts and chocolate chips.") # step 2
print("3.\tDrop dough by rounded tablespoonfuls onto cookie sheet.") # step 3
print("4.\tBake 8 to 10 minutes or until light brown (centers will be soft)."
"\n\t\tCool slightly; remove from cookie sheet. Cool on wire rack.") # step 4
print("\nFor additional details, visit: "
, "\nhttp://www.bettycrocker.com/recipes/ultimate-chocolate-chip-cookies/77c14e03-d8b0-4844-846d-f19304f61c57")
else: # batchSize was incorrect
print("Please try again.")
| FPU-CIS-312/MiniProject2 | CIS312_Project2_Bagdanov.py | Python | mit | 8,727 | ["VisIt"] | 751b030a6a396c5a375b71aa79876730cb728653ad67cd74d413f0aa9e3929dd |
""" Example smearing script
This script:
* Reads in mc spectra from hdf5
* Smears spectra, default is to use weighted Gaussian method, but can
also use specify random Gaussian method via command line
* Smeared spectrum is saved to the same directory with ``_smeared``
added to the file name
Examples:
To smear hdf5 file ``example.hdf5`` using the random Gaussian method::
$ python dump_smeared.py --smear_method "random" /path/to/example.hdf5
This will create the smeared hdf5 file ``/path/to/example_smeared.hdf5``.
.. note:: Valid smear methods include:
* "weight", default
* "random"
"""
import echidna.output.store as store
import echidna.core.smear as smear
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--smear_method", nargs='?', const="weight",
type=str, default="weight",
help="specify the smearing method to use")
parser.add_argument("-r", "--energy_resolution", default=None, type=float,
help="specify energy resolution "
"e.g. 0.05 for 5 percent")
parser.add_argument("path", type=str,
help="specify path to hdf5 file")
args = parser.parse_args()
directory = args.path[:args.path.rfind("/")+1] # strip filename
# strip directory and extension
filename = args.path[args.path.rfind("/")+1:args.path.rfind(".")]
if args.energy_resolution is not None:
smearer = smear.EResSmear(args.energy_resolution)
else: # use light yield
smearer = smear.Smear()
spectrum = store.load(args.path)
if args.smear_method == "weight": # Use default smear method
smeared_spectrum = smearer.weight_gaussian_energy_spectra(spectrum)
smeared_spectrum = smearer.weight_gaussian_radius_spectra(smeared_spectrum)
elif args.smear_method == "random":
smeared_spectrum = smearer.random_gaussian_energy_spectra(spectrum)
smeared_spectrum = smearer.random_gaussian_radius_spectra(smeared_spectrum)
else: # Not a valid smear method
parser.error(args.smear_method + " is not a valid smear method")
filename = directory + filename + "_smeared" + ".hdf5"
store.dump(filename, smeared_spectrum)
| drjeannewilson/echidna | echidna/scripts/dump_smeared.py | Python | mit | 2,313 | ["Gaussian"] | 19bba922a57d2c54cb83233549dda21c68f47075513446901acc66a9c9f06098 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../../input/variable_range.e')
result = chigger.exodus.ExodusResult(reader, variable='u', representation='wireframe')
cell_result = chigger.exodus.LabelExodusResult(result, text_color=[0,1,1], font_size=10)
window = chigger.RenderWindow(result, cell_result, size=[800,800], test=True)
window.write('variable.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/exodus/labels/variable.py | Python | lgpl-2.1 | 749 | ["MOOSE"] | 6d9d7a224c2586fa6fc5bdf279f29c92e6ee6567efdf6fecb5eef5026706697e |
# -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
http://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", http://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
-------------------
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
-------------------
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta`` must
also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')],
coord.SphericalCosLatDifferential: [
coord.RepresentationMapping('d_lon_coslat', 'pm_Lambda_cosBeta'),
coord.RepresentationMapping('d_lat', 'pm_Beta'),
coord.RepresentationMapping('d_distance', 'radial_velocity')],
coord.SphericalDifferential: [
coord.RepresentationMapping('d_lon', 'pm_Lambda'),
coord.RepresentationMapping('d_lat', 'pm_Beta'),
coord.RepresentationMapping('d_distance', 'radial_velocity')]
}
frame_specific_representation_info[coord.UnitSphericalRepresentation] = \
frame_specific_representation_info[coord.SphericalRepresentation]
frame_specific_representation_info[coord.UnitSphericalCosLatDifferential] = \
frame_specific_representation_info[coord.SphericalCosLatDifferential]
frame_specific_representation_info[coord.UnitSphericalDifferential] = \
frame_specific_representation_info[coord.SphericalDifferential]
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
.format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
.format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
.format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
| AustereCuriosity/astropy | examples/coordinates/plot_sgr-coordinate-frame.py | Python | bsd-3-clause | 11,382 | ["Galaxy"] | 9a5d53960c3594af8ee30d03ac6a64a9a591b0c96d1ad4fd9098543aab0acd50 |
from __future__ import division, print_function
import numpy as np
from ase.lattice import bulk
from ase.calculators.lj import LennardJones
from ase.constraints import UnitCellFilter
from ase.optimize import MDMin
vol0 = 4 * 0.91615977036 # theoretical minimum
a0 = vol0**(1 / 3)
a = bulk('X', 'fcc', a=a0)
a.calc = LennardJones()
a.set_cell(np.dot(a.cell,
[[1.02, 0, 0.03],
[0, 0.99, -0.02],
[0.1, -0.01, 1.03]]),
scale_atoms=True)
a *= (1, 2, 3)
a.rattle()
sigma_vv = a.get_stress(voigt=False)
print(sigma_vv)
print(a.get_potential_energy() / len(a))
vol = a.get_volume()
deps = 1e-5
cell = a.cell.copy()
for v in range(3):
x = np.eye(3)
x[v, v] += deps
a.set_cell(np.dot(cell, x), scale_atoms=True)
ep = a.calc.get_potential_energy(a, force_consistent=True)
x[v, v] -= 2 * deps
a.set_cell(np.dot(cell, x), scale_atoms=True)
em = a.calc.get_potential_energy(a, force_consistent=True)
s = (ep - em) / 2 / deps / vol
print(v, s, abs(s - sigma_vv[v, v]))
assert abs(s - sigma_vv[v, v]) < 1e-7
for v1 in range(3):
v2 = (v1 + 1) % 3
x = np.eye(3)
x[v1, v2] = deps
x[v2, v1] = deps
a.set_cell(np.dot(cell, x), scale_atoms=True)
ep = a.calc.get_potential_energy(a, force_consistent=True)
x[v1, v2] = -deps
x[v2, v1] = -deps
a.set_cell(np.dot(cell, x), scale_atoms=True)
em = a.calc.get_potential_energy(a, force_consistent=True)
s = (ep - em) / deps / 4 / vol
print(v1, v2, s, abs(s - sigma_vv[v1, v2]))
assert abs(s - sigma_vv[v1, v2]) < 1e-7
opt = MDMin(UnitCellFilter(a), dt=0.01)
opt.run(fmax=0.5)
print(a.cell)
for i in range(3):
for j in range(3):
x = np.dot(a.cell[i], a.cell[j])
y = (i + 1) * (j + 1) * a0**2 / 2
if i != j:
y /= 2
print(i, j, x, (x - y) / x)
assert abs((x - y) / x) < 0.01
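# Note (added commentary, not in the original test): the assertions above check
# the analytic stress against a central finite difference of the energy with
# respect to strain, sigma_vv ~= (E(+eps) - E(-eps)) / (2 * eps * V) for the
# diagonal components; the shear loop applies eps to both the (v1, v2) and
# (v2, v1) entries at once, hence the extra factor of 2 in its denominator
# (/ 4 / vol in total).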
| grhawk/ASE | tools/ase/test/stress.py | Python | gpl-2.0 | 1,914 | ["ASE"] | 582894ecd77dc3f4f7c6b99e1393132b171e87a04796356d97b6047d1d72bc18 |
import wx
from images import icon_add, icon_delete, icon_edit
from images import icon_main, icon_about
from images import icon_home, icon_help, \
icon_visit_site, icon_searches, \
icon_browser
from images import icon_default_layout
from images import icon_add, icon_delete, icon_edit
from images import icon_preferences ,to_download, downloading, \
icon_downloaded, icon_ready, icon_processed, all_series, \
icon_update, icon_update_all, icon_restore_wnd, \
icon_edit_episode, icon_sync_status, icon_sync_series
from images import sync_selected_files, sync_all_files
NORMAL = 1
CHECK = 2
menuItems = dict()
mainMenuLookup = dict()
mainToolLookup = dict()
parent = None
TBFLAGS = ( wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT )
TBSIZE = (16, 16)
class MenuData(object):
def __init__(self):
self.id = -1
self.data = None
def _createSubMenu(mnu, menulst, extramenus = None):
menuIdLookup = dict()
if extramenus is None:
extramenus = dict()
for submenu in menulst:
if isinstance(submenu, tuple):
smenu = wx.Menu()
d = _createSubMenu(smenu, submenu[1], extramenus)
menuIdLookup.update(d)
mnu.AppendMenu(wx.NewId(), submenu[0], smenu)
elif submenu == "-":
mnu.AppendSeparator()
else:
if submenu in mainMenuLookup:
id = mainMenuLookup[submenu].id
else:
id = wx.NewId()
if submenu in menuItems:
ml = menuItems[submenu]
else:
ml = extramenus[submenu]
if ml[4] == CHECK:
mt = wx.ITEM_CHECK
else:
mt = wx.ITEM_NORMAL
smenu = wx.MenuItem(mnu, id, ml[0], ml[2], mt)
if ml[3]:
smenu.SetBitmap(ml[3])
md = MenuData()
md.id = id
if len(ml) > 5:
md.data = ml[5]
else:
md.data = None
menuIdLookup[submenu] = md
mnu.AppendItem(smenu)
return menuIdLookup
def _createMainMenu(menulst, menuBar):
global mainMenuLookup
for mainmenu in menulst:
mnu = wx.Menu()
d = _createSubMenu(mnu, mainmenu[1])
mainMenuLookup.update(d)
menuBar.Append(mnu, mainmenu[0])
def _createToolBar(menulst, tb):
global mainToolLookup
for submenu in menulst:
if submenu == "-":
tb.AddSeparator()
else:
tid = wx.NewId()
ml = menuItems[submenu]
if ml[3]:
bmp = ml[3]
else:
bmp = wx.NullBitmap
if ml[4] == CHECK:
tb.AddCheckLabelTool(tid, ml[1], bmp, shortHelp = ml[1], longHelp = ml[2])
else:
tb.AddLabelTool(tid, ml[1], bmp, shortHelp = ml[1], longHelp = ml[2])
mainToolLookup[submenu] = tid
def create(parent, bindEvents):
"""
Creates the menu system for the application, and initializes the
event handlers for it.
"""
global menuItems
menuItems = {
"exit": ("E&xit", "Exit", "Exit the application", None, NORMAL),
"add_series": ("&Add Series...\tCtrl+N", "Add", "Add a new series", icon_add.getBitmap(), NORMAL),
"edit_series": ("&Edit Series...\tCtrl+E", "Edit", "Edit Series properties", icon_edit.getBitmap(), NORMAL),
"del_series": ("&Delete Series\tCtrl+D", "Delete", "Delete Series", icon_delete.getBitmap(), NORMAL),
"preferences": ("&Preferences ...", "Preferences", "Open the application preferences", icon_preferences.getBitmap(), NORMAL ),
"clear_cache": ("&Clear Cache", "Clear Cache", "Clear cache of one or all series", None, NORMAL),
"select_all": ("&Select All\tCtrl+A", "Select All", "Select all episodes", None, NORMAL),
"edit_episode": ("&Edit Episode...", "Edit Episode", "Edit selected episode", icon_edit_episode.getBitmap(), NORMAL),
"searches": ("Search &Engines ...", "Edit Search Engines", "Edit Search Engine properties", icon_searches.getBitmap(), NORMAL),
"restore": ("&Restore Default Layout", "Restore Default Layout", "Restore default window layout", icon_default_layout.getBitmap(), NORMAL),
"toggle_sel": ("Toggle View Selector", "Toggle View Selector", "Toggle View Selector", None, CHECK),
"toggle_prog": ("Toggle Progress Log", "Toggle Progress Log", "Hide or show Progress Log window", None, CHECK),
"toggle_stat": ("Toggle Statistics Window", "Toggle Statistics Window", "Hide or show Progress Statistics window", None, CHECK),
"to_tray": ("Minimize to tray", "Minimize to tray", "Upon minimize, hide in system tray", None, CHECK),
"help": ("&Help ... ", "Help", "Show the application help", icon_help.getBitmap(), NORMAL),
"visit_site": ("&Visit Site ... ", "Visit Site", "Visit Project Site", icon_visit_site.getBitmap(), NORMAL),
"about": ("&About ...", "About", "Show the about dialog", None, NORMAL),
"s_todownload": ("&To Download", "Mark as To Download", "Mark as To Download", to_download.getBitmap(), NORMAL),
"s_download": ("&Downloading", "Mark as Downloading", "Mark as Downloading", downloading.getBitmap(), NORMAL),
"s_downloaded": ("Down&loaded", "Mark as Downloaded", "Mark as Downloaded", icon_downloaded.getBitmap(), NORMAL),
"s_ready": ("&Ready", "Mark as Ready", "Mark as Ready", icon_ready.getBitmap(), NORMAL),
"s_seen": ("&Seen", "Mark as Seen", "Mark as Seen", icon_processed.getBitmap(), NORMAL),
"browser": ("Start &Browser", "Start Browser", "Start browser to show the episodes", icon_browser.getBitmap(), NORMAL),
"update_all": ("&Update All\tCtrl+Shift+U", "Update All", "Update all series", icon_update_all.getBitmap(), NORMAL),
"update": ("Update &Series\tCtrl+U", "Update Series", "Update this series", icon_update.getBitmap(), NORMAL),
"restore_wnd": ("Restore Window", "Restore Window", "Restore this window", icon_restore_wnd.getBitmap(), NORMAL),
"sync_status": ("&Sync All Statuses", "Synchronize Status", "Scan all series, synchronize statuses", icon_sync_status.getBitmap(), NORMAL),
"sync_series": ("Sync Status of S&elected Series", "Synchronize Selected Series", "Synchronize only selected series", icon_sync_series.getBitmap(), NORMAL),
"dir_epfiles": ("Scan files for &this Series", "Scan Files For This Series", "Scan media files for selected series", sync_selected_files.getBitmap(), NORMAL),
"dir_epfiles_all": ("Scan Files for &all Series", "Sync Files For All Series", "Scan media files for all series", sync_all_files.getBitmap(), NORMAL)
}
mainmenu = [ ("&File", [ "preferences",
"browser", "-",
"exit" ]
),
(
"&Series", [ "add_series",
"edit_series",
"del_series",
"-",
"update",
"update_all",
"-",
( "&Sync", [ "sync_status",
"sync_series" ] ),
"-",
"clear_cache" ]
),
("&Media Files", ["dir_epfiles", "dir_epfiles_all" ] ),
("&Episode", [ "select_all", "searches", "-", "edit_episode",
( "&Mark Status As", [ "s_todownload", "s_download",
"s_downloaded", "s_ready", "s_seen"] )
] ),
("&Window", [ "restore", "toggle_sel", "toggle_prog", "toggle_stat", "-", "to_tray" ] ),
("&Help", [ "help", "visit_site", "-", "about" ] )
]
toolmenu = [ "add_series", "edit_series", "del_series", "-", "searches", "-",
"s_todownload", "s_download", "s_downloaded", "s_ready", "s_seen", "-",
"update", "update_all", "browser", "-", "sync_status", "sync_series"
]
# create menu
mb = wx.MenuBar()
_createMainMenu(mainmenu, mb)
parent.SetMenuBar(mb)
# create toolbar
tb = parent.CreateToolBar( TBFLAGS )
tb.SetToolBitmapSize( TBSIZE )
_createToolBar(toolmenu, tb)
tb.Realize()
# bind all events
for evtid, evthnd in bindEvents:
if evtid in mainMenuLookup:
parent.Bind(wx.EVT_MENU, evthnd, id = mainMenuLookup[evtid].id)
if evtid in mainToolLookup:
parent.Bind(wx.EVT_TOOL, evthnd, id = mainToolLookup[evtid])
def populate(mnu, menulst, extramenus = None):
""" Populates a submenu with a given definition list. If the
items in this list are known by the submenu, they will get
the same ID so that e.g. popup menu's can trigger the same
handler as the main menu can """
if extramenus is None:
extramenus = dict()
return _createSubMenu(mnu, menulst, extramenus)
def enable(parent, menu, enabled):
""" Enables or disables a menu item by reference. menu can be a list or a
single string """
tb = parent.GetToolBar()
mb = parent.GetMenuBar()
if isinstance(menu, list):
for id in menu:
if id in mainToolLookup:
tb.EnableTool(mainToolLookup[id], enabled)
if id in mainMenuLookup:
mb.Enable(mainMenuLookup[id].id, enabled)
else:
if menu in mainToolLookup:
tb.EnableTool(mainToolLookup[menu], enabled)
if menu in mainMenuLookup:
mb.Enable(mainMenuLookup[menu].id, enabled)
def check(parent, id, value):
""" Checks or unchecks a menu item """
if id in mainToolLookup:
tb = parent.GetToolBar()
tb.ToggleTool(mainToolLookup[id], value)
if id in mainMenuLookup:
mb = parent.GetMenuBar()
mb.Check(mainMenuLookup[id].id, value)
def getmenu(id):
""" Returns the string by this menu item. The ID must be unique """
for menu in mainMenuLookup.iterkeys():
if mainMenuLookup[menu].id == id:
return menu
for menu in mainToolLookup.iterkeys():
if mainToolLookup[menu] == id:
return menu
return None
def menuid_to_data(mnuid, menuLookup = None):
""" Scans in the menu lookup (or main menu) for the ID returning the
data that belongs to it """
if menuLookup is None:
menuLookup = mainMenuLookup
for data in menuLookup.itervalues():
if data.id == mnuid:
return data.data
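# Illustrative usage sketch (added commentary, not part of the original module).
# The frame class, handler names and import path are assumptions based on this
# file living at gui/menuhelper.py; the menu keys come from the menuItems dict
# defined in create():
#
#   import wx
#   from gui import menuhelper
#
#   class MainFrame(wx.Frame):
#       def __init__(self):
#           wx.Frame.__init__(self, None, title="airs")
#           menuhelper.create(self, [("add_series", self.OnAddSeries),
#                                    ("exit", self.OnExit)])
#           menuhelper.enable(self, ["edit_series", "del_series"], False)
#
#       def OnAddSeries(self, event):
#           pass
#
#       def OnExit(self, event):
#           self.Close()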
| jorgb/airs | gui/menuhelper.py | Python | gpl-2.0 | 11,126 | ["VisIt"] | 748ea58ada216d74d2cef0aadff5723989cd39872b14ecf6072315bf52ec452b |
from __future__ import absolute_import
from six.moves.urllib.parse import urlparse
from django.utils.translation import ugettext_lazy as _
from django import forms
from sentry import http
from sentry.web.helpers import render_to_response
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.identity.github_enterprise import get_user_info
from sentry.integrations import (
IntegrationMetadata,
IntegrationInstallation,
FeatureDescription,
IntegrationFeatures,
)
from sentry.integrations.constants import ERR_INTERNAL, ERR_UNAUTHORIZED
from sentry.integrations.exceptions import ApiError
from sentry.integrations.repositories import RepositoryMixin
from sentry.pipeline import NestedPipelineView, PipelineView
from sentry.utils.http import absolute_uri
from sentry.integrations.github.integration import GitHubIntegrationProvider, build_repository_query
from sentry.integrations.github.issues import GitHubIssueBasic
from sentry.integrations.github.utils import get_jwt
from .repository import GitHubEnterpriseRepositoryProvider
from .client import GitHubEnterpriseAppsClient
DESCRIPTION = """
Connect your Sentry organization into your on-premise GitHub Enterprise
instances. Take a step towards augmenting your sentry issues with commits from
your repositories ([using releases](https://docs.sentry.io/learn/releases/))
and linking up your GitHub issues and pull requests directly to issues in
Sentry.
"""
FEATURES = [
FeatureDescription(
"""
Create and link Sentry issue groups directly to a GitHub issue or pull
request in any of your repositories, providing a quick way to jump from
Sentry bug to tracked issue or PR!
""",
IntegrationFeatures.ISSUE_BASIC,
),
FeatureDescription(
"""
Authorize repositories to be added to your Sentry organization to augment
sentry issues with commit data with [deployment
tracking](https://docs.sentry.io/learn/releases/).
""",
IntegrationFeatures.COMMITS,
),
]
disable_dialog = {
"actionText": "Visit GitHub Enterprise",
"body": "Before deleting this integration, you must uninstall it from your"
" GitHub Enterprise instance. After uninstalling, your integration"
" will be disabled at which point you can choose to delete this"
" integration.",
}
removal_dialog = {
"actionText": "Delete",
"body": "Deleting this integration will delete all associated repositories"
" and commit data. This action cannot be undone. Are you sure you"
" want to delete your integration?",
}
setup_alert = {
"type": "warning",
"icon": "icon-warning-sm",
"text": "Your GitHub enterprise instance must be able to communicate with"
" Sentry. Sentry makes outbound requests from a [static set of IP"
" addresses](https://docs.sentry.io/ip-ranges/) that you may wish"
" to whitelist to support this integration.",
}
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
features=FEATURES,
author="The Sentry Team",
noun=_("Installation"),
issue_url="https://github.com/getsentry/sentry/issues/new?title=GitHub%20Integration:%20&labels=Component%3A%20Integrations",
source_url="https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/github_enterprise",
aspects={
"disable_dialog": disable_dialog,
"removal_dialog": removal_dialog,
"alerts": [setup_alert],
},
)
API_ERRORS = {404: "GitHub Enterprise returned a 404 Not Found error.", 401: ERR_UNAUTHORIZED}
class GitHubEnterpriseIntegration(IntegrationInstallation, GitHubIssueBasic, RepositoryMixin):
repo_search = True
def get_client(self):
base_url = self.model.metadata["domain_name"].split("/")[0]
return GitHubEnterpriseAppsClient(
base_url=base_url,
integration=self.model,
private_key=self.model.metadata["installation"]["private_key"],
app_id=self.model.metadata["installation"]["id"],
verify_ssl=self.model.metadata["installation"]["verify_ssl"],
)
def get_repositories(self, query=None):
if not query:
return [
{"name": i["name"], "identifier": i["full_name"]}
for i in self.get_client().get_repositories()
]
full_query = build_repository_query(self.model.metadata, self.model.name, query)
response = self.get_client().search_repositories(full_query)
return [
{"name": i["name"], "identifier": i["full_name"]} for i in response.get("items", [])
]
def search_issues(self, query):
return self.get_client().search_issues(query)
def reinstall(self):
installation_id = self.model.external_id.split(":")[1]
metadata = self.model.metadata
metadata["installation_id"] = installation_id
self.model.update(metadata=metadata)
self.reinstall_repositories()
def message_from_error(self, exc):
if isinstance(exc, ApiError):
message = API_ERRORS.get(exc.code)
if message:
return message
return "Error Communicating with GitHub Enterprise (HTTP %s): %s" % (
exc.code,
exc.json.get("message", "unknown error") if exc.json else "unknown error",
)
else:
return ERR_INTERNAL
class InstallationForm(forms.Form):
url = forms.CharField(
label="Installation Url",
help_text=_(
'The "base URL" for your GitHub enterprise instance, ' "includes the host and protocol."
),
widget=forms.TextInput(attrs={"placeholder": _("https://github.example.com")}),
)
id = forms.CharField(
label="GitHub App ID",
help_text=_(
"The App ID of your Sentry app. This can be " "found on your apps configuration page."
),
widget=forms.TextInput(attrs={"placeholder": _("1")}),
)
name = forms.CharField(
label="GitHub App Name",
help_text=_(
"The GitHub App name of your Sentry app. "
"This can be found on the apps configuration "
"page."
),
widget=forms.TextInput(attrs={"placeholder": _("our-sentry-app")}),
)
verify_ssl = forms.BooleanField(
label=_("Verify SSL"),
help_text=_(
"By default, we verify SSL certificates "
"when delivering payloads to your GitHub "
"Enterprise instance"
),
widget=forms.CheckboxInput(),
required=False,
)
webhook_secret = forms.CharField(
label="GitHub App Webhook Secret",
help_text=_(
"We require a webhook secret to be "
"configured. This can be generated as any "
"random string value of your choice and "
"should match your GitHub app "
"configuration."
),
widget=forms.TextInput(attrs={"placeholder": _("XXXXXXXXXXXXXXXXXXXXXXXXXXX")}),
)
private_key = forms.CharField(
label="GitHub App Private Key",
help_text=_("The Private Key generated for your Sentry " "GitHub App."),
widget=forms.Textarea(
attrs={
"rows": "60",
"placeholder": _(
"-----BEGIN RSA PRIVATE KEY-----\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"XXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
"-----END RSA PRIVATE KEY-----"
),
}
),
)
client_id = forms.CharField(
label="GitHub App OAuth Client ID", widget=forms.TextInput(attrs={"placeholder": _("1")})
)
client_secret = forms.CharField(
label="GitHub App OAuth Client Secret",
widget=forms.TextInput(attrs={"placeholder": _("XXXXXXXXXXXXXXXXXXXXXXXXXXX")}),
)
def __init__(self, *args, **kwargs):
super(InstallationForm, self).__init__(*args, **kwargs)
self.fields["verify_ssl"].initial = True
class InstallationConfigView(PipelineView):
def dispatch(self, request, pipeline):
if request.method == "POST":
form = InstallationForm(request.POST)
if form.is_valid():
form_data = form.cleaned_data
form_data["url"] = urlparse(form_data["url"]).netloc
pipeline.bind_state("installation_data", form_data)
pipeline.bind_state(
"oauth_config_information",
{
"access_token_url": u"https://{}/login/oauth/access_token".format(
form_data.get("url")
),
"authorize_url": u"https://{}/login/oauth/authorize".format(
form_data.get("url")
),
"client_id": form_data.get("client_id"),
"client_secret": form_data.get("client_secret"),
"verify_ssl": form_data.get("verify_ssl"),
},
)
return pipeline.next_step()
else:
form = InstallationForm()
return render_to_response(
template="sentry/integrations/github-enterprise-config.html",
context={"form": form},
request=request,
)
class GitHubEnterpriseIntegrationProvider(GitHubIntegrationProvider):
key = "github_enterprise"
name = "GitHub Enterprise"
metadata = metadata
integration_cls = GitHubEnterpriseIntegration
def _make_identity_pipeline_view(self):
"""
Make the nested identity provider view. It is important that this view is
not constructed until we reach this step and the
``oauth_config_information`` is available in the pipeline state. This
        method should be late bound into the pipeline views.
"""
identity_pipeline_config = dict(
oauth_scopes=(),
redirect_url=absolute_uri("/extensions/github-enterprise/setup/"),
**self.pipeline.fetch_state("oauth_config_information")
)
return NestedPipelineView(
bind_key="identity",
provider_key="github_enterprise",
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
def get_pipeline_views(self):
return [
InstallationConfigView(),
GitHubEnterpriseInstallationRedirect(),
            # The identity provider pipeline should be constructed at execution
            # time, so that the oauth configuration parameters bound by the
            # installation config view are available in the pipeline state.
lambda: self._make_identity_pipeline_view(),
]
def post_install(self, integration, organization):
pass
def get_installation_info(self, installation_data, access_token, installation_id):
session = http.build_session()
resp = session.get(
u"https://{}/api/v3/app/installations/{}".format(
installation_data["url"], installation_id
),
headers={
"Authorization": "Bearer %s"
% get_jwt(
github_id=installation_data["id"],
github_private_key=installation_data["private_key"],
),
"Accept": "application/vnd.github.machine-man-preview+json",
},
verify=installation_data["verify_ssl"],
)
resp.raise_for_status()
installation_resp = resp.json()
resp = session.get(
u"https://{}/api/v3/user/installations".format(installation_data["url"]),
params={"access_token": access_token},
headers={"Accept": "application/vnd.github.machine-man-preview+json"},
verify=installation_data["verify_ssl"],
)
resp.raise_for_status()
user_installations_resp = resp.json()
# verify that user actually has access to the installation
for installation in user_installations_resp["installations"]:
if installation["id"] == installation_resp["id"]:
return installation_resp
return None
def build_integration(self, state):
identity = state["identity"]["data"]
installation_data = state["installation_data"]
user = get_user_info(installation_data["url"], identity["access_token"])
installation = self.get_installation_info(
installation_data, identity["access_token"], state["installation_id"]
)
domain = urlparse(installation["account"]["html_url"]).netloc
integration = {
"name": installation["account"]["login"],
# installation id is not enough to be unique for self-hosted GH
"external_id": u"{}:{}".format(domain, installation["id"]),
# GitHub identity is associated directly to the application, *not*
# to the installation itself.
# app id is not enough to be unique for self-hosted GH
"idp_external_id": u"{}:{}".format(domain, installation["app_id"]),
"metadata": {
# The access token will be populated upon API usage
"access_token": None,
"expires_at": None,
"icon": installation["account"]["avatar_url"],
"domain_name": installation["account"]["html_url"].replace("https://", ""),
"account_type": installation["account"]["type"],
"installation_id": installation["id"],
"installation": installation_data,
},
"user_identity": {
"type": "github_enterprise",
"external_id": user["id"],
"scopes": [], # GitHub apps do not have user scopes
"data": {"access_token": identity["access_token"]},
},
"idp_config": state["oauth_config_information"],
}
if state.get("reinstall_id"):
integration["reinstall_id"] = state["reinstall_id"]
return integration
def setup(self):
from sentry.plugins import bindings
bindings.add(
"integration-repository.provider",
GitHubEnterpriseRepositoryProvider,
id="integrations:github_enterprise",
)
class GitHubEnterpriseInstallationRedirect(PipelineView):
def get_app_url(self, installation_data):
url = installation_data.get("url")
name = installation_data.get("name")
return u"https://{}/github-apps/{}".format(url, name)
def dispatch(self, request, pipeline):
installation_data = pipeline.fetch_state(key="installation_data")
if "reinstall_id" in request.GET:
pipeline.bind_state("reinstall_id", request.GET["reinstall_id"])
if "installation_id" in request.GET:
pipeline.bind_state("installation_id", request.GET["installation_id"])
return pipeline.next_step()
return self.redirect(self.get_app_url(installation_data))
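# --- Hedged illustration (not part of the Sentry source above) ---
# build_integration() prefixes both the installation id and the app id with the
# GitHub Enterprise instance's domain because, for self-hosted instances, the
# numeric ids alone are not globally unique. All values below are invented.
def _external_id_sketch():
    domain = urlparse("https://github.example.com/orgs/acme").netloc
    installation_id = 42   # hypothetical installation id
    app_id = 7             # hypothetical GitHub App id
    external_id = u"{}:{}".format(domain, installation_id)   # "github.example.com:42"
    idp_external_id = u"{}:{}".format(domain, app_id)        # "github.example.com:7"
    return external_id, idp_external_id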
|
mvaled/sentry
|
src/sentry/integrations/github_enterprise/integration.py
|
Python
|
bsd-3-clause
| 15,330
|
[
"VisIt"
] |
6c2983ca8e86df6549f5e9185a20ef4cc03c9a65fe6009e92b5aa98cca155121
|
#!/usr/bin/env python
"""Predicting poker hand's strength with artificial neural networks in Python"""
from __future__ import absolute_import, print_function
from argparse import ArgumentParser as Parser
import os
from numpy import copy
from pybrain.structure.modules import LinearLayer, SoftmaxLayer, TanhLayer
from pybrain.supervised.trainers import BackpropTrainer, RPropMinusTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.validation import Validator
from load import load_data
TRAIN_METHODS = {'gdm': BackpropTrainer,
'rp': RPropMinusTrainer}
ACTIVATION_FNS = {'purelin': LinearLayer,
'tansig': TanhLayer}
def get_parser():
"""Parse command-line arguments"""
parser = Parser(description='Train neural network to classify poker hands')
parser.add_argument('-a', '--activation', type=str,
nargs='?', default='tansig',
help='hidden layer activation fn (default: tansig)')
parser.add_argument('-me', '--max_epochs', type=int, nargs='?', default=1000,
help='# of training iterations (default: 1000)')
parser.add_argument('-hn', '--hidden-neurons', type=int, nargs='?', default=10,
help='# of hidden neuron units (default: 10)')
parser.add_argument('-lr', '--learning-rate', type=float,
nargs='?', default=0.01,
help='controls size of weight changes (default: 0.01)')
parser.add_argument('-m', '--method', type=str, nargs='?', default='rp',
help='training method (default: rp)')
parser.add_argument('-nt', '--num-testing', type=int,
                        nargs='?', default=25000,
help='# of testing inputs (default: 25000)')
parser.add_argument('-wtr', '--write-training-results', action='store_true',
help='write training results as well as testing')
parser.add_argument('-v', '--verbose', help='print status messages',
action='store_true')
return parser
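# Hedged usage sketch (not part of the original script): get_parser() can be
# exercised directly with a synthetic argument list; the flag values below are
# invented purely for illustration.
def _example_parse_args():
    parser = get_parser()
    # Equivalent to: python train.py -hn 20 -lr 0.05 -me 500 -m gdm -v
    return vars(parser.parse_args(['-hn', '20', '-lr', '0.05',
                                   '-me', '500', '-m', 'gdm', '-v']))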
def train(args, training_ds):
"""Build and train feed-forward neural network
Keyword arguments:
args -- program arguments (dict)
    training_ds -- suits, ranks, and target hands (list)
"""
# Build a feed-forward network with x hidden units
if args['verbose']:
print('\nBuilding network:')
print('\tinput neurons: {}'.format(training_ds.indim))
print('\thidden neurons: {}'.format(args['hidden_neurons']))
print('\toutput neurons: {}'.format(training_ds.outdim))
print('\thidden layer activation fn: {}'.format(args['activation']))
print('\toutput layer activation fn: softmax')
ff_network = buildNetwork(training_ds.indim,
args['hidden_neurons'],
training_ds.outdim,
hiddenclass=ACTIVATION_FNS[args['activation']],
outclass=SoftmaxLayer)
if args['verbose']:
print('Network built.')
# Train using user-specified method and training data for n epochs
momentum = 0.0
if args['method'] == 'gdm':
momentum = 0.7
if args['verbose']:
print('\nTraining network:')
print('\tmax epochs: {}'.format(args['max_epochs']))
print('\ttraining method: {}'.format(args['method']))
print('\tmomentum: {}'.format(momentum))
print('\tlearning rate: {}'.format(args['learning_rate']))
trainer = TRAIN_METHODS[args['method']](ff_network, dataset=training_ds,
verbose=args['verbose'],
momentum=momentum,
learningrate=args['learning_rate'])
try:
trainer.trainEpochs(args['max_epochs'])
except (KeyboardInterrupt, EOFError):
pass
return trainer, ff_network
def bit_array_transform(output):
"""Transforms activated outputs into a bit array representation"""
def bit_transform(arr):
# Largest weight becomes 1 in the bit array
bit_arr = copy(arr)
greatest = bit_arr.argmax()
bit_arr[:] = 0
bit_arr[greatest] = 1
return bit_arr
return [bit_transform(x) for x in output]
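# Hedged illustration (not part of the original script): the element with the
# largest activation becomes the single 1 in each bit array. The sample values
# below are made up for demonstration only.
def _bit_array_transform_demo():
    from numpy import array
    sample_outputs = [array([0.1, 0.7, 0.2]), array([0.6, 0.3, 0.1])]
    # Expected result: [array([0., 1., 0.]), array([1., 0., 0.])]
    return bit_array_transform(sample_outputs)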
def evaluate(args, trainer, ff_network, training_ds, testing_ds, sim_num, header):
"""Evaluate the networks hit rate and MSE on training and testing"""
if args['verbose']:
print('\nEvaluating the networks hit rate and MSE:')
print('\tTotal epochs: %4d' % trainer.totalepochs)
def dataset_eval(dataset):
"""Return dataset hit rate and MSE"""
# Transform output values to bit vectors, similar to the targets
predicted = bit_array_transform(ff_network.activate(x)
for x in dataset['input'])
target = dataset['target']
# Lists of positions holding predicted and target classes to compare
predicted_pos = [list(x).index(1) for x in predicted]
target_pos = [list(x).index(1) for x in target]
hits = Validator.classificationPerformance(predicted_pos, target_pos)
mse = Validator.MSE(predicted, target)
return hits, mse
def write_eval_results():
"""Write simulation results to a pipe-delimited text file"""
with open('results/simulation{}.txt'.format(sim_num), 'a') as sim_file:
sim_file.write('{}\n'.format('|'.join(str(args[x]) for x in header)))
print('\n\tTraining set:')
tr_hits, tr_mse = dataset_eval(training_ds)
print('\t\tHit rate: {}'.format(tr_hits))
print('\t\tMSE: {}'.format(tr_mse))
print('\n\tTesting set:')
te_hits, te_mse = dataset_eval(testing_ds)
print('\t\tHit rate: {}'.format(te_hits))
print('\t\tMSE: {}'.format(te_mse))
# Write simulation results for testing set only (unless specified)
if args['write_training_results']:
args['hits'] = tr_hits
args['mse'] = tr_mse
write_eval_results()
args['write_training_results'] = False
args['hits'] = te_hits
args['mse'] = te_mse
write_eval_results()
def run_simulation(args, sim_num=0, header=None):
"""Run ANN simulation"""
# Always run verbosely (for now)
args['verbose'] = True
# Load training and test data
training_ds, testing_ds = load_data(args)
# Build and train feed-forward neural network
trainer, ff_network = train(args, training_ds)
# Initialize results output file with given or default header
if header is None:
header = ['hidden_neurons', 'learning_rate', 'max_epochs',
'activation', 'hits', 'mse']
# Create results directory to hold simulation files if not existing
if not os.path.exists('results'):
os.makedirs('results')
# Write table header to simulation file
with open('results/simulation{}.txt'.format(sim_num), 'a') as sim_file:
sim_file.write('{}\n'.format('|'.join(header)))
# Use the trainer to evaluate the network on the training and test data
evaluate(args, trainer, ff_network, training_ds, testing_ds, sim_num, header)
def command_line_runner():
"""Handle command-line interaction"""
parser = get_parser()
args = vars(parser.parse_args())
run_simulation(args)
if __name__ == '__main__':
command_line_runner()
|
huntrar/PokerNet
|
train.py
|
Python
|
mit
| 7,477
|
[
"NEURON"
] |
50003458fb44330f75f6f2fddc7341e53482f82bd342bdfa328d1424f7933706
|
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from lettuce import step, world
from questionnaire.features.pages.extract import ExtractPage
from questionnaire.features.pages.home import HomePage
from questionnaire.features.pages.step_utils import create_user_with_no_permissions, assign
from questionnaire.features.pages.users import LoginPage, UserListingPage, CreateUserPage
from questionnaire.models import Region, Country, UserProfile, Organization
@step(u'Given I am registered user')
def given_i_am_registered_user(step):
world.user, world.uganda, world.region = create_user_with_no_permissions(username="Cool")
assign('can_submit_responses', world.user)
@step(u'And I visit the login page')
def and_i_visit_the_login_page(step):
world.page = LoginPage(world.browser)
world.page.visit()
@step(u'And I fill in the login credentials')
def and_i_fill_in_the_login_credentials(step):
data = {'username': world.user.username,
'password': "pass"}
world.page.fill_form(data)
@step(u'And I submit the form')
def and_i_submit_the_form(step):
world.page.submit()
@step(u'Then I should be redirected home page')
def then_i_should_be_redirected_dashboard(step):
world.page = HomePage(world.browser)
world.page.validate_url()
world.page.is_text_present("Electronic Joint Reporting Form")
@step(u'And I should see my username and the logout link')
def and_i_should_see_my_username_and_the_logout_link(step):
world.page.is_text_present(world.user.get_username(), "Logout")
@step(u'Given I visit the login page')
def given_i_visit_the_login_page(step):
world.page = LoginPage(world.browser)
world.page.visit()
@step(u'And I fill in invalid user credentials')
def and_i_fill_in_invalid_user_credentials(step):
data = {'username': "invalid username",
'password': "pass"}
world.page.fill_form(data)
@step(u'Then I should see an error message')
def then_i_should_see_an_error_message(step):
world.page.is_text_present("Please enter a correct username and password.")
@step(u'When I click the logout link')
def when_i_click_the_logout_link(step):
world.page.click_link_by_partial_href("/accounts/logout/")
@step(u'Then I should see the login page again')
def then_i_should_see_the_login_page_again(step):
world.page = LoginPage(world.browser)
world.page.validate_url()
@step(u'Given I visit the extract page')
def given_i_visit_the_extract_page(step):
world.page = ExtractPage(world.browser)
world.page.visit()
@step(u'When I fill in the login credentials')
def when_i_fill_in_the_login_credentials(step):
world.page = LoginPage(world.browser)
data = {'username': world.user.username,
'password': "pass"}
world.page.fill_form(data)
@step(u'Then I should see the extract page')
def then_i_should_see_the_extract_page(step):
world.page = ExtractPage(world.browser)
@step(u'Given I have a global admin user')
def given_i_have_a_global_admin_user(step):
world.user = User.objects.create(username='user1', email='rajni@kant.com')
world.user.set_password('pass')
world.user.save()
world.global_admin = Group.objects.create(name='Global Admin')
auth_content = ContentType.objects.get_for_model(Permission)
permission, out = Permission.objects.get_or_create(codename='can_view_users', content_type=auth_content)
world.global_admin.permissions.add(permission)
world.global_admin.user_set.add(world.user)
@step(u'And I have 100 other users')
def and_i_have_100_other_users(step):
for i in range(0, 100):
User.objects.create(username='Rajni%s' % str(i), email='rajni@kant%s.com' % str(i), password='I_Rock')
@step(u'And I visit the user listing page')
def and_i_visit_the_user_listing_page(step):
world.page = UserListingPage(world.browser)
world.page.visit()
@step(u'Then I should see the list of users paginated')
def then_i_should_see_the_list_of_users_paginated(step):
world.page.validate_user_list_headers()
world.page.validate_pagination()
@step(u'And I click an new user button')
def and_i_click_an_new_user_button(step):
world.page.click_by_css("#add-new-user")
world.page = CreateUserPage(world.browser)
@step(u'And I fill in the user information')
def and_i_fill_in_the_user_information(step):
world.form_data = {
'username': 'rajni',
'password1': 'kant',
'password2': 'kant',
'email': 'raj@ni.kant'}
world.page.fill_form(world.form_data)
@step(u'And I select global admin role')
def and_i_select_global_admin_role(step):
world.page.check(world.global_admin.id)
@step(u'Then I should see that the user was successfully created')
def then_i_should_see_that_the_user_was_successfully_created(step):
world.page.is_text_present("%s created successfully." % world.global_admin.name)
@step(u'And I should see the user listed on the listing page')
def and_i_should_see_the_user_listed_on_the_listing_page(step):
world.page.is_text_present(world.form_data['username'], world.form_data['email'], world.global_admin.name)
@step(u'And I have a region, in an organization')
def and_i_have_a_region(step):
world.afro_region = Region.objects.create(name="Afro")
world.uganda = Country.objects.create(name="Afro", code="COD")
world.afro_region.countries.add(world.uganda)
@step(u'And I have 10 users in one of the regions')
def and_i_have_10_users_in_one_region(step):
for i in range(0, 10):
world.user = User.objects.create(username='Rajni%s' % str(i), email='rajni@kant%s.com' % str(i), password='I_Rock')
world.country = Country.objects.create(name="Country%s" % str(i), code="UGX")
world.afro_region.countries.add(world.country)
UserProfile.objects.create(user=world.user, country=world.country, region=world.afro_region)
@step(u'And I have five others not in that region')
def and_i_have_five_others_not_in_that_region(step):
region = Region.objects.create(name="Afro")
for i in range(11, 16):
world.user = User.objects.create(username='Jacinta%s' % str(i), email='jacinta%s@gmail.com' % str(i), password='I_Rock')
UserProfile.objects.create(user=world.user, region=region)
@step(u'And I select a region')
def and_i_select_a_region(step):
world.page.select('region', world.afro_region.id)
@step(u'And I click get list')
def and_i_click_get_list(step):
world.page.click_by_css("#get-list-btn")
@step(u'Then I should see only the users in that region')
def then_i_should_see_only_the_users_in_that_region(step):
for i in range(0, 2):
world.page.is_text_present('Rajni%s' % str(i), 'rajni@kant%s.com' % str(i))
for i in range(11, 13):
world.page.is_text_present('Jacinta%s' % str(i), 'jacinta%s@gmail.com' % str(i), status=False)
@step(u'And I select regional admin role')
def and_i_select_regional_admin_role(step):
world.page.check(world.regional_admin.id)
@step(u'Then I should see only region and country fields')
def then_i_should_see_only_region_and_country_fields(step):
world.page.is_text_present("Region", "Organization")
world.page.validate_only_organization_and_region_drop_down_visible()
@step(u'And I select the region for the new user')
def when_i_select_the_country_and_region_for_the_new_user(step):
world.page.select('region', world.afro_region.id)
@step(u'And I have roles')
def and_i_have_roles(step):
world.regional_admin = Group.objects.create(name='Regional Admin')
world.country_admin = Group.objects.create(name='Country Admin')
world.data_submitter = Group.objects.create(name='Data Submitter')
@step(u'Then I should see that the data regional admin was successfully created')
def then_i_should_see_that_the_data_regional_admin_was_successfully_created(step):
world.page.is_text_present("%s created successfully." % world.regional_admin.name)
@step(u'And I have two organizations, region and role')
def and_i_have_an_organization_region_and_role(step):
world.organization = Organization.objects.create(name="UNICEF")
world.who_organization = Organization.objects.create(name="WHO")
world.region = Region.objects.create(name="Afro", organization=world.organization)
world.paho = Region.objects.create(name="PAHO", organization=world.who_organization)
world.global_admin = Group.objects.create(name="Global")
world.regional_admin = Group.objects.create(name="Regional")
@step(u'And I have 4 users in the UNICEF organization, 2 of which are regional admins in the AFRO region')
def and_i_have_4_users_in_the_unicef_organization(step):
for i in range(0, 4):
world.jacinta = User.objects.create(username="jacinta%s" % str(i), email='jacinta%s@gmail.com' % str(i))
UserProfile.objects.create(user=world.jacinta, region=world.region, organization=world.organization)
if i < 2:
world.regional_admin.user_set.add(world.jacinta)
@step(u'And I have 2 users in the WHO organization')
def and_i_have_2_users_in_the_who_organization(step):
for i in range(5, 7):
world.jacinta = User.objects.create(username="jacinta%s" % str(i))
UserProfile.objects.create(user=world.jacinta, region=world.region, organization=world.who_organization)
@step(u'Then I should see only regional admin users in the UNICEF organization in the AFRO region')
def then_i_should_see_only_regional_admin_users_in_the_unicef_organization_in_the_afro_region(step):
for i in range(0, 2):
world.page.is_text_present('jacinta%s' % str(i), 'jacinta%s@gmail.com' % str(i))
@step(u'And I should not see the rest of the users')
def and_i_should_not_see_the_rest_of_the_users(step):
for i in range(4, 7):
world.page.is_text_present('jacinta%s' % str(i), 'jacinta%s@gmail.com' % str(i), status=False)
@step(u'And I select unicef')
def and_i_select_unicef(step):
world.page.select('organization', world.organization.id)
@step(u'Then I should see the region under unicef in the select')
def then_i_should_see_the_region_under_unicef_in_the_select(step):
world.page.select('region', world.region.id)
@step(u'And I should not see the region under who in the select')
def and_i_should_not_see_the_region_under_who_in_the_select(step):
world.page.validate_select_not_present(world.paho.name)
@step(u'And I have two organizations and regions')
def and_i_have_two_organizations_and_regions(step):
world.unicef = Organization.objects.create(name="UNICEF")
world.who = Organization.objects.create(name="WHO")
world.region = Region.objects.create(name="Afro", organization=world.unicef)
world.paho = Region.objects.create(name="PAHO", organization=world.who)
@step(u'And I have four roles')
def and_i_have_four_roles(step):
world.regional_admin = Group.objects.create(name="Regional Admin")
world.country_admin = Group.objects.create(name="Country Admin")
world.data_submitter = Group.objects.create(name="Data Submitter")
@step(u'When I select the global admin role')
def when_i_select_the_global_admin_role(step):
world.page.check(world.global_admin.id)
@step(u'Then I should see organisations drop down')
def then_i_should_see_organisations_drop_down(step):
world.page.validate_only_organization_drop_down_visible()
@step(u'When I select the region admin role')
def when_i_select_the_region_admin_role(step):
world.page.check(world.regional_admin.id)
@step(u'Then I should see region and country')
def then_i_should_see_region_and_country(step):
world.page.validate_only_organization_and_region_drop_down_visible()
@step(u'When I select the country admin role')
def when_i_select_the_country_admin_role(step):
world.page.check(world.country_admin.id)
@step(u'Then I should see country drop down')
def then_i_should_see_country_drop_down(step):
world.page.validate_only_country_drop_down_visible()
@step(u'When I select the data submitter role')
def when_i_select_the_data_submitter_role(step):
world.page.check(world.data_submitter.id)
@step(u'And I have a region')
def and_i_have_a_region(step):
world.afro_region = Region.objects.create(name="Afro")
world.uganda = Country.objects.create(name="Afro", code="COD")
world.afro_region.countries.add(world.uganda)
@step(u'Then I should see only organization and region fields')
def then_i_should_see_only_organization_and_region_fields(step):
world.page.validate_only_organization_and_region_drop_down_visible()
@step(u'And I have one region, in an organization')
def and_i_have_one_region_in_an_organization(step):
world.unicef = Organization.objects.create(name="Unicef")
world.afro_region = Region.objects.create(name="Afro")
world.unicef.regions.add(world.afro_region)
@step(u'When I select the organization')
def when_i_select_the_organization(step):
world.page.select('organization', world.unicef.id)
@step(u'When I select UNICEF organization')
def when_i_select_unicef_organization(step):
world.page.select('organization', world.organization.id)
@step(u'And I select the AFRO region and regional admin role')
def and_i_select_the_afro_region_and_regional_admin_role(step):
world.page.select('region', world.region.id)
world.page.select('role', world.regional_admin.id)
@step(u'And I have a countries in that region')
def and_i_have_a_countries_in_that_region(step):
world.uganda = Country.objects.create(name="Uganda")
world.rwanda = Country.objects.create(name="Rwanda")
world.afro_region.countries.add(world.uganda, world.rwanda)
@step(u'And I fill in data submitter information')
def and_i_fill_in_data_submitter_information(step):
world.form_data = {
'username': 'jacinta',
'password1': 'pass',
'password2': 'pass',
'email': 'iacinta@ni.kant'}
world.page.fill_form(world.form_data)
@step(u'And I select data submitter role')
def and_i_select_data_submitter_role(step):
world.page.check(world.data_submitter.id)
@step(u'When I select a country')
def when_i_select_a_country(step):
world.page.select('country', world.uganda.id)
@step(u'Then I should see that the data submitter was successfully created')
def then_i_should_see_that_the_data_submitter_was_successfully_created(step):
world.page.is_text_present("%s created successfully." % world.data_submitter.name)
world.page.is_text_present(world.form_data['username'], world.form_data['email'], 'Active')
@step(u'And I have 2 country admins and data submitters in countries in the AFRO')
def and_i_have_2_country_admins_and_data_submitters_in_countries_in_the_afro(step):
world.user = User.objects.create(username="mutoni", email="mutoni@ccc.ccc")
UserProfile.objects.create(user=world.user, country=world.uganda)
world.user1 = User.objects.create(username="mbabazi", email="mbabazi@ccc.ccc")
UserProfile.objects.create(user=world.user1, country=world.rwanda)
world.data_submitter.user_set.add(world.user, world.user1)
@step(u'And I select the AFRO region')
def and_i_select_the_afro_region(step):
world.page.select('region', world.region.id)
@step(u'Then I should see all the data submitters too')
def then_i_should_see_all_the_users_in_the_region_including_data_submitters_and_country_admins(step):
world.page.is_text_present(world.user.username, world.user.email)
world.page.is_text_present(world.user1.username, world.user1.email)
@step(u'And I have countries in AFRO region')
def and_i_have_countries_in_afro_region(step):
world.uganda = Country.objects.create(name="Uganda")
world.rwanda = Country.objects.create(name="Rwanda")
world.region.countries.add(world.uganda, world.rwanda)
@step(u'And I click the create button')
def and_i_click_the_create_button(step):
world.page.click_by_css('button.submit')
@step(u'And I have a an active data submitter user')
def and_i_have_a_an_active_data_submitter_user(step):
password = 'pass'
world.uganda = Country.objects.create(name="Uganda")
world.datasubmitteruser = User.objects.create_user('ds', 'ds@ds.com', password)
UserProfile.objects.create(user=world.datasubmitteruser, country=world.uganda)
@step(u'And I select that user')
def and_i_select_that_user(step):
world.page.click_link_by_partial_href('/users/%s/edit' % world.datasubmitteruser.id)
@step(u'And I make that user inactive')
def and_i_make_that_user_inactive(step):
world.page.uncheck_by_name('is_active')
world.page.click_by_css('button.submit')
@step(u'Then that user should be unable to log in')
def then_that_user_should_be_unable_to_log_in(step):
world.page = LoginPage(world.browser)
world.page.visit()
data = {'username': world.datasubmitteruser.username,
'password': "pass"}
world.page.fill_form(data)
world.page.submit()
@step(u'And they should see a message that their account is inactive when they try to log in')
def and_they_should_see_a_message_that_their_account_is_inactive_when_they_try_to_log_in(step):
world.page.is_text_present('This account is inactive')
@step(u'And I select to change the password of that user')
def and_i_select_to_change_the_password_of_that_user(step):
    world.page.click_by_id('id-reset-password-user-%s' % world.datasubmitteruser.id)
@step(u'And I fill in the new password twice')
def and_i_fill_in_the_new_password_twice(step):
world.page.fill_form({'password1': 'p@ss',
'password2': 'p@ss'})
@step(u'And I click save button')
def and_i_click_save_button(step):
world.page.submit()
@step(u'Then I should see that the users password was reset successfully')
def then_i_should_see_that_the_users_password_was_reset_successfully(step):
world.page.is_text_present('The password was succesfully reset')
@step(u'And the user should not be able to login successfully using old password')
def and_the_user_should_not_be_able_to_login_successfully_using_old_password(step):
world.page.logout()
login(world.page, world.datasubmitteruser, 'pass')
world.page.is_text_present('Invalid password')
world.page.validate_url()
@step(u'And the user should be able to loggin successfully using the new password')
def and_the_user_should_be_able_to_loggin_successfully_using_the_new_password(step):
login(world.page, world.datasubmitteruser, 'p@ss')
world.page.is_text_present('Invalid password')
world.page = QuestionnairePage(world.browser)
world.page.validate_url()
def login(page, username, password):
page.visit()
page.fill_form({'username': username,
'password': password})
page.submit()
|
eJRF/ejrf
|
questionnaire/features/users_steps.py
|
Python
|
bsd-3-clause
| 18,631
|
[
"VisIt"
] |
1fb96ec8f7402a9beac654bdcc35fc94bb2f08c4e4f3bba68404b160b3fe6895
|
import numpy as N
import pyfits as PF
import pylab as P
# Note that the lamp spectrum can be present in the raw data, but would not be
# present in the frame that is used in the HST. Thus, this test is
# not entirely accurate.
climit = 67.
f3 = 'lb3y01c5q_rawtag_b.fits'
f6 = 'lb6d01giq_rawtag_b.fits'
d3 = PF.open(f3)[1].data
d6 = PF.open(f6)[1].data
min3 = N.min(d3.RAWX)
max3 = N.max(d3.RAWX)
min6 = N.min(d6.RAWX)
max6 = N.max(d6.RAWX)
time3 = 15.
time6 = 15.
bins3 = N.arange(max3)[::4]
bins6 = N.arange(max6)[::4]
n3, b3 = N.histogram(d3.RAWX[d3.TIME < 15.02], bins=bins3)
n6, b6 = N.histogram(d6.RAWX[d6.TIME < 15.02], bins=bins6)
wid3 = N.abs(b3[0] - b3[1])
wid6 = N.abs(b6[0] - b6[1])
print 'Max count rate in %s is %4.2f (avg over 15s)' % (f3, N.max(n3)/time3)
print 'Max count rate in %s is %4.2f (avg over 15s)' % (f6, N.max(n6)/time6)
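# Hedged, self-contained sketch (not part of the analysis above): the reported
# count rate is simply the most populated 4-pixel-wide superpixel divided by
# the 15 s time window. The toy RAWX positions below are invented.
def _count_rate_demo():
    toy_x = N.array([10, 11, 12, 13, 14, 200, 201])
    counts, edges = N.histogram(toy_x, bins=N.arange(toy_x.max())[::4])
    return N.max(counts) / 15.   # counts per second averaged over 15 s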
#plot 1
fig = P.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
bars1 = ax.bar(b3[:-1], n3/time3, width=(wid3*0.98))
ax2.set_xlabel('Super Pixels (in dispersion direction)')
ax.set_ylabel('Count Rate (counts / s)')
ax.set_xlim(min3, max3/4.)
bars1 = ax2.bar(b3[:-1], n3/time3, width=(wid3*0.98))
ax2.axhline(climit, label = 'Local count rate limit')
ax2.set_ylabel('Count Rate (counts / s)')
_tmp = b3[n3 == N.max(n3)] + wid3
ax2.set_xlim(_tmp[0] - 12, _tmp[0] + 12)
P.annotate('Zero order light of 11530 visit 01',
xy = (0.5, 0.95),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.annotate('Local count rate limit %4.1f' % climit,
xy = (0.5, 0.83),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.annotate('Max Count Rate %4.1f cts/s (averaged over %5.1f s)' % (N.max(n3)/time3, time3),
xy = (0.5, 0.3),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.legend(shadow = True, fancybox = True)
P.savefig('lb3y01c5q.pdf')
P.close()
#plot2
fig = P.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
bars1 = ax.bar(b6[:-1], n6/time6, width=(wid6*0.98))
ax2.set_xlabel('Super Pixels (in dispersion direction)')
ax.set_ylabel('Count Rate (counts / s)')
ax.set_xlim(min6, max6/4.)
bars1 = ax2.bar(b6[:-1], n6/time6, width=(wid6*0.98))
ax2.axhline(climit, label = 'Local count rate limit')
ax2.set_ylabel('Count Rate (counts / s)')
_tmp = b6[n6 == N.max(n6)] + wid6
ax2.set_xlim(_tmp[0] - 12, _tmp[0] + 12)
P.annotate('Zero order light of 11528 visit 01',
xy = (0.5, 0.95),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.annotate('Local count rate limit %4.1f' % climit,
xy = (0.5, 0.83),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.annotate('Max Count Rate %4.1f cts/s (averaged over %5.1f s)' % (N.max(n6)/time6, time6),
xy = (0.5, 0.3),
horizontalalignment='center',
verticalalignment='center',
xycoords='figure fraction')
P.legend(shadow = True, fancybox = True)
P.savefig('lb6d01giq.pdf')
P.close()
|
sniemi/SamPy
|
sandbox/src/LocalCountRatesFromRaw.py
|
Python
|
bsd-2-clause
| 3,342
|
[
"VisIt"
] |
354326535022d42b9001c89529b88a0fbe9b3a57d22ad76b31af7b80565a5920
|
######################################################
# RATUser.py
# ---------
# Author: Matt Mottram
# <m.mottram@qmul.ac.uk>
#
# Description:
# Ganga application for SNO+ user analysis/simulation.
#
# Runs RAT snapshots on the given backend via ratRunner.py
# Ships the code in the input sandbox, can either download
# the snapshot from a given rat fork, or checkout and tar
# up the snapshot from a local repository.
#
# Classes:
# - RATUser: user analysis/simulation application
# - RATUserSplitter: splitter to create subjobs for subruns
# - UserRTHandler: handles submission to local/batch backends
# - UserLCGRTHandler: handles submission to LCG backend
#
# Revision History:
# - 03/11/12: M. Mottram: first revision with proper documentation!
# - 06/12/13: M. Mottram: Removed use of tokens, updated config and schema.
# - 26/08/14: M. Mottram: moved RT handlers to separate file (RTRATUser.py)
#
######################################################
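######################################################
# Hedged usage sketch (assumed, not taken from SNO+ documentation): inside a
# Ganga session the application is normally attached to a Job object. The
# attribute names mirror the schema defined below; the values are invented.
#
#   j = Job(application=RATUser())
#   j.application.ratMacro = '/path/to/analysis.mac'
#   j.application.ratBaseVersion = '6.5.0'
#   j.application.outputFile = 'analysis_output.root'
#   j.application.nEvents = 1000
#   j.backend = Local()
#   j.submit()
######################################################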
import os
import GangaCore.Utility.Config
import GangaCore.Utility.logging
from GangaSNOplus.Lib.Utilities import RATUtil
from GangaCore.Core.exceptions import ApplicationConfigurationError
from GangaCore.GPIDev.Adapters.IApplication import IApplication
from GangaCore.GPIDev.Adapters.ISplitter import ISplitter
from GangaCore.GPIDev.Schema import *
from GangaCore.GPIDev.Lib.File import *
###################################################################
config = GangaCore.Utility.Config.makeConfig('defaults_RATUser','Defaults for the RATUser application')
config.addOption('local_softwareEnvironment', None, 'Local snoing-install directory (or directory with env_rat-x.y.sh files)')
config.addOption('local_environment', [], 'Environment options required to run on local or batch system')
config.addOption('local_outputDir', None, '*Default* output directory if running on a batch or local system (can override)')
config.addOption('grid_outputDir', None, '*Default* output directory if running on system with grid storage (can override)')
config.addOption('cacheDir', '~/gaspCache', 'Directory to store RAT snapshots (if required)')
# Assume that the applications should come from the same GangaSNOplus directory
_app_directory = os.path.dirname(__file__)
###################################################################
class RATUser(IApplication):
"""The RAT job handler for data production and processing"""
#_schema is required for any Ganga plugin
#Add any options that are required, but try to set sensible default values to minimise effort required of user
_schema = Schema(Version(1,1), {
'discardOutput' : SimpleItem(defvalue=False,doc='Do not store the output: default False',typelist=['bool']),
'environment' : SimpleItem(defvalue=[],doc='list of strings with the commands to setup the correct backend environment, or single string location of a file with the appropriate commands (if necessary)',typelist=['str','list']),
'inputFile' : SimpleItem(defvalue=None,doc='Input file name, macro cannot have the inroot process defined within!',
typelist=['str','type(None)']),
'nEvents' : SimpleItem(defvalue=None,doc='Number of events to run, MUST not define number of events in the macro (/rat/run/start)',
typelist=['int','type(None)']),
'outputDir' : SimpleItem(defvalue=None,doc='Which output directory should we use (default Grid: RATUser/general/, should be modified to RATUser/<your-name> if you dont want admins to mess with it!)',
typelist=['str','type(None)']),
'outputFile' : SimpleItem(defvalue=None,doc='Output file name, macro must have outroot processor, but no output file defined!',
typelist=['str','type(None)']),
'rat_db_name' : SimpleItem(defvalue=None, doc='RAT db name', typelist=['str', 'type(None)']),
'rat_db_pswd' : SimpleItem(defvalue=None, doc='RAT db password', typelist=['str', 'type(None)']),
'rat_db_protocol' : SimpleItem(defvalue=None, doc='RAT db protocol', typelist=['str', 'type(None)']),
        'rat_db_url' : SimpleItem(defvalue=None, doc='RAT db URL', typelist=['str', 'type(None)']),
        'rat_db_user' : SimpleItem(defvalue=None, doc='RAT db user', typelist=['str', 'type(None)']),
'ratBaseVersion' : SimpleItem(defvalue='dev',doc='RAT version that ratVersion derives from, necessary to get the correct libraries (ROOT, Geant4 etc)',
typelist=['str',"int"]),
'ratFork' : SimpleItem(defvalue='snoplus', doc='Fork of RAT [snoplus]', typelist=['str']),
'ratMacro' : SimpleItem(defvalue=None,doc='String pointing to the macro file to run',
typelist=['str','type(None)']),
'ratVersion' : SimpleItem(defvalue=None,doc='RAT version tag for the version to download and install (can also be a branch name, not recommended)',
typelist=['str','type(None)']),
'softwareEnvironment': SimpleItem(defvalue=None,doc='Software environment file, required if running on a non-LCG backend',
typelist=['str','type(None)']),
'tRun' : SimpleItem(defvalue=None,doc='Duration of run (cannot use with nEvents)',
typelist=['int','type(None)']),
'useDB' : SimpleItem(defvalue=False,doc='Use the RAT database (snopl.us)?',typelist=['bool']),
'versionUpdate' : SimpleItem(defvalue=False, doc="Update the rat version tag?", typelist=['bool']),
})
_category = 'applications'
_name = 'RATUser'
config = GangaCore.Utility.Config.getConfig('defaults_RATUser')
def configure(self,masterappconfig):
'''Configure method, called once per job.
'''
logger.debug('RAT::RATUser configure ...')
job = self._getParent()
masterjob = job._getParent()
#Critical options:
# - ratMacro
# - outputDirectory
# - ratBaseVersion
#If these aren't defined, don't let the user submit the job
#Note, the ratMacro can be defined in the subjob...
if self.ratMacro!=None:
job.inputsandbox.append(File(self.ratMacro))
else:
logger.error('Rat macro not defined')
raise Exception
if self.ratBaseVersion=='':
logger.error('Error: must give a rat base (fixed release) version number')
raise Exception
if not self.outputFile:
logger.error('No output file defined!') #checks if output file set in command line
raise Exception
if RATUtil.check_command(self.ratMacro,['/rat/procset','file']):
            logger.error('Output file should only be defined via the command line and not in the macro!') #checks if output file is set in the macro
raise Exception
if self.outputFile:
if not RATUtil.check_command(self.ratMacro,['/rat/proclast','outroot']) and not RATUtil.check_command(self.ratMacro,['/rat/proclast','outntuple']) and not RATUtil.check_command(self.ratMacro,['/rat/proclast','outsoc']):
                logger.error('Have specified an output file, but no root, ntuple or soc processor present in macro') #checks for an output processor (outroot, outntuple or outsoc), which needs to be there regardless of where the output file is defined
raise Exception
if not self.nEvents and not self.tRun:
            logger.error('Need to specify either the number of events or the duration of the run!') #checks that one of nEvents or tRun is set on the command line
raise Exception
if self.nEvents and self.tRun:
logger.error('Cannot specify number of events and the duration of run!')
raise Exception
if not RATUtil.check_command(self.ratMacro,['/rat/run/start','']):
logger.error('/rat/run/start must be set in the macro but no number of events should be specified! Number of events should only be defined via the command line!') #check if the /rat/run/start command is set
raise Exception
if self.inputFile:
if RATUtil.checkText(self.ratMacro,['inroot/read']):
logger.error('Cannot specify inputFile in Ganga job if "/rat/inroot/read" line is present in macro')
raise Exception
if self.useDB:
if not config['rat_db_pswd']:
logger.error('Need a password in order to contact the ratdb database')
raise Exception
#Always run rat with a log called rat.log
job.outputsandbox.append('rat.log')
job.outputsandbox.append('return_card.js')
if self.ratVersion!=None:
#download the code locally
#only uses the main SNO+ rat branch for now
#need to add pkl object to inform which branch we have and add others when required
self.zipFileName = RATUtil.make_rat_snapshot(self.ratFork, self.ratVersion, self.versionUpdate, os.path.expanduser(config['cacheDir']))
job.inputsandbox.append(File(self.zipFileName))
#all args have to be str/file - force rat base version to be a string
self.ratBaseVersion=str(self.ratBaseVersion)
return(None,None)
###################################################################
class RATUserSplitter(ISplitter):
'''Splitter for RAT User jobs.
'''
_name = "RATUserSplitter"
_schema = Schema(Version(1,0), {
'ratMacro' : SimpleItem(defvalue=[],typelist=['str'],sequence=1,doc='A list of lists for specifying rat macros files'),
'outputFile' : SimpleItem(defvalue=[],typelist=['str','type(None)'],sequence=1,doc='A list of lists for specifying rat output files'),
'inputFile' : SimpleItem(defvalue=[],typelist=['str','type(None)'],sequence=1,doc='A list of lists for specifying rat input files'),
'nEvents' : SimpleItem(defvalue=[],typelist=['int','type(None)'],sequence=1,doc='A list of the number of events for each sub job')
} )
def split(self,job):
if self.outputFile!=[]:
if len(self.outputFile)!=len(self.ratMacro):
logger.error('Must have same number of macros, outputs and inputs for the splitter')
raise Exception
if self.inputFile!=[]:
if len(self.inputFile)!=len(self.ratMacro):
logger.error('Must have same number of macros, outputs and inputs for the splitter')
raise Exception
if self.nEvents!=[]:
if len(self.nEvents)!=len(self.ratMacro):
logger.error('Must have same number of nEvents as macros for the splitter')
raise Exception
subjobs = []
for i,rm in enumerate(self.ratMacro):
j = self.createSubjob(job)
j.application.ratMacro = rm
if self.outputFile!=[]:
j.application.outputFile = self.outputFile[i]
if self.inputFile!=[]:
j.application.inputFile = self.inputFile[i]
if self.nEvents!=[]:
j.application.nEvents = self.nEvents[i]
subjobs.append(j)
return subjobs
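# Hedged usage sketch (assumed, not taken from SNO+ documentation): the splitter
# lists are parallel, one entry per subjob; file names and counts are invented.
#
#   s = RATUserSplitter()
#   s.ratMacro = ['run_1.mac', 'run_2.mac']
#   s.outputFile = ['out_1.root', 'out_2.root']
#   s.nEvents = [500, 500]
#   j.splitter = s   # attach to the Job from the earlier sketch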
###################################################################
from GangaCore.GPIDev.Adapters.ApplicationRuntimeHandlers import allHandlers
from GangaSNOplus.Lib.RTHandlers.RTRATUser import UserRTHandler, UserLCGRTHandler, UserWGRTHandler, UserDiracRTHandler
allHandlers.add('RATUser','Local', UserRTHandler)
allHandlers.add('RATUser','PBS', UserRTHandler)
allHandlers.add('RATUser','SGE', UserRTHandler)
allHandlers.add('RATUser','Condor', UserRTHandler)
allHandlers.add('RATUser','LCG', UserLCGRTHandler)
allHandlers.add('RATUser','Dirac', UserDiracRTHandler)
allHandlers.add('RATUser','TestSubmitter', UserRTHandler)
allHandlers.add('RATUser','Interactive', UserRTHandler)
allHandlers.add('RATUser','Batch', UserRTHandler)
allHandlers.add('RATUser','WestGrid', UserWGRTHandler)
logger = GangaCore.Utility.logging.getLogger()
|
ganga-devs/ganga
|
ganga/GangaSNOplus/Lib/Applications/RATUser.py
|
Python
|
gpl-3.0
| 12,478
|
[
"DIRAC"
] |
ae03e2fd98a58eea9e3bfbf6b6a436126efd80bf7fad59b87ed4ae808ac095a1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
"""
# from svcf import SVFile
from collections import deque
from itertools import combinations
import networkx as nx
def is_smaller_chrom(chrA, chrB, le=True):
""" Test if chrA is """
if chrA.startswith('chr'):
chrA = chrA[3:]
if chrB.startswith('chr'):
chrB = chrB[3:]
# Numeric comparison, if possible
if chrA.isdigit() and chrB.isdigit():
if le:
return int(chrA) <= int(chrB)
else:
return int(chrA) < int(chrB)
# String comparison for X/Y
elif not chrA.isdigit() and not chrB.isdigit():
if le:
return chrA <= chrB
else:
return chrA < chrB
# Numeric is always less than X/Y
else:
return chrA.isdigit()
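# Hedged examples (not part of the original module) exercising the ordering
# defined by is_smaller_chrom above; the chromosome names are illustrative.
def _chrom_ordering_examples():
    assert is_smaller_chrom('chr2', '10')   # numeric comparison: 2 <= 10
    assert is_smaller_chrom('X', 'chrY')    # lexicographic comparison for X/Y
    assert is_smaller_chrom('3', 'X')       # numeric always precedes X/Y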
class RPCNode(object):
def __init__(self, chrA, posA, chrB, posB, name='.'):
"""
Common format for SV calls for intersection analyses.
Includes methods for RPC-based clustering
"""
self.chrA = str(chrA)
self.posA = int(posA)
self.chrB = str(chrB)
self.posB = int(posB)
self.name = str(name)
self.sort_positions()
def sort_positions(self):
# Force chrA, posA to be upstream of chrB, posB
if self.chrA == self.chrB:
self.posA, self.posB = sorted([self.posA, self.posB])
elif not is_smaller_chrom(self.chrA, self.chrB):
self.chrA, self.chrB = self.chrB, self.chrA
self.posA, self.posB = self.posB, self.posA
def is_clusterable_with(self, other, dist):
"""
        Test if self and other share chrA and their first breakpoints lie
        within dist of each other (i.e. they are candidates for clustering).
"""
return (self.chrA == other.chrA and
abs(self.posA - other.posA) < dist)
def clusters_with(self, other, dist):
return (self.chrB == other.chrB and
abs(self.posA - other.posA) < dist and
abs(self.posB - other.posB) < dist)
def is_in(self, tabixfile):
"""
Test if breakpoints of SV fall into any region in tabix-indexed bed.
Parameters
----------
tabixfile : pysam.TabixFile
Returns
-------
is_in : bool
"""
return ((self.chrA.encode('utf-8') in tabixfile.contigs and
any(tabixfile.fetch(self.chrA, self.posA, self.posA + 1))) or
(self.chrB.encode('utf-8') in tabixfile.contigs and
any(tabixfile.fetch(self.chrB, self.posB, self.posB + 1))))
@property
def HQ(self):
"""
Filter function
"""
return True
@property
def secondary(self):
"""
Filter function
TODO: rename
"""
return False
@property
def is_allowed_chrom(self):
GRCh = [str(x) for x in range(1, 23)] + 'X Y'.split()
UCSC = ['chr' + x for x in GRCh]
chroms = GRCh + UCSC
return (self.chrA in chroms) and (self.chrB in chroms)
def __hash__(self):
return id(self)
def __eq__(self, other):
return (self.chrA == other.chrA and
self.posA == other.posA and
self.chrB == other.chrB and
self.posB == other.posB and
self.name == other.name)
def _compare(self, other, le=True, match_chrom=False):
"""
Abstraction for __le__ and __lt__
Both use same logic for chromosome comparison
"""
if match_chrom:
if self.chrA == other.chrA:
if self.chrB == other.chrB:
if le:
return self.posA <= other.posA
else:
return self.posA < other.posA
else:
return is_smaller_chrom(self.chrB, other.chrB)
else:
return is_smaller_chrom(self.chrA, other.chrA)
else:
if self.chrA == other.chrA:
if le:
return self.posA <= other.posA
else:
return self.posA < other.posA
else:
return is_smaller_chrom(self.chrA, other.chrA)
def __lt__(self, other):
return self._compare(other, le=False)
def __le__(self, other):
return self._compare(other)
def __str__(self):
        return ('{chrA}\t{posA}\t{chrB}\t{posB}\t{name}'.format(
**self.__dict__))
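# Hedged illustration (coordinates invented): RPCNode.sort_positions() forces
# (chrA, posA) to be the upstream breakpoint, swapping ends when necessary.
def _sort_positions_demo():
    node = RPCNode('5', 200, '2', 100)
    # After construction: chrA == '2', posA == 100, chrB == '5', posB == 200
    return node.chrA, node.posA, node.chrB, node.posB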
class RPC(object):
def __init__(self, nodes, dist, size=1, excluded_regions=None):
"""
Parameters
----------
svcalls : SVCalls
Iterator over sorted SVCalls.
dist : int
Maximum clustering distance.
excluded_regions : pysam.TabixFile, optional
Regions to exclude from clustering. Any read pair that overlaps
with a region is omitted.
"""
self.nodes = nodes
self.dist = dist
self.size = size
self.excluded_regions = excluded_regions
        # TODO: redefine rpc funcs as class to subclass for rpc?
# (inc mapq, size, etc)
def get_candidates(self):
"""
Find batches of SVCalls eligible for clustering.
Requires input sorted by chromosome and position of first read in each
pair. Pairs are collected while the first read in the next pair in the
parser is within the maximum clustering distance of the first read of
the previous pair.
Yields
------
deque of SVCalls
"""
candidates = deque()
prev = None
self.excluded_count = 0
self.total_count = 0
for node in self.nodes:
self.total_count += 1
if (self.excluded_regions and node.is_in(self.excluded_regions)):
self.excluded_count += 1
continue
if prev is None or prev.is_clusterable_with(node, self.dist):
candidates.append(node)
else:
yield candidates
candidates = deque([node])
prev = node
yield candidates
def cluster(self):
"""
Perform single linkage clustering on a batch of SVCalls.
Yields
------
list of RPCNode
A cluster of read pairs.
"""
for candidates in self.get_candidates():
G = nx.Graph()
# Permit clusters of size 1
for node in candidates:
G.add_node(node)
for node1, node2 in combinations(candidates, 2):
if node1.clusters_with(node2, self.dist):
G.add_edge(node1, node2)
clusters = list(nx.connected_components(G))
# Sort clusters internally by first read's position,
# then sort clusters by first pair's first read's position
clusters = [sorted(cluster, key=lambda v: (v.posA, v.name))
for cluster in clusters]
clusters = sorted(clusters, key=lambda c: c[0].posA)
for cluster in clusters:
if len(cluster) >= self.size:
yield cluster
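# A minimal, hedged usage sketch (coordinates and names invented): cluster
# three breakpoint calls on chromosome 1, two of which fall within the
# clustering distance of each other at both ends.
def _rpc_clustering_demo():
    calls = [
        RPCNode('1', 1000, '1', 5000, name='a'),
        RPCNode('1', 1030, '1', 5040, name='b'),   # within 100 bp of 'a' at both ends
        RPCNode('1', 9000, '1', 12000, name='c'),  # too far from the others
    ]
    rpc = RPC(iter(calls), dist=100)
    # Expected grouping: [['a', 'b'], ['c']]
    return [[node.name for node in cluster] for cluster in rpc.cluster()]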
# class SVCluster(RPC):
# def __init__(self, svfiles, dist, excluded_regions=None):
# nodes = heapq.merge(*svfiles)
# super().__init__(nodes, dist, excluded_regions)
# def merge_cluster(self):
# for cluster in self.cluster():
# yield SVCallCluster(cluster)
# pass
# def svcluster(svfiles, dist):
# svcalls = heapq.merge(*svfiles)
# for cluster in rpc(svcalls, dist):
# if len(cluster) == 1:
# yield cluster[0]
# else:
# yield cluster[0].merge(cluster[1:])
# def main():
# parser = argparse.ArgumentParser(
# description="")
# parser.add_argument('filelist')
# parser.add_argument('svcf')
# args = parser.parse_args()
# svcluster()
# if __name__ == '__main__':
# main()
|
talkowski-lab/Holmes
|
readpaircluster/svcf/rpc.py
|
Python
|
mit
| 8,112
|
[
"pysam"
] |
ba6a09d3a1e1c68dd0f0b2a10efc91fa12ea6022cf30697534752c2a9bb694a0
|
from numpy import (
vstack, where, intersect1d, in1d, unique,
cross, abs, arccos, sign,
dot, array, cov, nan_to_num, inf, pi,
hstack, repeat, bincount, arange
)
from numpy.linalg import norm, solve
class Box2D:
def __init__(self, *args, **kwargs):
if len(args) <= 5:
self._compute_bounding_box(*args, **kwargs)
else:
self._set_variables(*args)
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
center = points.mean(0)
centered_points = points - center
orientation = vectors.sum(0)
orientation /= norm(orientation)
orthogonal_direction = orthogonal_vector(orientation)
orthogonal_direction /= norm(orthogonal_direction)
points_orthogonal = dot(
orthogonal_direction,
centered_points.T
)
points_orientation = dot(orientation, centered_points.T)
max_main = points_orientation.max()
min_main = points_orientation.min()
max_orthogonal = points_orthogonal.max()
min_orthogonal = points_orthogonal.min()
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction * max_orthogonal,
orientation * max_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * max_orthogonal,
)) + center)
center = bounding_box_corners.mean(0)
volume = (max_main - min_main) * (max_orthogonal - min_orthogonal)
self.orthogonal = orthogonal_direction
self.points_orientation = points_orientation
self.points_orthogonal = points_orthogonal
self._set_variables(
bounding_box_corners, center, orientation,
labels, points, point_ids, vectors, volume,
None, None, None, level
)
def _set_variables(self,
box,
center,
orientation,
labels,
points,
point_ids,
vectors,
volume,
parent,
left,
right,
level
):
self.box = box
self.center = center
self.orientation = orientation
self.labels = labels
self.points = points
self.point_ids = point_ids
self.vectors = vectors
self.volume = volume
self.parent = parent
self.left = left
self.right = right
self.level = level
self._calculate_orientation_limits()
self._calculate_orthogonal_limits()
def _calculate_orientation_limits(self):
projections = [dot(self.orientation, point) for point in self.box]
self.orientation_limits = (min(projections), max(projections))
def _calculate_orthogonal_limits(self):
projections = [dot(self.orthogonal, point) for point in self.box]
self.orthogonal_limits = (min(projections), max(projections))
def siblings(self, generations_up=0, generations_down=0):
if generations_up == 0 and generations_down == 1:
left = [self.left] if self.left is not None else []
right = [self.right] if self.right is not None else []
return left + right
elif generations_up > 0:
if self.parent is None:
return []
return self.parent.siblings(generations_up - 1, generations_down + 1)
elif generations_down > 1:
if self.left is not None:
left = self.left.siblings(0, generations_down - 1)
else:
left = []
if self.right is not None:
right = self.right.siblings(0, generations_down - 1)
else:
right = []
return left + right
def swap_direction(self):
self.orientation *= -1
self._calculate_orientation_limits()
def overlap_main(self, box):
projections = [dot(self.orientation, point) for point in box.box]
orientation_limits = (min(projections), max(projections))
if (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return True
return False
def center_signed_orientational_distance(self, box):
return dot(self.orientation, self.center - box.center)
def center_distance(self, box):
return norm(box.center - self.center)
def __repr__(self):
return self.box.__repr__() + '\n' +\
'level:' + repr(self.level)
def __str__(self):
return self.box.__str__() + '\n' +\
'level:' + str(self.level)
class Box3D(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1,
centered_points.T
)
points_orthogonal2 = dot(
orthogonal_direction2,
centered_points.T
)
max_main, min_main = points_orientation.max(), points_orientation.min()
max_orthogonal1, min_orthogonal1 = (
points_orthogonal1.max(),
points_orthogonal1.min()
)
max_orthogonal2, min_orthogonal2 = (
points_orthogonal2.max(),
points_orthogonal2.min()
)
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
    def overlap_orthogonal(self, box):
        projections = dot(self.orthogonal1, box.box.T).T
        orthogonal1_limits = (min(projections), max(projections))
        # True when the projection intervals of the two boxes overlap along
        # the first orthogonal axis
        overlap_orthogonal1 = (
            self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
            self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
        )
        if not overlap_orthogonal1:
            return False
        projections = dot(self.orthogonal2, box.box.T).T
        orthogonal2_limits = (min(projections), max(projections))
        overlap_orthogonal2 = (
            self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
            self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
        )
        return overlap_orthogonal1 and overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
projections = dot(self.orthogonal2, box.box.T)
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
class Box3DRich(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None, robustify=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
if robustify == 'points' and len(points) > 4:
p_mean = points.mean(0)
p_cov = cov(points.T)
c_points = points - p_mean
z = (solve(p_cov, c_points.T) * c_points.T).sum(0)
cutoff = 9.3484036044961485 # chi2.ppf(.975, 3)
points = points[z < cutoff]
point_ids = point_ids[z < cutoff]
print 'Discarded', (len(original_points) - len(points)) * 1. / len(points)
vectors = vectors[z < cutoff]
if labels is not None:
labels = labels[z < cutoff]
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1, centered_points.T)
points_orthogonal2 = dot(
orthogonal_direction2, centered_points.T)
max_main, min_main = points_orientation.max(), points_orientation.min()
        max_orthogonal1, min_orthogonal1 = (
            points_orthogonal1.max(), points_orthogonal1.min()
        )
        max_orthogonal2, min_orthogonal2 = (
            points_orthogonal2.max(), points_orthogonal2.min()
        )
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
    def overlap_orthogonal(self, box):
        projections = dot(self.orthogonal1, box.box.T).T
        orthogonal1_limits = (min(projections), max(projections))
        # True when the projection intervals of the two boxes overlap along
        # the first orthogonal axis
        overlap_orthogonal1 = (
            self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
            self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
        )
        if not overlap_orthogonal1:
            return False
        projections = dot(self.orthogonal2, box.box.T).T
        orthogonal2_limits = (min(projections), max(projections))
        overlap_orthogonal2 = (
            self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
            self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
        )
        return overlap_orthogonal1 and overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
projections = dot(self.orthogonal2, box.box.T)
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
def orthogonal_vector(vector, tol=1e-8):
    a_vector = abs(vector)
    if len(vector) == 3:
        if a_vector[0] > tol:
            orthogonal = vector[::-1] * (1, 0, -1)
        elif a_vector[2] > tol:
            orthogonal = vector[::-1] * (-1, 0, 1)
        elif a_vector[1] > tol:
            # The vector is (numerically) aligned with the y axis, so any
            # direction in the xz plane is orthogonal to it
            orthogonal = array((1., 0., 0.))
        else:
            raise ValueError('vector must have non-null norm')
    else:
        if a_vector[0] > tol:
            orthogonal = vector[::-1] * (-1, 1)
        elif a_vector[1] > tol:
            orthogonal = vector[::-1] * (1, -1)
        else:
            raise ValueError('vector must have non-null norm')
    orthogonal /= norm(orthogonal)
    return orthogonal
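# A small, self-contained sanity sketch for orthogonal_vector (hypothetical
# values, for illustration only): the returned vector has unit norm and a
# vanishing dot product with the input.
def _orthogonal_vector_example():
    v = array([0., 3., 4.])
    o = orthogonal_vector(v)
    assert abs(dot(v, o)) < 1e-8
    assert abs(norm(o) - 1.) < 1e-8
    return o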
def box_cut(points, direction, mapped_points=None, max_main=None, min_main=None):
if mapped_points is None:
mapped_points = dot(direction, points.T)
if max_main is None:
max_main = mapped_points.max()
if min_main is None:
min_main = mapped_points.min()
mid_main = (max_main + min_main) / 2.
split1 = where(mapped_points <= mid_main)
split2 = where(mapped_points > mid_main)
return split1, split2
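# Hypothetical illustration of box_cut: points are split at the midpoint of
# their projections onto the given direction, returning index tuples that can
# be used for fancy indexing.
def _box_cut_example():
    pts = array([[0., 0., 0.], [1., 0., 0.], [10., 0., 0.]])
    left, right = box_cut(pts, array([1., 0., 0.]))
    # The projection midpoint is 5.0, so the first two points fall on the left
    return pts[left], pts[right]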
def all_obb_2d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None):
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
box_center = Box2D(points, vectors, labels, level)
level += 1
if len(unique(labels)) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
left, right = box_cut(points, box_center.orthogonal,
mapped_points=box_center.points_orthogonal)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) >= len(unique(labels)) * intersection_threshold:
split_along_fiber = True
else:
points_left = points[left]
vectors_left = vectors[left]
box_left = Box2D(points_left, vectors_left, labels_left)
points_right = points[right]
vectors_right = vectors[right]
        box_right = Box2D(points_right, vectors_right, labels_right)
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_left)
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique(labels)) * intersection_threshold:
return [box_center]
points_left = points[left]
vectors_left = vectors[left]
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold)
points_right = points[right]
vectors_right = vectors[right]
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
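# Minimal sketch (synthetic, hypothetical data) of driving all_obb_2d: two
# short planar "tracts" are turned into points, step vectors and per-tract
# labels, mirroring what obb_from_tractography does for the 3D case below.
def _all_obb_2d_example():
    tracts = [array([[0., 0.], [1., 0.], [2., 0.]]),
              array([[0., 1.], [1., 1.], [2., 1.]])]
    points = vstack([t[:-1] for t in tracts])
    vectors = vstack([t[1:] - t[:-1] for t in tracts])
    labels = hstack([repeat(i, len(t) - 1) for i, t in enumerate(tracts)])
    return all_obb_2d(points, vectors, labels)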
def all_obb_3d_along_tract(
points, vectors, labels, tol=1e-8, level=0,
intersection_threshold=.8, split_threshold=.2,
box=None, clean=False, point_ids=None
):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
left, right = box_cut(
points, box_center.orientation,
mapped_points=box_center.points_orientation
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
    if clean:
        # Fraction of each label's points that fell on each side of the split
        total_labels_count = bincount(labels)
        labels_count = {
            side: bincount(split_labels[side])
            for side in split_labels
        }
        labels_ratio = {
            side: nan_to_num(
                labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
            ) for side in labels_count
        }
new_results = [box_center]
if (
(len(labels_both) <= len(unique_labels) * intersection_threshold) and
(box_center.points_orientation.ptp() / 2. < min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
            if clean:
                clean_labels = in1d(
                    new_labels,
                    intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
                )
                new_points = new_points[clean_labels]
                new_point_ids = new_point_ids[clean_labels]
                new_vectors = new_vectors[clean_labels]
                new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d_along_tract(
new_points, new_vectors, new_labels,
tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean
)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, clean=False, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(
points, point_ids, vectors,
labels, level
)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
points, getattr(box_center, orientation),
mapped_points=getattr(box_center, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
    if clean:
        # Fraction of each label's points that fell on each side of the split
        total_labels_count = bincount(labels)
        labels_count = {
            side: bincount(split_labels[side])
            for side in split_labels
        }
        labels_ratio = {
            side: nan_to_num(
                labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
            ) for side in labels_count
        }
new_results = [box_center]
print level
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
            if clean:
                clean_labels = in1d(
                    new_labels,
                    intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
                )
                new_points = new_points[clean_labels]
                new_point_ids = new_point_ids[clean_labels]
                new_vectors = new_vectors[clean_labels]
                new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d(
new_points, new_vectors, new_labels, tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d_nr(points_, vectors_, labels_, tol=1e-8, level_=0, intersection_threshold=.8, split_threshold=.2, robustify=None, point_ids_=None):
if point_ids_ is None:
point_ids_ = arange(len(points_))
root = Box3D(points_, point_ids_, vectors_, labels_, level_, robustify=robustify)
stack = [root]
total_points = len(points_)
points_done = 0
while len(stack):
box = stack.pop()
level = box.level + 1
if len(box.points) == 1:
continue
unique_labels = unique(box.labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
box.points, getattr(box, orientation),
mapped_points=getattr(box, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': box.labels[left],
'right': box.labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
print level, len(unique_labels), len(box.points), total_points - points_done
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
points_done += len(box.points)
continue
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = box.points[mask]
new_point_ids = box.point_ids[mask]
new_vectors = box.vectors[mask]
if len(new_points) > 1 and len(new_points) < len(box.points):
new_box = Box3D(new_points, new_point_ids, new_vectors, new_labels, level, robustify=robustify)
setattr(box, side, new_box)
getattr(box, side).parent = box
print "\tAdded to stack ", side
stack.append(new_box)
else:
points_done += len(new_points)
return root
def all_obb_3d_old(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
o1_left, o1_right = box_cut(
points, box_center.orthogonal1, mapped_points=box_center.points_orthogonal1)
o2_left, o2_right = box_cut(
points, box_center.orthogonal2, mapped_points=box_center.points_orthogonal2)
o1_labels_left = labels[o1_left]
o1_labels_right = labels[o1_right]
o2_labels_left = labels[o2_left]
o2_labels_right = labels[o2_right]
unique_labels = unique(labels)
if (
len(intersect1d(o1_labels_left, o1_labels_right)) > 0 and
len(intersect1d(o2_labels_left, o2_labels_right)) > 0
):
split_along_fiber = True
    else:
        o1_box_left = Box3D(
            points[o1_left], point_ids[o1_left], vectors[o1_left], o1_labels_left)
        o1_box_right = Box3D(
            points[o1_right], point_ids[o1_right], vectors[o1_right], o1_labels_right)
        o2_box_left = Box3D(
            points[o2_left], point_ids[o2_left], vectors[o2_left], o2_labels_left)
        o2_box_right = Box3D(
            points[o2_right], point_ids[o2_right], vectors[o2_right], o2_labels_right)
        # Keep the orthogonal split that produces the smaller total volume
        if (o1_box_left.volume + o1_box_right.volume) < (o2_box_left.volume + o2_box_right.volume):
            box_left = o1_box_left
            box_right = o1_box_right
        else:
            box_left = o2_box_left
            box_right = o2_box_right
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
left = all_obb_3d(
box_left.points, box_left.vectors, box_left.labels, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_left)
right = all_obb_3d(
box_right.points, box_right.vectors, box_right.labels, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique_labels) * intersection_threshold:
return [box_center]
        points_left = points[left]
        point_ids_left = point_ids[left]
        vectors_left = vectors[left]
        left = all_obb_3d(
            points_left, vectors_left, labels_left, tol=tol, level=level,
            point_ids=point_ids_left,
            intersection_threshold=intersection_threshold)
        points_right = points[right]
        point_ids_right = point_ids[right]
        vectors_right = vectors[right]
        right = all_obb_3d(
            points_right, vectors_right, labels_right, tol=tol, level=level,
            point_ids=point_ids_right,
            intersection_threshold=intersection_threshold)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
def point_coverage_by_level(obbs, points):
level = 0
points_level = [obb.points for obb in obbs if obb.level == level]
level_coverage = []
while len(points_level) > 0:
level_coverage.append(sum((len(
points) for points in points_level)) * 1. / len(points))
level += 1
points_level = [obb.points for obb in obbs if obb.level == level if len(
obb.points) > 0]
return array(level_coverage)
def draw_boxes_2d(obbs, level, color=None, **args):
from pylab import plot, cm
for i, obb in enumerate(obbs):
if obb.level != level:
continue
box = vstack([obb.box, obb.box[0]])
        if color is None:
            plot(box.T[0], box.T[1], lw=5, hold=True, **args)
        else:
            plot(box.T[0], box.T[1], lw=5, hold=True, c=cm.jet(color[i]), **args)
def draw_box_2d(obbs, **args):
from pylab import plot, quiver
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
box = vstack([obb.box, obb.box[0]])
plot(box.T[0], box.T[1], lw=5, hold=True, **args)
        quiver([obb.center[0]], [obb.center[1]], [obb.orientation[0]],
               [obb.orientation[1]], pivot='middle', hold=True, **args)
def draw_box_3d(obbs, tube_radius=1, color=None, **kwargs):
from mayavi.mlab import plot3d
from numpy.random import rand
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
if color is None:
color_ = tuple(rand(3))
else:
color_ = color
box = obb.box
b1 = vstack([box[:4], box[0]]).T
b2 = vstack([box[4:], box[4]]).T
es = [vstack([b1.T[i], b2.T[i]]).T for i in xrange(4)]
        plot3d(b1[0], b1[1], b1[2], tube_radius=tube_radius, color=color_, **kwargs)
        plot3d(b2[0], b2[1], b2[2], tube_radius=tube_radius, color=color_, **kwargs)
        for e in es:
            plot3d(e[0], e[1], e[2], tube_radius=tube_radius, color=color_, **kwargs)
def oriented_trace(obb, positive=True, generations=2, angle_threshold=pi / 4):
tract = [obb]
center = obb
candidates = center.siblings(generations)
if positive:
sg = 1
else:
sg = -1
while len(candidates) > 0:
next_candidate_distance = inf
for c in candidates:
signed_distance = sg *\
sign(center.center_signed_orientational_distance(c)) *\
center.center_distance(c)
if (signed_distance <= 0) or\
not center.overlap_orthogonal(c) or\
arccos(dot(center.orientation, c.orientation)) > angle_threshold:
continue
if signed_distance < next_candidate_distance:
next_candidate_distance = signed_distance
next_candidate = c
if next_candidate_distance < inf:
if next_candidate in tract:
break
tract.append(next_candidate)
if dot(center.orientation, next_candidate.orientation) < 0:
next_candidate.swap_direction()
center = next_candidate
candidates = center.siblings(generations)
else:
break
return tract
def trace(obb, generations=2, angle_threshold=pi / 4):
trace_positive = oriented_trace(obb, True, generations, angle_threshold)
trace_negative = oriented_trace(obb, False, generations, angle_threshold)
return trace_negative[::-1] + trace_positive
def get_most_probable_trace(obbs, generations=2, angle_threshold=pi / 4, return_all=True):
traces_list = [trace(obb, generations=generations,
angle_threshold=angle_threshold) for obb in obbs]
traces_w_set = [(t, set(t)) for t in traces_list]
n = 1. * len(traces_w_set)
traces_with_frequency = []
while len(traces_w_set) > 0:
trace_ = traces_w_set.pop()
traces_w_set_new = []
count = 1
for t in traces_w_set:
if t[1] == trace_[1]:
count += 1
else:
traces_w_set_new.append(t)
traces_with_frequency.append((count / n, trace_[0]))
traces_w_set = traces_w_set_new
traces_with_frequency.sort(cmp=lambda x, y: int(sign(y[0] - x[0])))
return traces_with_frequency
def get_level(tree, level):
if tree is None or tree.level > level:
return []
elif tree.level == level:
return [tree]
else:
return get_level(tree.left, level) + get_level(tree.right, level)
def overlapping_boxes(tree, box, levels=None, threshold=0.):
if tree is None:
return []
overlap = tree.overlap_volume(box)
if overlap < threshold:
return []
else:
left = overlapping_boxes(
tree.left, box, levels=levels, threshold=threshold)
right = overlapping_boxes(
tree.right, box, levels=levels, threshold=threshold)
if levels is None or tree.level in levels:
return [tree] + left + right
else:
return left + right
def containing_boxes(tree, box, levels=None, threshold=1.):
    if tree is None or (levels is not None and tree.level > max(levels)):
        return []
normalized_overlap = tree.overlap_volume(box) / box.volume
if normalized_overlap < threshold:
return []
else:
left = overlapping_boxes(
tree.left, box, levels=levels, threshold=threshold)
right = overlapping_boxes(
tree.right, box, levels=levels, threshold=threshold)
if levels is None or tree.level in levels:
return [tree] + left + right
else:
return left + right
def min_max(vector, axis=None):
return array((vector.min(axis), vector.max(axis)))
def overlap_vtk(self, box):
    a = self
    b = box
    axes_a = vstack((a.orientation, a.orthogonal1, a.orthogonal2))
    axes_b = vstack((b.orientation, b.orthogonal1, b.orthogonal2))
    a2b = b.center - a.center
    # Separating-axis check restricted to the direction joining the two
    # centers: the boxes are disjoint if their corner projections onto that
    # direction form non-overlapping intervals.
    a_a2b_limits = min_max(dot(a2b, a.box.T))
    b_a2b_limits = min_max(dot(a2b, b.box.T))
    if (
        a_a2b_limits[1] < b_a2b_limits[0] or
        b_a2b_limits[1] < a_a2b_limits[0]
    ):
        return False
    return True
def obb_tree_dfs(obb_tree):
for obb in obb_tree:
if obb.level == 0:
root = obb
break
else:
raise ValueError('No root in the tree')
return obb_tree_dfs_recursive(root)
def obb_tree_dfs_recursive(obb_node):
if obb_node is None:
return []
if obb_node.left is None and obb_node.right is None:
return [obb_node]
return obb_tree_dfs_recursive(obb_node.left) + obb_tree_dfs_recursive(obb_node.right)
def prototype_tract(
tracts, obb_tree=None, intersection_threshold=.01, minimum_level=0,
clean=False, return_obb_tree=False, return_leave_centers=False
):
if obb_tree is None:
points = vstack([t[:-1] for t in tracts])
vectors = vstack([t[1:] - t[:-1] for t in tracts])
labels = hstack([repeat(i, len(t) - 1) for i, t in enumerate(tracts)])
obb_tree = all_obb_3d_along_tract(
points, vectors, labels,
intersection_threshold=intersection_threshold, clean=clean
)
if minimum_level < 0:
max_level = max((obb.level for obb in obb_tree))
minimum_level = max_level + 1 - minimum_level
    leave_centers = array([
        obb.center for obb in obb_tree
        if obb.left is None and obb.right is None and obb.level > minimum_level
    ])
mse_tract = array([
((t[..., None] - leave_centers[..., None].T) ** 2).sum(1).min(0).sum()
for t in tracts
])
tract_index = mse_tract.argmin()
if return_obb_tree or return_leave_centers:
res = (tract_index,)
if return_obb_tree:
res += (obb_tree,)
if return_leave_centers:
res += (leave_centers,)
return res
else:
return tract_index
def obb_tree_level(obb_tree, level, include_superior_leaves=True):
if not isinstance(obb_tree, Box3D):
node = obb_tree[0]
for n in obb_tree:
if n.level < node.level:
node = n
else:
node = obb_tree
return obb_tree_level_dfs(node, level, include_superior_leaves=include_superior_leaves)
def obb_tree_level_dfs(obb_node, level, include_superior_leaves=True):
if obb_node is None or obb_node.level > level:
return []
if (
obb_node.level == level or
(
include_superior_leaves and
obb_node.level < level and
obb_node.left is None and obb_node.right is None
)
):
return [obb_node]
return (
obb_tree_level_dfs(obb_node.left, level, include_superior_leaves=include_superior_leaves) +
obb_tree_level_dfs(
obb_node.right, level, include_superior_leaves=include_superior_leaves)
)
def obb_from_tractography(tractography, *args, **kwargs):
    # Consume along_tract here so that it is not forwarded to the OBB builders
    along_tract = bool(kwargs.pop('along_tract', False))
fibers = tractography.tracts()
points = vstack([f[:-1] for f in fibers])
vectors = vstack([f[1:] - f[:-1] for f in fibers])
labels = hstack([repeat(i, len(f) - 1) for i, f in enumerate(fibers)])
if along_tract:
obbs3d = all_obb_3d_along_tract(
points, vectors, labels, **kwargs
)
else:
obbs3d = all_obb_3d_nr(
points, vectors, labels, **kwargs
)
return obbs3d
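# Hypothetical end-to-end sketch: given a tractography object exposing
# .tracts() as a sequence of (N_i, 3) point arrays, build the OBB hierarchy
# along the tract and keep the boxes of one chosen (illustrative) level.
def _obb_from_tractography_example(tractography, level=3):
    obbs = obb_from_tractography(tractography, along_tract=True)
    return obb_tree_level(obbs, level)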
|
BRAINSia/tract_querier
|
tract_querier/tract_math/tract_obb.py
|
Python
|
bsd-3-clause
| 46,285
|
[
"Mayavi"
] |
a4d90cacbb26d7e1ed6a0adb721ec3156095c9b134321a9d534088cdb7aa61f3
|
"""
grdlandmask - Create a "wet-dry" mask grid from shoreline data base
"""
import xarray as xr
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
G="outgrid",
I="spacing",
R="region",
V="verbose",
r="registration",
)
@kwargs_to_strings(R="sequence")
def grdlandmask(**kwargs):
r"""
Create a grid file with set values for land and water.
Read the selected shoreline database and create a grid to specify which
nodes in the specified grid are over land or over water. The nodes defined
by the selected region and lattice spacing
will be set according to one of two criteria: (1) land vs water, or
(2) the more detailed (hierarchical) ocean vs land vs lake
vs island vs pond.
Full option list at :gmt-docs:`grdlandmask.html`
{aliases}
Parameters
----------
outgrid : str or None
The name of the output netCDF file with extension .nc to store the grid
in.
{I}
{R}
{V}
{r}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the ``outgrid`` parameter is set:
- :class:`xarray.DataArray` if ``outgrid`` is not set
- None if ``outgrid`` is set (grid output will be stored in file set by
``outgrid``)
"""
if "I" not in kwargs.keys() or "R" not in kwargs.keys():
raise GMTInvalidInput("Both 'region' and 'spacing' must be specified.")
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
if "G" not in kwargs.keys(): # if outgrid is unset, output to tempfile
kwargs.update({"G": tmpfile.name})
outgrid = kwargs["G"]
arg_str = build_arg_string(kwargs)
lib.call_module("grdlandmask", arg_str)
if outgrid == tmpfile.name: # if user did not set outgrid, return DataArray
with xr.open_dataarray(outgrid) as dataarray:
result = dataarray.load()
_ = result.gmt # load GMTDataArray accessor information
else:
result = None # if user sets an outgrid, return None
return result
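def _grdlandmask_example():
    """
    Hypothetical usage sketch (not part of the public PyGMT API): build a
    coarse global land/water mask and return it as an xarray.DataArray.
    ``region="g"`` is the GMT shorthand for the whole globe and ``spacing=1``
    requests a 1-degree grid; both values are illustrative only.
    """
    return grdlandmask(region="g", spacing=1)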
|
GenericMappingTools/gmt-python
|
pygmt/src/grdlandmask.py
|
Python
|
bsd-3-clause
| 2,314
|
[
"NetCDF"
] |
bed11ac1148d4f0f1e185e190cf86afd32e887683be91ab564ed59b571e13b14
|
from utils import *
import qcelemental as qcel
import qcdb
def test_Molecule_BFS():
#! apply linear fragmentation algorithm to a water cluster
iceIh = """\
36
Crystal created from CIF file. Box size: 7.82000 7.82000 7.36000
O 2.606641 0.000000 3.220000
O 5.213320 4.514902 6.900000
O 6.516680 2.257417 3.220000
N 2.606680 4.514902 3.220000
O 1.303320 2.257417 6.900000
O 5.213359 0.000000 4.140000
O 6.516680 2.257417 0.460000
O 5.213320 4.514902 4.140000
O 1.303320 2.257417 4.140000
O 2.606680 4.514902 0.460000
O 5.213359 0.000000 6.900000
O 2.606641 0.000000 0.460000
H 2.193510 0.711093 3.496000
H 7.339070 2.255182 3.496000
H 5.622580 5.228230 7.176000
H 1.712580 1.544089 7.176000
He 3.429070 4.517137 3.496000
H 6.103510 6.061225 3.496000
H 1.716490 6.061225 7.176000
H 4.390930 4.517137 7.176000
H 6.107420 1.544089 3.496000
He 2.197420 5.228230 3.496000
H 0.480930 2.255182 7.176000
H 4.394840 0.000000 3.871360
H 6.107420 2.966276 0.191360
H 5.622580 3.806043 3.871360
H 1.712580 2.966276 3.871360
H 2.197420 3.806043 0.191360
H 5.213359 0.000000 4.960640
H 6.516680 2.257417 1.280640
H 5.213320 4.514902 4.960640
H 1.303320 2.257417 4.960640
H 2.606680 4.514902 1.280640
H 5.626490 0.711093 7.176000
H 3.425160 0.000000 0.191360
H 2.606641 0.000000 1.280640
"""
ref_fragmentation = [
[3, 16],
[21],
[0, 12],
[1, 14, 19],
[2, 13, 20],
[4, 15, 22],
[5, 23, 28],
[6, 24, 29],
[7, 25, 30],
[8, 26, 31],
[9, 27, 32],
[10, 33],
[11, 34, 35],
[17],
[18]] # yapf: disable
qmol = qcdb.Molecule.from_string(iceIh, dtype='xyz')
frag, arrs, bmols, bmol = qmol.BFS(
seed_atoms=[[3, 16], [21]], return_arrays=True, return_molecule=True, return_molecules=True)
assert compare_integers(frag == ref_fragmentation, 1, 'Q: BFS from qcdb.Molecule')
assert compare_arrays(qmol.geometry(np_out=True)[[1, 14, 19]], arrs[0][3], 4, 'Q: geom back from BFS')
assert compare_integers(15, bmol.nfragments(), 'Q: nfrag')
assert compare_values(qmol.nuclear_repulsion_energy(), bmol.nuclear_repulsion_energy(), 4, 'Q: nre')
assert compare_arrays(
qmol.geometry(np_out=True)[[2, 13, 20]], bmols[4].geometry(np_out=True), 4, 'Q: frag geom back from BFS')
assert compare_integers(True, type(bmol) == qcdb.Molecule, 'Q return type')
def test_numpy_BFS():
import numpy as np
from qcdb.bfs import BFS
# FaOOFaOO 3.6 ?
mol_elem = np.asarray(['C', 'C', 'H', 'H', 'O', 'O', 'O', 'O', 'H', 'H'])
mol_geom = np.asarray([
[ 1.79035823, -0.18606050, 0.00000000],
[ -1.79035823, 0.18606050, 0.00000000],
[ 2.89087214, -0.30042988, 0.00000000],
[ -2.89087214, 0.30042988, 0.00000000],
[ 1.07568931, -1.19425943, 0.00000000],
[ -1.07568931, 1.19425943, 0.00000000],
[ 1.44185816, 1.08049605, 0.00000000],
[ -1.44185816, -1.08049605, 0.00000000],
[ 0.43274661, 1.15045330, 0.00000000],
[ -0.43274661, -1.15045330, 0.00000000]]) # yapf: disable
ans = BFS(mol_geom / qcel.constants.bohr2angstroms, mol_elem)
ref_fragmentation = [[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]
assert compare_integers(True, ans == ref_fragmentation, 'BFS from np.array')
|
jgonthier/psi4
|
psi4/driver/qcdb/pytest/test_bfs.py
|
Python
|
lgpl-3.0
| 4,124
|
[
"CRYSTAL"
] |
c770d57d6d1124c091bdb2c6a219155e380752f124a18deeb244dbc114a1ae5e
|
# Copyright (C) 2012,2013,2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.interaction.Potential
********************************
This is an abstract class, only needed to be inherited from.
.. function:: espressopp.interaction.Potential.computeEnergy(\*args)
:param \*args:
:type \*args:
:rtype:
.. function:: espressopp.interaction.Potential.computeForce(\*args)
:param \*args:
:type \*args:
:rtype:
"""
from espressopp import pmi
from espressopp import toReal3DFromVector
from _espressopp import interaction_Potential
# Python base class for potentials
class PotentialLocal(object):
def computeEnergy(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeEnergy(self, arg0)
return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))
def computeForce(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
newargs = [arg0, 0, 0]
return self.cxxclass.computeForce(self, toReal3DFromVector(*newargs))[0]
return self.cxxclass.computeForce(self, toReal3DFromVector(*args))
def _setShift(self, shift="auto"):
if (shift == "auto"):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setAutoShift(self)
else:
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.shift.fset(self, shift)
def _getShift(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.shift.fget(self)
shift = property(_getShift, _setShift)
if pmi.isController:
class Potential(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
localcall = [ 'computeForce', 'computeEnergy' ],
pmiproperty = ['cutoff', 'shift', 'colVarBondList', 'colVarAngleList', 'colVarDihedList', 'colVar']
)
# class PythonPotentialLocal(potential_PythonPotential):
# def getCutoffSqr(self):
# pass
# def computeForce(self, *args):
# """Override this method to compute the force for a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
# def computeEnergy(self, *args):
# """Override this method to compute the energy at a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
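# Hypothetical usage sketch (illustrative only): a concrete potential built on
# top of this class hierarchy, for instance a Lennard-Jones interaction, can
# evaluate energies either from a scalar distance or from the components of a
# separation vector, thanks to the dispatching in PotentialLocal above:
#
#     pot = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=2.5)
#     e_scalar = pot.computeEnergy(1.2)            # distance along one axis
#     e_vector = pot.computeEnergy(1.0, 0.5, 0.2)  # full separation vector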
|
niktre/espressopp
|
src/interaction/Potential.py
|
Python
|
gpl-3.0
| 3,873
|
[
"ESPResSo"
] |
be04170dcf7e48274420409dfc9602ccc9c621d32a2a0571eecadc7453a9be06
|
#!/usr/bin/env python
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
import DIRAC
from DIRAC.Core.Base import Script
groupName = None
groupProperties = []
userNames = []
def setGroupName(arg):
global groupName
if groupName or not arg:
Script.showHelp()
DIRAC.exit(-1)
groupName = arg
def addUserName(arg):
global userNames
if not arg:
Script.showHelp()
DIRAC.exit(-1)
if arg not in userNames:
userNames.append(arg)
def addProperty(arg):
global groupProperties
if not arg:
Script.showHelp()
DIRAC.exit(-1)
if arg not in groupProperties:
groupProperties.append(arg)
Script.setUsageMessage('\n'.join(['Add or Modify a Group info in DIRAC',
'\nUsage:\n',
' %s [option|cfgfile] ... Property=<Value> ...' % Script.scriptName,
'\nArguments:\n',
' Property=<Value>: Other properties to be added to the User like (VOMSRole=XXXX)',
]))
Script.registerSwitch('G:', 'GroupName:', 'Name of the Group (Mandatory)', setGroupName)
Script.registerSwitch(
'U:',
'UserName:',
'Short Name of user to be added to the Group (Allow Multiple instances or None)',
addUserName)
Script.registerSwitch(
'P:',
'Property:',
'Property to be added to the Group (Allow Multiple instances or None)',
addProperty)
Script.parseCommandLine(ignoreErrors=True)
if groupName is None:
Script.showHelp()
DIRAC.exit(-1)
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
groupProps = {}
if userNames:
groupProps['Users'] = ', '.join(userNames)
if groupProperties:
groupProps['Properties'] = ', '.join(groupProperties)
for prop in args:
pl = prop.split("=")
if len(pl) < 2:
errorList.append(("in arguments", "Property %s has to include a '=' to separate name from value" % prop))
exitCode = 255
else:
pName = pl[0]
pValue = "=".join(pl[1:])
Script.gLogger.info("Setting property %s to %s" % (pName, pValue))
groupProps[pName] = pValue
if not diracAdmin.csModifyGroup(groupName, groupProps, createIfNonExistant=True)['OK']:
errorList.append(("add group", "Cannot register group %s" % groupName))
exitCode = 255
else:
result = diracAdmin.csCommitChanges()
if not result['OK']:
errorList.append(("commit", result['Message']))
exitCode = 255
for error in errorList:
Script.gLogger.error("%s: %s" % error)
DIRAC.exit(exitCode)
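# Example invocation (hypothetical group, user and property names):
#
#   dirac-admin-add-group -G dirac_test -U johndoe -U janedoe -P NormalUser VOMSRole=/vo/Role=user
#
# This creates (or updates) the group "dirac_test" with two users, the
# NormalUser property and an extra VOMSRole attribute, then commits the
# change to the configuration.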
|
andresailer/DIRAC
|
Interfaces/scripts/dirac-admin-add-group.py
|
Python
|
gpl-3.0
| 2,614
|
[
"DIRAC"
] |
b407d139d2214e920259486fb0b752cfa87e029433cde51dd48b994c7d637c55
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import pandas
import numpy
try:
from postprocessing import combine_csv
except ModuleNotFoundError:
pass
class TestCombineCSV(unittest.TestCase):
"""
Test use of combine_csv.py for combining csv files.
"""
def setUp(self):
"""
Define the pattern for test files.
"""
self.__goldpath = os.path.abspath('../../test_files/gold')
self.__basename = os.path.abspath('../../test_files/test_combine_in_')
def tearDown(self):
"""
Remove made CSV files
"""
if os.path.exists("remove_me_54.csv"):
os.remove("remove_me_54.csv")
def testBasic(self):
"""
Test basic usage with minimal options and headers written.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for basic usage.")
def testBasicTime(self):
"""
Test basic usage with headers and a time file.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, timefile=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic_time.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for time usage.")
def testBasicX(self):
"""
Test basic usage with headers and a "x" variable name.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, x_varname='y')
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic_x.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for x variable usage.")
def testBilinear(self):
"""
Test bilinear usage.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, x_varname='y',
timefile=True, bilinear=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_bilinear.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.shape == gold_df.shape,
msg="Pandas dataframe size is different from gold CSV for bilinear usage.")
        # We used to use DataFrame.equals for a comparison here, but because a
        # newer version of Pandas resulted in a very small diff in the column
        # data, we can't use it anymore. Therefore, we need to do a fuzzier
        # check on both the columns and the data.
data_difference = df_test._final_df.to_numpy() - gold_df.to_numpy()
column_difference = df_test._final_df.columns.to_numpy(dtype=numpy.double) \
- gold_df.columns.to_numpy(dtype=numpy.double)
self.assertTrue(abs(data_difference.max()) < 1e-10,
msg="Pandas data is different from gold CSV for bilinear usage.")
self.assertTrue(abs(column_difference.max()) < 1e-10,
msg="Pandas column data is different from gold CSV for bilinear usage.")
def testBasenameError(self):
"""
Test exception when bad basename is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV('bad_basename_54',
"remove_me_54.csv", "large_number")
self.assertEqual(cerr.exception._name, "BasenameError")
def testStepBoundsError(self):
"""
Test exception when mismatch of steps are provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "large_number", lastn=2, endt=1)
self.assertEqual(cerr.exception._name, "StepBoundsError")
def testXVariableError(self):
"""
Test exception when bad "x" variable name is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "large_number",
x_varname='bad_x54_name')
self.assertEqual(cerr.exception._name, "XVariableError")
def testInconsistentError(self):
"""
Test exception when data rows are not consistent.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV("{}54_bad_".format(
self.__basename), "remove_me_54.csv", "large_number",
x_varname='x')
self.assertEqual(cerr.exception._name, "InconsistentError")
def testYVariableError(self):
"""
Test exception when bad "y" variable name is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "bad_y54_name")
self.assertEqual(cerr.exception._name, "YVariableError")
if __name__ == '__main__':
import sys
sys.path.append("..")
import combine_csv
unittest.main(module=__name__, verbosity=2)
|
harterj/moose
|
python/postprocessing/tests/test_combine_csv.py
|
Python
|
lgpl-2.1
| 6,106
|
[
"MOOSE"
] |
2a7da56be9e2d566b7edbdbacccf2bbe748a89d81f8f6705c52ee4c9ac2f58bd
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with FDist.
See http://www.rubic.rdg.ac.uk/~mab/software.html .
Classes:
Record Holds FDist data.
Functions:
read Parses a FDist record (file) into a Record object.
"""
def read(handle):
"""Parses FDist data into a Record object.
handle is a file-like object that contains a FDist record.
"""
record = Record()
record.data_org = int(str(handle.next()).rstrip())
record.num_pops = int(str(handle.next()).rstrip())
record.num_loci = int(str(handle.next()).rstrip())
for i in range(record.num_loci):
handle.next()
num_alleles = int(str(handle.next()).rstrip())
pops_data = []
if record.data_org==0:
for j in range(record.num_pops):
line_comp = str(handle.next()).rstrip().split(' ')
pop_dist = map(lambda x: int(x), line_comp)
pops_data.append(pop_dist)
else:
raise NotImplementedError('1/alleles by rows not implemented')
record.loci_data.append((num_alleles, pops_data))
return record
class Record(object):
"""Holds information from a FDist record.
Members:
data_org Data organization (0 pops by rows, 1 alleles by rows).
The Record will behave as if data was 0 (converting if needed)
num_pops Number of populations
num_loci Number of loci
loci_data Loci data
loci_data is a list, where each element represents a locus. Each element
is a tuple, the first element is the number of alleles, the second
element a list. Each element of the list is the count of each allele
per population.
"""
def __init__(self):
self.data_org = 0
self.num_pops = 0
self.num_loci = 0
self.loci_data = []
def __str__(self):
rep = ['0\n'] #We only export in 0 format, even if originally was 1
rep.append(str(self.num_pops) + '\n')
rep.append(str(self.num_loci) + '\n')
rep.append('\n')
for locus_data in self.loci_data:
num_alleles, pops_data = locus_data
rep.append(str(num_alleles) + '\n')
for pop_data in pops_data:
for allele_count in pop_data:
rep.append(str(allele_count) + ' ')
rep.append('\n')
rep.append('\n')
return "".join(rep)
|
bryback/quickseq
|
genescript/Bio/PopGen/FDist/__init__.py
|
Python
|
mit
| 2,714
|
[
"Biopython"
] |
506b8e8e495d7b31b2b725f41ba23a3a1b8b13ffdac1bf6a2a5789618cd551ea
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
# 'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
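# A commented illustration (the values below are the commonly documented
# example, shown only as a sketch, not a recommendation to enable it):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')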
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
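# A hedged sketch of pointing the default cache at a different backend
# (the backend path and address below are illustrative only):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     }
# }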
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
WhySoGeeky/DroidPot
|
venv/lib/python2.7/site-packages/django/conf/global_settings.py
|
Python
|
mit
| 22,710
|
[
"VisIt"
] |
a6889ce8a6a0b873eec0ccc172fe4443c3cd05fd21738457d4a8430141ff4294
|
#!/usr/bin/env python
"""
"""
import vtk
def main():
colors = vtk.vtkNamedColors()
fileName, tissue = get_program_parameters()
tissueMap = CreateTissueMap()
colorLut = CreateFrogLut()
# Setup render window, renderer, and interactor.
rendererLeft = vtk.vtkRenderer()
rendererLeft.SetViewport(0, 0, .5, 1)
rendererRight = vtk.vtkRenderer()
rendererRight.SetViewport(.5, 0, 1, 1)
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(rendererLeft)
renderWindow.AddRenderer(rendererRight)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
actor = CreateFrogActor(fileName, tissueMap[tissue])
actor.GetProperty().SetDiffuseColor(colorLut.GetTableValue(tissueMap[tissue])[:3])
rendererLeft.AddActor(actor)
actorSmooth = CreateSmoothFrogActor(fileName, tissueMap[tissue])
actorSmooth.GetProperty().SetDiffuseColor(colorLut.GetTableValue(tissueMap[tissue])[:3])
actorSmooth.GetProperty().SetDiffuse(1.0)
actorSmooth.GetProperty().SetSpecular(.5)
actorSmooth.GetProperty().SetSpecularPower(100)
rendererRight.AddActor(actorSmooth)
rendererLeft.ResetCamera()
rendererLeft.GetActiveCamera().SetViewUp(-1, 0, 0)
rendererLeft.GetActiveCamera().Azimuth(180)
rendererLeft.ResetCameraClippingRange()
rendererLeft.SetBackground(colors.GetColor3d("SlateGray"))
rendererRight.SetBackground(colors.GetColor3d("SlateGray"))
rendererRight.SetActiveCamera(rendererLeft.GetActiveCamera())
renderWindow.SetSize(640, 480)
renderWindow.Render()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'The frog’s brain.'
epilogue = '''
Model extracted without smoothing (left) and with smoothing (right).
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='frog.mhd.')
parser.add_argument('tissue', default='brain', nargs='?', help='The tissue to use.')
args = parser.parse_args()
return args.filename, args.tissue
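# A usage sketch (not part of the original example; paths are illustrative):
#   python ViewFrogBoth.py frog.mhd brain
# where frog.mhd is a locally available frog MetaImage volume and the second
# argument is one of the tissue names defined in CreateTissueMap().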
def CreateFrogLut():
colors = vtk.vtkNamedColors()
colorLut = vtk.vtkLookupTable()
colorLut.SetNumberOfColors(17)
colorLut.SetTableRange(0, 16)
colorLut.Build()
colorLut.SetTableValue(0, 0, 0, 0, 0)
colorLut.SetTableValue(1, colors.GetColor4d("salmon")) # blood
colorLut.SetTableValue(2, colors.GetColor4d("beige")) # brain
colorLut.SetTableValue(3, colors.GetColor4d("orange")) # duodenum
colorLut.SetTableValue(4, colors.GetColor4d("misty_rose")) # eye_retina
colorLut.SetTableValue(5, colors.GetColor4d("white")) # eye_white
colorLut.SetTableValue(6, colors.GetColor4d("tomato")) # heart
colorLut.SetTableValue(7, colors.GetColor4d("raspberry")) # ileum
colorLut.SetTableValue(8, colors.GetColor4d("banana")) # kidney
colorLut.SetTableValue(9, colors.GetColor4d("peru")) # l_intestine
colorLut.SetTableValue(10, colors.GetColor4d("pink")) # liver
colorLut.SetTableValue(11, colors.GetColor4d("powder_blue")) # lung
colorLut.SetTableValue(12, colors.GetColor4d("carrot")) # nerve
colorLut.SetTableValue(13, colors.GetColor4d("wheat")) # skeleton
colorLut.SetTableValue(14, colors.GetColor4d("violet")) # spleen
colorLut.SetTableValue(15, colors.GetColor4d("plum")) # stomach
return colorLut
def CreateTissueMap():
tissueMap = dict()
tissueMap["blood"] = 1
tissueMap["brain"] = 2
tissueMap["duodenum"] = 3
tissueMap["eyeRetina"] = 4
tissueMap["eyeWhite"] = 5
tissueMap["heart"] = 6
tissueMap["ileum"] = 7
tissueMap["kidney"] = 8
tissueMap["intestine"] = 9
tissueMap["liver"] = 10
tissueMap["lung"] = 11
tissueMap["nerve"] = 12
tissueMap["skeleton"] = 13
tissueMap["spleen"] = 14
tissueMap["stomach"] = 15
return tissueMap
def CreateSmoothFrogActor(fileName, tissue):
reader = vtk.vtkMetaImageReader()
reader.SetFileName(fileName)
reader.Update()
selectTissue = vtk.vtkImageThreshold()
selectTissue.ThresholdBetween(tissue, tissue)
selectTissue.SetInValue(255)
selectTissue.SetOutValue(0)
selectTissue.SetInputConnection(reader.GetOutputPort())
gaussianRadius = 1
gaussianStandardDeviation = 2.0
gaussian = vtk.vtkImageGaussianSmooth()
gaussian.SetStandardDeviations(gaussianStandardDeviation, gaussianStandardDeviation, gaussianStandardDeviation)
gaussian.SetRadiusFactors(gaussianRadius, gaussianRadius, gaussianRadius)
gaussian.SetInputConnection(selectTissue.GetOutputPort())
isoValue = 127.5
mcubes = vtk.vtkMarchingCubes()
mcubes.SetInputConnection(gaussian.GetOutputPort())
mcubes.ComputeScalarsOff()
mcubes.ComputeGradientsOff()
mcubes.ComputeNormalsOff()
mcubes.SetValue(0, isoValue)
smoothingIterations = 0
passBand = 0.001
featureAngle = 60.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(mcubes.GetOutputPort())
smoother.SetNumberOfIterations(smoothingIterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(featureAngle)
smoother.SetPassBand(passBand)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(smoother.GetOutputPort())
normals.SetFeatureAngle(featureAngle)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stripper.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def CreateFrogActor(fileName, tissue):
reader = vtk.vtkMetaImageReader()
reader.SetFileName(fileName)
reader.Update()
selectTissue = vtk.vtkImageThreshold()
selectTissue.ThresholdBetween(tissue, tissue)
selectTissue.SetInValue(255)
selectTissue.SetOutValue(0)
selectTissue.SetInputConnection(reader.GetOutputPort())
isoValue = 63.5
mcubes = vtk.vtkMarchingCubes()
mcubes.SetInputConnection(selectTissue.GetOutputPort())
mcubes.ComputeScalarsOff()
mcubes.ComputeGradientsOff()
mcubes.ComputeNormalsOn()
mcubes.SetValue(0, isoValue)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(mcubes.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stripper.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Visualization/ViewFrogBoth.py
|
Python
|
apache-2.0
| 6,776
|
[
"Gaussian",
"VTK"
] |
c850b44e6bcf405ac5d97fc0c79749d1041cb0582c546e4ebbc365077f3b3896
|
import logging
import re
from urllib.parse import urlparse, urlunparse
from itertools import tee, filterfalse
from dateutil.parser import parse as parse_datetime
from bs4 import BeautifulSoup as Soup, Comment
from langdetect import detect
from tornado import httpclient
from tornado.httputil import HTTPHeaders
from tornado.options import options
from torspider.urlnorm import norm, join_parts, get_domain
from langdetect.lang_detect_exception import LangDetectException
ALLOW_SCHEMES = ('http', 'https')
ALLOWED_TYPES = ('text/html',)  # trailing comma so this is a tuple, not a plain string
ALLOWED_LANGS = ('ru', 'en', 'Russian', 'ru-RU')
SAVE_HEADERS = (
'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Location',
'Content-MD5', 'Content-Type', 'Date', 'ETag', 'Expires', 'Last-Modified',
'Link', 'Retry-After', 'Server', 'Via', 'Warning', 'Status', 'X-Powered-By',
'X-UA-Compatible'
)
MAX_CONTENT_SIZE = 1024 # Kb
DEFAULT_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:28.0) Gecko/20100101 Firefox/28.0'
DEFAULT_TIMEOUT = 20
DEFAULT_HEADERS = HTTPHeaders({
'Accept': ','.join(ALLOWED_TYPES),
'Accept-Charset': 'utf-8, windows-1251;q=0.5, koi8-r;q=0.3, *;q=0.3',
'Accept-Language': 'ru, en;q=0.7',
})
httpclient.AsyncHTTPClient.configure(
"tornado.curl_httpclient.CurlAsyncHTTPClient",
defaults=dict(user_agent=DEFAULT_AGENT)
)
# remove these tags, complete with contents.
SKIP_TAGS = ("script", "style", "form", "input")
def is_inner_link(url, page):
return get_domain(url) == get_domain(page.base)
class Page():
"""
Parse HTML page.
All page attributes are available as lazily initialized properties.
Arguments:
url : target URL (str)
response : tornado.httpclient.HTTPResponse instance
"""
def __init__(self, url, response):
self.url = url
self.response = response
self._base = None
self._soup = None
self._title = None
self._meta = None
self._text = None
self._language = None
self._links = None
self._headers = None
@property
def soup(self):
"""BeautifulSoup tree."""
if self._soup is None:
self._soup = Soup(self.response.body, 'lxml')
return self._soup
@property
def base(self):
"""<base> header value is any, or response.effective_url domain name.
"""
if self._base is None:
if self.soup.base:
self._base = self.soup.base.get('href')
if not self._base:
logging.debug('No <base> header. Using response domain name')
parts = norm(self.response.effective_url)
self._base = urlunparse((parts[0], parts[1], '/', '', '', ''))
return self._base
@property
def title(self):
"""<title> header, if any, or the first found heading."""
if not self._title:
if self.soup.title and self.soup.title.string:
self._title = self.soup.title.string.strip()
if not self._title:
logging.debug('Page title not found. Searching headings...')
body = self.soup.body
for i in range(5):
h = body.find('h{}'.format(i+1))
if h:
self._title = h.string
break
return self._title
def _iter_meta(self):
for tag in self.soup.find_all('meta'):
k = tag.get('property', tag.get('name'))
if k:
v = tag.get('content')
if v:
yield k, v
@property
def meta(self):
"""Page meta tags as dictionary."""
if self._meta is None:
self._meta = {k: v for k, v in self._iter_meta()}
return self._meta
def _sanitize(self, soup):
# now strip HTML we don't like.
for tag in soup.findAll():
if tag.name.lower() in SKIP_TAGS:
# blacklisted tags are removed in their entirety
tag.extract()
# scripts can be executed from comments in some cases
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for com in comments:
com.extract()
@property
def text(self):
"""Page text converted to markup format."""
if self._text is None:
#doc = Document(str(self.soup.body))
body = Soup(str(self.soup.body), 'lxml')
self._sanitize(body)
self._text = body.get_text(' ')
self._text = re.sub(r'\s{2,}', ' ', self._text)
self._text = re.sub(r'\s*\n\s*', '\n', self._text)
self._text = re.sub(r'\n{2,}', '\n', self._text)
self._text = re.sub(r'\s+\.\s+', '. ', self._text)
self._text = self._text.strip()
#self._text = html2text(str(self.soup.body))
return self._text
@property
def language(self):
"""Detected page language."""
if self._language is None:
try:
self._language = detect(self.text)
except LangDetectException as ex:
logging.error(ex)
self._language = 'UNKNOWN'
return self._language
def _iter_links(self):
domain = urlparse(self.base)[1]
for a in self.soup.find_all('a', href=True):
try:
url = norm(a['href'], domain)
if not url[0] in ALLOW_SCHEMES:
logging.debug('Skipping scheme <%s>', url[0])
else:
yield join_parts(url)
except Exception as ex:
logging.warn(ex)
@property
def links(self):
"""Set of normalized links found inside the page <body>."""
if self._links is None:
self._links = {url for url in self._iter_links()}
return self._links
def partition_links(self):
"""Return inner and outer links as two separate lists."""
t1, t2 = tee(self.links)
pred = lambda x: is_inner_link(x, self)
return list(filter(pred, t1)), list(filterfalse(pred, t2))
def _parse_header(self, k, v):
lk = k.lower()
if lk in ('date', 'expires', 'last-modified'):
try:
return parse_datetime(v)
except ValueError as ex:
logging.error(ex)
return v
if lk in ('content-length',):
return int(v)
return v
@property
def headers(self):
"""Some usefull data from HTTP response headers"""
if self._headers is None:
self._headers = {
k: self._parse_header(k, v)
for (k, v) in sorted(self.response.headers.get_all())
if k in SAVE_HEADERS
}
return self._headers
def as_dict(self):
report = {}
if self.title:
report['title'] = self.title
if self.text:
report['text'] = self.text
if self.meta:
report['meta'] = self.meta
if self.language:
report['language'] = self.language
if self.links:
inner, outer = self.partition_links()
report['links'] = {'inner': inner, 'outer': outer}
if self.headers:
report['headers'] = self.headers
return report
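# A minimal usage sketch (hypothetical; assumes `resp` is a tornado
# HTTPResponse that has already been fetched for `url`):
#   page = Page(url, resp)
#   print(page.title, page.language)
#   inner, outer = page.partition_links()
#   report = page.as_dict()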
class HTTPClient:
"""Asyncroneous HTTP client.
"""
def __init__(self):
self.client = httpclient.AsyncHTTPClient()
self.req_options = dict(headers = DEFAULT_HEADERS)
if options.proxy:
logging.debug('Using proxy: %s', options.proxy)
h, p = options.proxy.split(':')
self.req_options['proxy_host'] = h
self.req_options['proxy_port'] = int(p)
else:
logging.debug('Working without proxy')
self.req_options['connect_timeout'] = options.connect_timeout
self.req_options['request_timeout'] = options.request_timeout
self.req_options['validate_cert'] = options.validate_cert
#self.req_options['ca_certs'] = None
#self.ssl_options = {"ssl_version": ssl.PROTOCOL_TLSv1}
def _validate_headers(self, headers):
"""If anything is wrong with HTTP headers, raise AssertionError."""
h = headers.get('Content-Type')
if h:
v = h.split(';')[0].strip()
assert v in ALLOWED_TYPES, 'Illegal Content-Type: %s' % h
h = headers.get('Content-Language')
if h:
langs = [x.strip().lower() for x in h.split(',')]
assert set(langs).intersection(ALLOWED_LANGS), 'Illegal Content-Language: %s' % h
h = headers.get('Content-Length')
if h:
v = int(h) / 1024
assert v <= MAX_CONTENT_SIZE, 'Content size %d exceeds %dKb' % (v, MAX_CONTENT_SIZE)
logging.debug('Headers OK.')
async def visit(self, url, count=0):
logging.debug('Fetching %s...', url)
try:
req = httpclient.HTTPRequest(url, **self.req_options)
except UnicodeEncodeError as ex:
logging.error(ex)
if count > 1:
return self.visit(url.encode('idna'), count=count+1)
else:
res = await self.client.fetch(req)
logging.info('%s: %s - %s', res.effective_url, res.code, res.reason)
self._validate_headers(res.headers)
return res
|
skrushinsky/torspider
|
torspider/scraper.py
|
Python
|
mit
| 9,474
|
[
"VisIt"
] |
d73f1370efaf517cacdcdd366ad19bc162ccaf5c4dac4d72e17172d9eb1e6621
|
###############################################################################
# Python Script to Convert .ab1 to .fasta and .qual Using BioPython
###############################################################################
# Written by Mario Muscarella
# Last Update 25 Apr 2013
# Directions:
# use the following command: > python ReadAB1.py
import glob
files = glob.glob("*.ab1")
#names= []
#for x in files:
# tempname = (x).split(".")[0]
# names.append(tempname)
from Bio import SeqIO
for x in files:
sample_seq = SeqIO.read(x, "abi")
sample_seq.id = sample_seq.name
SeqIO.write(sample_seq, sample_seq.id+".fasta", "fasta")
SeqIO.write(sample_seq, sample_seq.id+".qual", "qual")
print "fasta and qual file created for %s" % sample_seq.id
|
mmuscarella/StarvationTraits
|
bin/ReadAB1.py
|
Python
|
gpl-3.0
| 753
|
[
"Biopython"
] |
c4c43aae8c435ead4b6a83a77c7c043f4bc8daeb7a440e333042d4f86fec79be
|
################################
## Set diagnostics to True ##
## If you want to display the ##
## Tracking process. ##
################################
diagnostics=False
#################################
## Import packages ##
#################################
import sys
import os
from trackeddy.tracking import *
from trackeddy.savedata import *
from numpy import *
from pylab import *
import random
import pytest
import time
#################################
## Import tools to create ##
## synthetic fields ##
#################################
from trackeddy.utils.gaussian_field_functions import *
import trackeddy.utils.field_generator as fg
n=2
a = 0.1
b = 0.1
t0 = 0
t = 1
xx=linspace(10,12,200)
yy=linspace(10,12,200)
gf=fg.Generate_field(a,b,n,xx,yy,'Nint')
data = abs(gf.assemble_field(t))
x=linspace(10,13,300)
y=linspace(10,13,300)
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
levels = {'max':data.max(),'min':0.1,'step':0.1}
eddytd = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
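# The calls above build a small synthetic Gaussian eddy field and run the
# tracker once, so `eddytd` holds a tracking dictionary that the save/convert
# tests below can write out and convert to netCDF.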
@pytest.mark.ttrackeddy_data
def test_data2npy():
save_data('./test.npy', eddytd)
assert os.path.isfile('./test.npy')
@pytest.mark.ttrackeddy_data
def test_tracknpy2nc():
track2nc = Trackeddy2dataset('./test.npy','./','nc')
track2nc.file2dict()
track2nc.trackeddy2nc()
assert os.path.isfile('./output_000.nc')
@pytest.mark.ttrackeddy_data
def test_trackdata2nc():
track2nc = Trackeddy2dataset(eddytd,'./','nc')
track2nc.trackeddy2nc()
assert os.path.isfile('./output_001.nc')
@pytest.mark.ttrackeddy_data
def test_rm_files():
os.remove('./test.npy')
os.remove('./output_000.nc')
os.remove('./output_001.nc')
|
Josue-Martinez-Moreno/trackeddy
|
tests/test_savedata.py
|
Python
|
mit
| 1,916
|
[
"Gaussian"
] |
aebeb41604235d609f1aa6abd711e9e402b6e141b2c63a466e73b79d83eed906
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from contextlib import contextmanager
import itertools
from .explosions import *
from .logger import ExportProgressLogger, ExportVerboseLogger
from .mesh import _MeshManager, _VERTEX_COLOR_LAYERS
from ..helpers import *
_NUM_RENDER_LAYERS = 20
class LightBaker:
"""ExportTime Lighting"""
def __init__(self, *, mesh=None, report=None, verbose=False):
self._lightgroups = {}
if report is None:
self._report = ExportVerboseLogger() if verbose else ExportProgressLogger()
self.add_progress_steps(self._report, True)
self._report.progress_start("BAKING LIGHTING")
self._own_report = True
else:
self._report = report
self._own_report = False
# This used to be the base class, but due to the need to access the export state
# which may be stored in the exporter's mesh manager, we've changed from is-a to has-a
# semantics. Sorry for this confusion!
self._mesh = _MeshManager(self._report) if mesh is None else mesh
self.vcol_layer_name = "autocolor"
self.lightmap_name = "{}_LIGHTMAPGEN.png"
self.lightmap_uvtex_name = "LIGHTMAPGEN"
self.retain_lightmap_uvtex = True
self.force = False
self._lightmap_images = {}
self._uvtexs = {}
self._active_vcols = {}
def __del__(self):
if self._own_report:
self._report.progress_end()
def __enter__(self):
self._mesh.__enter__()
return self
def __exit__(self, *exc_info):
self._mesh.__exit__(*exc_info)
@staticmethod
def add_progress_steps(report, add_base=False):
if add_base:
_MeshManager.add_progress_presteps(report)
report.progress_add_step("Searching for Bahro")
report.progress_add_step("Baking Static Lighting")
def _apply_render_settings(self, toggle, vcols):
render = bpy.context.scene.render
# Remember, lightmaps carefully control the enabled textures such that light
# can be cast through transparent materials. See diatribe in lightmap prep.
toggle.track(render, "use_textures", not vcols)
toggle.track(render, "use_shadows", True)
toggle.track(render, "use_envmaps", True)
toggle.track(render, "use_raytrace", True)
toggle.track(render, "bake_type", "FULL")
toggle.track(render, "use_bake_clear", True)
toggle.track(render, "use_bake_to_vertex_color", vcols)
def _associate_image_with_uvtex(self, uvtex, im):
# Associate the image with all the new UVs
# NOTE: no toggle here because it's the artist's problem if they are looking at our
# super swagalicious LIGHTMAPGEN uvtexture...
for i in uvtex.data:
i.image = im
def _bake_lightmaps(self, objs, layers):
with GoodNeighbor() as toggle:
scene = bpy.context.scene
scene.layers = layers
self._apply_render_settings(toggle, False)
self._select_only(objs, toggle)
bpy.ops.object.bake_image()
self._pack_lightmaps(objs)
def _bake_vcols(self, objs, layers):
with GoodNeighbor() as toggle:
bpy.context.scene.layers = layers
self._apply_render_settings(toggle, True)
self._select_only(objs, toggle)
bpy.ops.object.bake_image()
def bake_static_lighting(self, objs):
"""Bakes all static lighting for Plasma geometry"""
self._report.msg("\nBaking Static Lighting...")
with GoodNeighbor() as toggle:
try:
# reduce the amount of indentation
bake = self._harvest_bakable_objects(objs, toggle)
result = self._bake_static_lighting(bake, toggle)
finally:
# this stuff has been observed to be problematic with GoodNeighbor
self._pop_lightgroups()
self._restore_uvtexs()
self._restore_vcols()
if not self.retain_lightmap_uvtex:
self._remove_stale_uvtexes(bake)
return result
def _bake_static_lighting(self, bake, toggle):
inc_progress = self._report.progress_increment
# Lightmap passes are expensive, so we will warn about any passes that seem
# particularly wasteful.
try:
largest_pass = max((len(value) for key, value in bake.items() if key[0] != "vcol"))
except ValueError:
largest_pass = 0
# Step 0.9: Make all layers visible.
# This prevents context operators from phailing.
bpy.context.scene.layers = (True,) * _NUM_RENDER_LAYERS
# Step 1: Prepare... Apply UVs, etc, etc, etc
self._report.progress_advance()
self._report.progress_range = len(bake)
self._report.msg("Preparing to bake...", indent=1)
for key, value in bake.items():
if key[0] == "lightmap":
for i in range(len(value)-1, -1, -1):
obj = value[i]
if not self._prep_for_lightmap(obj, toggle):
self._report.msg("Lightmap '{}' will not be baked -- no applicable lights",
obj.name, indent=2)
value.pop(i)
elif key[0] == "vcol":
for i in range(len(value)-1, -1, -1):
obj = value[i]
if not self._prep_for_vcols(obj, toggle):
if self._has_valid_material(obj):
self._report.msg("VCols '{}' will not be baked -- no applicable lights",
obj.name, indent=2)
value.pop(i)
else:
raise RuntimeError(key[0])
inc_progress()
self._report.msg(" ...")
# Step 2: BAKE!
self._report.progress_advance()
self._report.progress_range = len(bake)
for key, value in bake.items():
if value:
if key[0] == "lightmap":
num_objs = len(value)
self._report.msg("{} Lightmap(s) [H:{:X}]", num_objs, hash(key[1:]), indent=1)
if largest_pass > 1 and num_objs < round(largest_pass * 0.02):
pass_names = set((i.plasma_modifiers.lightmap.bake_pass_name for i in value))
pass_msg = ", ".join(pass_names)
self._report.warn("Small lightmap bake pass! Bake Pass(es): {}".format(pass_msg), indent=2)
self._bake_lightmaps(value, key[1:])
elif key[0] == "vcol":
self._report.msg("{} Vertex Color(s) [H:{:X}]", len(value), hash(key[1:]), indent=1)
self._bake_vcols(value, key[1:])
self._fix_vertex_colors(value)
else:
raise RuntimeError(key[0])
inc_progress()
# Return how many thingos we baked
return sum(map(len, bake.values()))
@contextmanager
def _bmesh_from_mesh(self, mesh):
bm = bmesh.new()
try:
# from_object would likely cause Blender to crash further in the export process,
# so use the safer from_mesh instead.
bm.from_mesh(mesh)
yield bm
finally:
bm.free()
def _fix_vertex_colors(self, blender_objects):
# Blender's lightmapper has a bug which allows vertices to "self-occlude" when shared between
# two faces. See here https://forum.guildofwriters.org/viewtopic.php?f=9&t=6576&p=68939
# What we're doing here is an improved version of the algorithm in the previous link.
# For each loop, we find all other loops in the mesh sharing the same vertex, which aren't
# separated by a sharp edge. We then take the brightest color out of all those loops, and
# assign it back to the base loop.
# "Sharp edges" include edges manually tagged as sharp by the user, or part of a non-smooth
# face, or edges for which the face angle is superior to the mesh's auto-smooth threshold.
# (If the object has an edge split modifier, well, screw you!)
for bo in blender_objects:
mesh = bo.data
if self.vcol_layer_name not in mesh.vertex_colors:
# No vertex color. Baking either failed or is turned off.
continue
with self._bmesh_from_mesh(mesh) as bm:
bm.faces.ensure_lookup_table()
light_vcol = bm.loops.layers.color.get(self.vcol_layer_name)
for face in bm.faces:
for loop in face.loops:
vert = loop.vert
max_color = loop[light_vcol]
if not face.smooth:
# Face is sharp, so we can't smooth anything.
continue
# Now that we have a loop and its vertex, find all edges the vertex connects to.
for edge in vert.link_edges:
if len(edge.link_faces) != 2:
# Either a border edge, or an abomination.
continue
if mesh.use_auto_smooth and (not edge.smooth
or edge.calc_face_angle() > mesh.auto_smooth_angle):
# Normals are split for edges marked as sharp by the user, and edges
# whose angle is above the threshold. Auto smooth must be on in both cases.
continue
if face in edge.link_faces:
# Alright, this edge is connected to our loop AND our face.
# Now for the Fun Stuff(c)... First, actually get ahold of the other
# face (the one we're connected to via this edge).
other_face = next(f for f in edge.link_faces if f != face)
if not other_face.calc_area():
# Zero area face, ignore it.
continue
# Now get ahold of the loop sharing our vertex on the OTHER SIDE
# of that damnable edge...
other_loop = next(loop for loop in other_face.loops if loop.vert == vert)
if not other_loop.is_convex:
# Happens with complex polygons after edge dissolving. Ignore it.
continue
other_color = other_loop[light_vcol]
# Phew ! Good, now just pick whichever color has the highest average value
if sum(max_color) / 3 < sum(other_color) / 3:
max_color = other_color
# Assign our hard-earned color back
loop[light_vcol] = max_color
bm.to_mesh(mesh)
def _generate_lightgroup(self, bo, user_lg=None):
"""Makes a new light group for the baking process that excludes all Plasma RT lamps"""
shouldibake = (user_lg is not None and bool(user_lg.objects))
mesh = bo.data
for material in mesh.materials:
if material is None:
# no material is assigned to this slot... (why is this even a thing?)
continue
# Already done it?
lg, mat_name = material.light_group, material.name
if mat_name not in self._lightgroups:
self._lightgroups[mat_name] = lg
if not user_lg:
if not lg or bool(lg.objects) is False:
source = [i for i in bpy.context.scene.objects if i.type == "LAMP"]
else:
source = lg.objects
dest = bpy.data.groups.new("_LIGHTMAPGEN_{}_{}".format(bo.name, mat_name))
# Rules:
# 1) No animated lights, period.
# 2) If we accept runtime lighting, no Plasma Objects
rtl_mod = bo.plasma_modifiers.lighting
for obj in source:
if obj.plasma_object.has_animation_data:
continue
if rtl_mod.rt_lights and obj.plasma_object.enabled:
continue
dest.objects.link(obj)
shouldibake = True
else:
# The aforementioned rules do not apply. You better hope you know WTF you are
# doing. I'm not going to help!
dest = user_lg
material.light_group = dest
return shouldibake
def get_lightmap(self, bo):
return self._lightmap_images.get(bo.name)
def get_lightmap_name(self, bo):
return self.lightmap_name.format(bo.name)
def _has_valid_material(self, bo):
for material in bo.data.materials:
if material is not None:
return True
return False
def _harvest_bakable_objects(self, objs, toggle):
# The goal here is to minimize the calls to bake_image, so we are going to collect everything
# that needs to be baked and sort it out by configuration.
default_layers = tuple((True,) * _NUM_RENDER_LAYERS)
bake, bake_passes = {}, bpy.context.scene.plasma_scene.bake_passes
bake_vcol = bake.setdefault(("vcol",) + default_layers, [])
def lightmap_bake_required(obj) -> bool:
mod = obj.plasma_modifiers.lightmap
if mod.bake_lightmap:
if self.force:
return True
if mod.image is not None:
uv_texture_names = frozenset((i.name for i in obj.data.uv_textures))
if self.lightmap_uvtex_name in uv_texture_names:
self._report.msg("'{}': Skipping due to valid lightmap override", obj.name, indent=1)
else:
self._report.warn("'{}': Have lightmap, but regenerating UVs", obj.name, indent=1)
self._prep_for_lightmap_uvs(obj, mod.image, toggle)
return False
return True
return False
def vcol_bake_required(obj) -> bool:
if obj.plasma_modifiers.lightmap.bake_lightmap:
return False
vcol_layer_names = frozenset((vcol_layer.name.lower() for vcol_layer in obj.data.vertex_colors))
manual_layer_names = _VERTEX_COLOR_LAYERS & vcol_layer_names
if manual_layer_names:
self._report.msg("'{}': Skipping due to valid manual vertex color layer(s): '{}'", obj.name, manual_layer_names.pop(), indent=1)
return False
if self.force:
return True
if self.vcol_layer_name.lower() in vcol_layer_names:
self._report.msg("'{}': Skipping due to valid matching vertex color layer(s): '{}'", obj.name, self.vcol_layer_name, indent=1)
return False
return True
for i in filter(lambda x: x.type == "MESH" and bool(x.data.materials), objs):
mods = i.plasma_modifiers
lightmap_mod = mods.lightmap
if lightmap_mod.enabled:
if lightmap_mod.bake_pass_name:
bake_pass = bake_passes.get(lightmap_mod.bake_pass_name, None)
if bake_pass is None:
raise ExportError("Bake Lighting '{}': Could not find pass '{}'".format(i.name, lightmap_mod.bake_pass_name))
lm_layers = tuple(bake_pass.render_layers)
else:
lm_layers = default_layers
# In order for Blender to be able to bake this properly, at least one of the
# layers this object is on must be selected. We will sanity check this now.
obj_layers = tuple(i.layers)
lm_active_layers = set((i for i, value in enumerate(lm_layers) if value))
obj_active_layers = set((i for i, value in enumerate(obj_layers) if value))
if not lm_active_layers & obj_active_layers:
raise ExportError("Bake Lighting '{}': At least one layer the object is on must be selected".format(i.name))
if lightmap_bake_required(i) is False and vcol_bake_required(i) is False:
continue
method = "lightmap" if lightmap_mod.bake_lightmap else "vcol"
key = (method,) + lm_layers
bake_pass = bake.setdefault(key, [])
bake_pass.append(i)
self._report.msg("'{}': Bake to {}", i.name, method, indent=1)
elif mods.lighting.preshade and vcol_bake_required(i):
self._report.msg("'{}': Bake to vcol (crappy)", i.name, indent=1)
bake_vcol.append(i)
return bake
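# (Sketch of the structure returned above: keys are ("lightmap",) + render
# layer flags or ("vcol",) + render layer flags, and each value is the list of
# objects that can be baked together in a single pass.)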
def _pack_lightmaps(self, objs):
for bo in objs:
im = self.get_lightmap(bo)
if im is not None and im.is_dirty:
im.pack(as_png=True)
def _pop_lightgroups(self):
materials = bpy.data.materials
for mat_name, lg in self._lightgroups.items():
materials[mat_name].light_group = lg
self._lightgroups.clear()
groups = bpy.data.groups
for i in groups:
if i.name.startswith("_LIGHTMAPGEN_"):
bpy.data.groups.remove(i)
def _prep_for_lightmap(self, bo, toggle):
mesh = bo.data
modifier = bo.plasma_modifiers.lightmap
uv_textures = mesh.uv_textures
# Previously, we told Blender to just ignore textures altogether when baking
# VCols or lightmaps. This is easy, but it prevents us from doing tricks like
# using the "Receive Transparent" option, which allows for light to be cast
# through sections of materials that are transparent. Therefore, on objects
# that are lightmapped, we will disable all the texture slots...
# Due to our batching, however, materials that are transparent cannot be lightmapped.
for material in (i for i in mesh.materials if i is not None):
if material.use_transparency:
raise ExportError("'{}': Cannot lightmap material '{}' because it is transparnt".format(bo.name, material.name))
for slot in (j for j in material.texture_slots if j is not None):
toggle.track(slot, "use", False)
# Create a special light group for baking
if not self._generate_lightgroup(bo, modifier.lights):
return False
# We need to ensure that we bake onto the "BlahObject_LIGHTMAPGEN" image
data_images = bpy.data.images
im_name = self.get_lightmap_name(bo)
size = modifier.resolution
im = data_images.get(im_name)
if im is None:
im = data_images.new(im_name, width=size, height=size)
elif im.size[0] != size:
# Force delete and recreate the image because the size is out of date
data_images.remove(im)
im = data_images.new(im_name, width=size, height=size)
self._lightmap_images[bo.name] = im
self._prep_for_lightmap_uvs(bo, im, toggle)
# Now, set the new LIGHTMAPGEN uv layer as what we want to render to...
# NOTE that this will need to be reset by us to what the user had previously
# Not using toggle.track due to observed oddities
for i in uv_textures:
value = i.name == self.lightmap_uvtex_name
i.active = value
i.active_render = value
# Indicate we should bake
return True
def _prep_for_lightmap_uvs(self, bo, image, toggle):
mesh = bo.data
modifier = bo.plasma_modifiers.lightmap
uv_textures = mesh.uv_textures
# If there is a cached LIGHTMAPGEN uvtexture, nuke it
uvtex = uv_textures.get(self.lightmap_uvtex_name, None)
if uvtex is not None:
uv_textures.remove(uvtex)
# Make sure we can enter Edit Mode(TM)
toggle.track(bo, "hide", False)
# Because the way Blender tracks active UV layers is massively stupid...
if uv_textures.active is not None:
self._uvtexs[mesh.name] = uv_textures.active.name
# We must make this the active object before touching any operators
bpy.context.scene.objects.active = bo
# Originally, we used the lightmap unpack UV operator to make our UV texture, however,
# this tended to create sharp edges. There was already a discussion about this on the
# Guild of Writers forum, so I'm implementing a code version of dendwaler's process,
# as detailed here: https://forum.guildofwriters.org/viewtopic.php?p=62572#p62572
# This has been amended with Sirius's observations in GH-265 about forced uv map
# packing. Namely, don't do it unless modifiers make us.
uv_base = uv_textures.get(modifier.uv_map) if modifier.uv_map else None
if uv_base is not None:
uv_textures.active = uv_base
# this will copy the UVs to the new UV texture
uvtex = uv_textures.new(self.lightmap_uvtex_name)
uv_textures.active = uvtex
# if the artist hid any UVs, they will not be baked to... fix this now
with self._set_mode("EDIT"):
bpy.ops.uv.reveal()
self._associate_image_with_uvtex(uv_textures.active, image)
# Meshes with modifiers need to have islands packed to prevent generated vertices
# from sharing UVs. Sigh.
if self._mesh.is_collapsed(bo):
# Danger: uv_base.name -> UnicodeDecodeError (wtf? another blender bug?)
self._report.warn("'{}': packing islands in UV Texture '{}' due to modifier collapse",
bo.name, modifier.uv_map, indent=2)
with self._set_mode("EDIT"):
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.uv.select_all(action="SELECT")
bpy.ops.uv.pack_islands(margin=0.01)
else:
# same thread, see Sirius's suggestion RE smart unwrap. this seems to yield good
# results in my tests. it will be good enough for quick exports.
uvtex = uv_textures.new(self.lightmap_uvtex_name)
uv_textures.active = uvtex
self._associate_image_with_uvtex(uvtex, image)
with self._set_mode("EDIT"):
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.uv.smart_project(island_margin=0.05)
def _prep_for_vcols(self, bo, toggle):
mesh = bo.data
modifier = bo.plasma_modifiers.lightmap
vcols = mesh.vertex_colors
# Create a special light group for baking
user_lg = modifier.lights if modifier.enabled else None
if not self._generate_lightgroup(bo, user_lg):
return False
vcol_layer_name = self.vcol_layer_name
autocolor = vcols.get(vcol_layer_name)
needs_vcol_layer = autocolor is None
if needs_vcol_layer:
autocolor = vcols.new(vcol_layer_name)
self._active_vcols[mesh] = (
next(i for i, vc in enumerate(mesh.vertex_colors) if vc.active),
next(i for i, vc in enumerate(mesh.vertex_colors) if vc.active_render),
)
# Mark "autocolor" as our active render layer
for vcol_layer in mesh.vertex_colors:
autocol = vcol_layer.name == vcol_layer_name
vcol_layer.active_render = autocol
vcol_layer.active = autocol
mesh.update()
# Vertex colors are sort of ephemeral, so if we have an exit stack, we want to
# terminate this layer when the exporter is done. But, this is not an unconditional
# nukage. If we're in the lightmap operators, we clearly want this to persist for
# future exports as an optimization. We won't reach this point if there is already an
# autocolor layer (gulp).
if not self.force and needs_vcol_layer:
self._mesh.context_stack.enter_context(TemporaryObject(vcol_layer.name, lambda layer_name: vcols.remove(vcols[layer_name])))
# Indicate we should bake
return True
def _remove_stale_uvtexes(self, bake):
lightmap_iter = itertools.chain.from_iterable((value for key, value in bake.items() if key[0] == "lightmap"))
for bo in lightmap_iter:
uv_textures = bo.data.uv_textures
uvtex = uv_textures.get(self.lightmap_uvtex_name, None)
if uvtex is not None:
uv_textures.remove(uvtex)
def _restore_uvtexs(self):
for mesh_name, uvtex_name in self._uvtexs.items():
mesh = bpy.data.meshes[mesh_name]
for i in mesh.uv_textures:
i.active = uvtex_name == i.name
mesh.uv_textures.active = mesh.uv_textures[uvtex_name]
def _restore_vcols(self):
for mesh, (vcol_index, vcol_render_index) in self._active_vcols.items():
mesh.vertex_colors[vcol_index].active = True
mesh.vertex_colors[vcol_render_index].active_render = True
def _select_only(self, objs, toggle):
if isinstance(objs, bpy.types.Object):
toggle.track(objs, "hide_render", False)
for i in bpy.data.objects:
if i == objs:
# prevents proper baking to texture
for mat in (j for j in i.data.materials if j is not None):
toggle.track(mat, "use_vertex_color_paint", False)
i.select = True
else:
i.select = False
if isinstance(i.data, bpy.types.Mesh) and not self._has_valid_material(i):
toggle.track(i, "hide_render", True)
else:
for i in bpy.data.objects:
value = i in objs
if value:
# prevents proper baking to texture
for mat in (j for j in i.data.materials if j is not None):
toggle.track(mat, "use_vertex_color_paint", False)
toggle.track(i, "hide_render", False)
elif isinstance(i.data, bpy.types.Mesh) and not self._has_valid_material(i):
toggle.track(i, "hide_render", True)
i.select = value
@contextmanager
def _set_mode(self, mode):
bpy.ops.object.mode_set(mode=mode)
try:
yield
finally:
bpy.ops.object.mode_set(mode="OBJECT")
|
H-uru/korman
|
korman/exporter/etlight.py
|
Python
|
gpl-3.0
| 27,828
|
[
"GULP"
] |
7d16c259c007c5deadbbfe36b8bda05396d0d14889b5c682d3eb4512dd88c6ba
|
import logging
import random
import numpy as np
from ray.rllib.agents import with_common_config
from ray.rllib.agents.dreamer.dreamer_torch_policy import DreamerTorchPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
LEARNER_INFO, _get_shared_metrics
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.agents.dreamer.dreamer_model import DreamerModel
from ray.rllib.execution.rollout_ops import ParallelRollouts
from ray.rllib.utils.typing import SampleBatchType
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# PlaNET Model LR
"td_model_lr": 6e-4,
# Actor LR
"actor_lr": 8e-5,
# Critic LR
"critic_lr": 8e-5,
# Grad Clipping
"grad_clip": 100.0,
# Discount
"discount": 0.99,
# Lambda
"lambda": 0.95,
# Training iterations per data collection from real env
"dreamer_train_iters": 100,
# Horizon for Environment (1000 for Mujoco/DMC)
"horizon": 1000,
# Number of episodes to sample for Loss Calculation
"batch_size": 50,
# Length of each episode to sample for Loss Calculation
"batch_length": 50,
# Imagination Horizon for Training Actor and Critic
"imagine_horizon": 15,
# Free Nats
"free_nats": 3.0,
# KL Coeff for the Model Loss
"kl_coeff": 1.0,
# Distributed Dreamer not implemented yet
"num_workers": 0,
# Prefill Timesteps
"prefill_timesteps": 5000,
# This should be kept at 1 to preserve sample efficiency
"num_envs_per_worker": 1,
# Exploration Gaussian
"explore_noise": 0.3,
# Batch mode
"batch_mode": "complete_episodes",
# Custom Model
"dreamer_model": {
"custom_model": DreamerModel,
# RSSM/PlaNET parameters
"deter_size": 200,
"stoch_size": 30,
# CNN Decoder Encoder
"depth_size": 32,
# General Network Parameters
"hidden_size": 400,
# Action STD
"action_init_std": 5.0,
},
"env_config": {
# Repeats action send by policy for frame_skip times in env
"frame_skip": 2,
}
})
# __sphinx_doc_end__
# yapf: enable
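# A hedged sketch of overriding a few of the defaults above before building a
# trainer (keys are from DEFAULT_CONFIG; the values are illustrative only):
#   my_config = DEFAULT_CONFIG.copy()
#   my_config["dreamer_train_iters"] = 50
#   my_config["batch_size"] = 32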
class EpisodicBuffer(object):
def __init__(self, max_length: int = 1000, length: int = 50):
"""Data structure that stores episodes and samples chunks
of size length from episodes
Args:
max_length: Maximum episodes it can store
length: Episode chunking length in sample()
"""
# Stores all episodes into a list: List[SampleBatchType]
self.episodes = []
self.max_length = max_length
self.timesteps = 0
self.length = length
def add(self, batch: SampleBatchType):
"""Splits a SampleBatch into episodes and adds episodes
to the episode buffer
Args:
batch: SampleBatch to be added
"""
self.timesteps += batch.count
episodes = batch.split_by_episode()
for i, e in enumerate(episodes):
episodes[i] = self.preprocess_episode(e)
self.episodes.extend(episodes)
if len(self.episodes) > self.max_length:
delta = len(self.episodes) - self.max_length
# Drop oldest episodes
self.episodes = self.episodes[delta:]
def preprocess_episode(self, episode: SampleBatchType):
"""Batch format should be in the form of (s_t, a_(t-1), r_(t-1))
        When t=0, the reset obs is paired with an action and reward of 0.
Args:
episode: SampleBatch representing an episode
"""
obs = episode["obs"]
new_obs = episode["new_obs"]
action = episode["actions"]
reward = episode["rewards"]
act_shape = action.shape
act_reset = np.array([0.0] * act_shape[-1])[None]
rew_reset = np.array(0.0)[None]
obs_end = np.array(new_obs[act_shape[0] - 1])[None]
batch_obs = np.concatenate([obs, obs_end], axis=0)
batch_action = np.concatenate([act_reset, action], axis=0)
batch_rew = np.concatenate([rew_reset, reward], axis=0)
new_batch = {
"obs": batch_obs,
"rewards": batch_rew,
"actions": batch_action
}
return SampleBatch(new_batch)
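    # Editorial sketch (not part of the original file): how preprocess_episode
    # aligns observations, actions and rewards for an episode with T = 3 steps.
    #
    #   obs      : [o0, o1, o2]          new_obs : [o1, o2, o3]
    #   actions  : [a0, a1, a2]          rewards : [r0, r1, r2]
    #
    # The returned SampleBatch holds length T + 1 = 4 sequences:
    #   "obs"     -> [o0, o1, o2, o3]    (obs plus the final new_obs)
    #   "actions" -> [ 0, a0, a1, a2]    (zero action prepended)
    #   "rewards" -> [ 0, r0, r1, r2]    (zero reward prepended)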
def sample(self, batch_size: int):
"""Samples [batch_size, length] from the list of episodes
Args:
batch_size: batch_size to be sampled
"""
episodes_buffer = []
while len(episodes_buffer) < batch_size:
rand_index = random.randint(0, len(self.episodes) - 1)
episode = self.episodes[rand_index]
if episode.count < self.length:
continue
available = episode.count - self.length
index = int(random.randint(0, available))
episodes_buffer.append(episode.slice(index, index + self.length))
batch = {}
for k in episodes_buffer[0].keys():
batch[k] = np.stack([e[k] for e in episodes_buffer], axis=0)
return SampleBatch(batch)
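# Editorial usage sketch (not part of the original file); `rollout_batch` is a
# hypothetical SampleBatch containing one or more complete episodes.
#
#   buffer = EpisodicBuffer(max_length=1000, length=50)
#   buffer.add(rollout_batch)        # split by episode, preprocess, store
#   train_batch = buffer.sample(50)  # SampleBatch of 50 random 50-step chunks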
def total_sampled_timesteps(worker):
return worker.policy_map[DEFAULT_POLICY_ID].global_timestep
class DreamerIteration:
def __init__(self, worker, episode_buffer, dreamer_train_iters, batch_size,
act_repeat):
self.worker = worker
self.episode_buffer = episode_buffer
self.dreamer_train_iters = dreamer_train_iters
self.repeat = act_repeat
self.batch_size = batch_size
def __call__(self, samples):
# Dreamer Training Loop
for n in range(self.dreamer_train_iters):
print(n)
batch = self.episode_buffer.sample(self.batch_size)
if n == self.dreamer_train_iters - 1:
batch["log_gif"] = True
fetches = self.worker.learn_on_batch(batch)
# Custom Logging
policy_fetches = self.policy_stats(fetches)
if "log_gif" in policy_fetches:
gif = policy_fetches["log_gif"]
policy_fetches["log_gif"] = self.postprocess_gif(gif)
# Metrics Calculation
metrics = _get_shared_metrics()
metrics.info[LEARNER_INFO] = fetches
metrics.counters[STEPS_SAMPLED_COUNTER] = self.episode_buffer.timesteps
metrics.counters[STEPS_SAMPLED_COUNTER] *= self.repeat
res = collect_metrics(local_worker=self.worker)
res["info"] = metrics.info
res["info"].update(metrics.counters)
res["timesteps_total"] = metrics.counters[STEPS_SAMPLED_COUNTER]
self.episode_buffer.add(samples)
return res
def postprocess_gif(self, gif: np.ndarray):
gif = np.clip(255 * gif, 0, 255).astype(np.uint8)
B, T, C, H, W = gif.shape
frames = gif.transpose((1, 2, 3, 0, 4)).reshape((1, T, C, H, B * W))
return frames
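    # Editorial note (not part of the original file): postprocess_gif tiles the
    # batch dimension along the width, e.g. an input of shape
    # (B=4, T=16, C=3, H=64, W=64) becomes a single strip of shape
    # (1, 16, 3, 64, 256), suitable for logging as one animation.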
def policy_stats(self, fetches):
return fetches["default_policy"]["learner_stats"]
def execution_plan(workers, config):
# Special Replay Buffer for Dreamer agent
episode_buffer = EpisodicBuffer(length=config["batch_length"])
local_worker = workers.local_worker()
# Prefill episode buffer with initial exploration (uniform sampling)
while total_sampled_timesteps(local_worker) < config["prefill_timesteps"]:
samples = local_worker.sample()
episode_buffer.add(samples)
batch_size = config["batch_size"]
dreamer_train_iters = config["dreamer_train_iters"]
act_repeat = config["action_repeat"]
rollouts = ParallelRollouts(workers)
rollouts = rollouts.for_each(
DreamerIteration(local_worker, episode_buffer, dreamer_train_iters,
batch_size, act_repeat))
return rollouts
def get_policy_class(config):
return DreamerTorchPolicy
def validate_config(config):
config["action_repeat"] = config["env_config"]["frame_skip"]
if config["framework"] != "torch":
raise ValueError("Dreamer not supported in Tensorflow yet!")
if config["batch_mode"] != "complete_episodes":
raise ValueError("truncate_episodes not supported")
if config["num_workers"] != 0:
raise ValueError("Distributed Dreamer not supported yet!")
if config["clip_actions"]:
raise ValueError("Clipping is done inherently via policy tanh!")
if config["action_repeat"] > 1:
config["horizon"] = config["horizon"] / config["action_repeat"]
DREAMERTrainer = build_trainer(
name="Dreamer",
default_config=DEFAULT_CONFIG,
default_policy=DreamerTorchPolicy,
get_policy_class=get_policy_class,
execution_plan=execution_plan,
validate_config=validate_config)
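# Editorial sketch (not part of the original file): one way the trainer built
# above might be used, assuming Ray has been initialized and "my_dmc_env" is a
# registered pixel-based continuous-control environment (hypothetical name).
#
#   trainer = DREAMERTrainer(config={"env_config": {"frame_skip": 2}}, env="my_dmc_env")
#   result = trainer.train()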
|
richardliaw/ray
|
rllib/agents/dreamer/dreamer.py
|
Python
|
apache-2.0
| 8,885
|
[
"Gaussian"
] |
09cd7eb574d184741815874c8835cb9cebc2a85178f2f52cc6687a3d27a4405c
|
import filecmp
import json
import logging
import os
import re
from ast import literal_eval as make_tuple
from typing import List, Optional, Tuple, Set, Dict
import numpy as np
import pymc3 as pm
from . import io_consts
from .._version import __version__ as gcnvkernel_version
from ..models.fancy_model import GeneralizedContinuousModel
_logger = logging.getLogger(__name__)
def extract_sample_name_from_header(input_file: str,
max_scan_lines: int = 10000,
sample_name_header_regexp: str = io_consts.sample_name_header_regexp) -> str:
"""Extracts sample name from header.
Args:
input_file: any readable text file
max_scan_lines: maximum number of lines to scan from the top of the file
sample_name_header_regexp: the regular expression for identifying the header line that contains
the sample name
Returns:
Sample name
"""
with open(input_file, 'r') as f:
for _ in range(max_scan_lines):
line = f.readline()
match = re.search(sample_name_header_regexp, line, re.M)
if match is None:
continue
groups = match.groups()
return groups[0]
raise Exception("Sample name could not be found in \"{0}\"".format(input_file))
def get_sample_name_from_txt_file(input_path: str) -> str:
"""Extract sample name from a text file.
Args:
input_path: a path containing the sample name .txt file
Returns:
Sample name
"""
sample_name_file = os.path.join(input_path, io_consts.default_sample_name_txt_filename)
assert os.path.exists(sample_name_file), \
"Sample name .txt file could not be found in \"{0}\"".format(input_path)
with open(sample_name_file, 'r') as f:
for line in f:
return line.strip()
def write_sample_name_to_txt_file(output_path: str, sample_name: str):
"""Writes sample name to a text file."""
with open(os.path.join(output_path, io_consts.default_sample_name_txt_filename), 'w') as f:
f.write(sample_name + '\n')
def assert_output_path_writable(output_path: str,
try_creating_output_path: bool = True):
"""Assert an output path is either writable or can be created upon request.
Args:
output_path: the tentative output path
        try_creating_output_path: whether or not to try creating the path recursively
if it does not already exist
Raises:
IOError: if the output path is not writable, is not a directory, or does
not exist and can not be created
Returns:
None
"""
if os.path.exists(output_path):
if not os.path.isdir(output_path):
raise IOError("The provided output path \"{0}\" is not a directory")
elif try_creating_output_path:
try:
os.makedirs(output_path)
except IOError:
raise IOError("The provided output path \"{0}\" does not exist and can not be created")
tmp_prefix = "write_tester"
count = 0
filename = os.path.join(output_path, tmp_prefix)
while os.path.exists(filename):
filename = "{}.{}".format(os.path.join(output_path, tmp_prefix), count)
count = count + 1
try:
filehandle = open(filename, 'w')
filehandle.close()
os.remove(filename)
except IOError:
raise IOError("The output path \"{0}\" is not writeable".format(output_path))
def write_ndarray_to_tsv(output_file: str,
array: np.ndarray,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char,
extra_comment_lines: Optional[List[str]] = None,
header: Optional[str] = None,
write_shape_info: bool = True) -> None:
"""Write an vector or matrix ndarray to .tsv file.
Note:
Shape and dtype information are stored in the header.
Args:
output_file: output .tsv file
array: array to write to .tsv
comment: comment character
delimiter: delimiter character
extra_comment_lines: (optional) list of extra comment lines to add to the header
header: header line (e.g. for representing the ndarray as a table with named columns)
write_shape_info: if True, ndarray shape info will be written to the header
Returns:
None
"""
array = np.asarray(array)
assert array.ndim <= 2
shape = array.shape
dtype = array.dtype
if array.ndim == 2:
array_matrix = array
else:
array_matrix = array.reshape((array.size, 1))
with open(output_file, 'w') as f:
if write_shape_info:
f.write(comment + 'shape=' + repr(shape) + '\n')
f.write(comment + 'dtype=' + str(dtype) + '\n')
if extra_comment_lines is not None:
for comment_line in extra_comment_lines:
f.write(comment + comment_line + '\n')
if header is not None:
f.write(header + '\n')
for i_row in range(array_matrix.shape[0]):
row = array_matrix[i_row, :]
row_repr = delimiter.join([repr(x) for x in row])
f.write(row_repr + '\n')
def read_ndarray_from_tsv(input_file: str,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char) -> np.ndarray:
"""Reads a vector or matrix ndarray from .tsv file.
Args:
input_file: input .tsv file
comment: comment character
delimiter: delimiter character
Returns:
ndarray
"""
dtype = None
shape = None
rows: List[np.ndarray] = []
def _get_value(key: str, _line: str):
key_loc = _line.find(key)
if key_loc >= 0:
val_loc = _line.find('=')
return _line[val_loc + 1:].strip()
else:
return None
with open(input_file, 'r') as f:
for line in f:
stripped_line = line.strip()
if len(stripped_line) == 0:
continue
elif stripped_line[0] == comment:
if dtype is None:
dtype = _get_value('dtype', stripped_line)
if shape is None:
shape = _get_value('shape', stripped_line)
else:
assert dtype is not None and shape is not None,\
"Shape and dtype information could not be found in the header of " \
"\"{0}\"".format(input())
row = np.asarray(stripped_line.split(delimiter), dtype=dtype)
rows.append(row)
return np.vstack(rows).reshape(make_tuple(shape))
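# Editorial sketch (not part of the original file): round trip of the .tsv
# layout shared by write_ndarray_to_tsv and read_ndarray_from_tsv, assuming
# io_consts uses '@' as the comment character and tab as the delimiter (the
# actual characters come from io_consts).
#
#   write_ndarray_to_tsv("example.tsv", np.arange(6).reshape(2, 3))
#   # example.tsv then starts with a header such as:
#   #   @shape=(2, 3)
#   #   @dtype=int64
#   # followed by one delimited row per matrix row.
#   arr = read_ndarray_from_tsv("example.tsv")   # arr.shape == (2, 3)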
def get_var_map_list_from_meanfield_approx(approx: pm.MeanField) -> List[pm.blocking.VarMap]:
"""Extracts the variable-to-linear-array of a PyMC3 mean-field approximation.
Args:
approx: an instance of PyMC3 mean-field approximation
Returns:
A list of `pymc3.blocking.VarMap`
"""
if pm.__version__ == "3.1":
return approx.gbij.ordering.vmap
elif pm.__version__ == "3.2":
return approx.bij.ordering.vmap
else:
raise Exception("Unsupported PyMC3 version")
def extract_meanfield_posterior_parameters(approx: pm.MeanField)\
-> Tuple[Set[str], Dict[str, np.ndarray], Dict[str, np.ndarray]]:
"""Extracts mean-field posterior parameters in the right shape and dtype from an instance
of PyMC3 mean-field approximation.
Args:
approx: an instance of PyMC3 mean-field approximation
Returns:
A tuple (set of variable names,
map from variable names to their respective Gaussian means,
map from variable names to their respective Gaussian standard deviations)
"""
mu_flat_view = approx.mean.get_value()
std_flat_view = approx.std.eval()
mu_map = dict()
std_map = dict()
var_set = set()
for vmap in get_var_map_list_from_meanfield_approx(approx):
var_set.add(vmap.var)
mu_map[vmap.var] = mu_flat_view[vmap.slc].reshape(vmap.shp).astype(vmap.dtyp)
std_map[vmap.var] = std_flat_view[vmap.slc].reshape(vmap.shp).astype(vmap.dtyp)
return var_set, mu_map, std_map
def write_dict_to_json_file(output_file: str,
dict_to_write: Dict,
ignored_keys: Set):
"""Writes a dictionary to JSON file.
Args:
output_file: output .json file
dict_to_write: dictionary to write to file
ignored_keys: a set of keys to ignore
"""
filtered_dict = {k: v for k, v in dict_to_write.items() if k not in ignored_keys}
with open(output_file, 'w') as fp:
json.dump(filtered_dict, fp, indent=1)
def check_gcnvkernel_version_from_json_file(gcnvkernel_version_json_file: str):
"""Reads gcnvkernel version from a JSON file and issues a warning if it is created with a different
version of the module.
Args:
gcnvkernel_version_json_file: input .json file containing gcnvkernel version
"""
with open(gcnvkernel_version_json_file, 'r') as fp:
loaded_gcnvkernel_version = json.load(fp)['version']
if loaded_gcnvkernel_version != gcnvkernel_version:
_logger.warning("The saved model is created with a different version of gcnvkernel (saved: {0}, "
"current: {1}). Backwards compatibility is not guaranteed. Proceed at your own "
"risk".format(loaded_gcnvkernel_version, gcnvkernel_version))
def check_gcnvkernel_version_from_path(input_path: str):
"""Reads gcnvkernel version from a path that contains `io_consts.default_gcnvkernel_version_json_filename`,
reads the gcnvkernel version and issues a warning if it is created with a different version of the module.
Args:
input_path:
"""
check_gcnvkernel_version_from_json_file(
os.path.join(input_path, io_consts.default_gcnvkernel_version_json_filename))
def _get_mu_tsv_filename(path: str, var_name: str):
return os.path.join(path, "mu_" + var_name + ".tsv")
def _get_std_tsv_filename(path: str, var_name: str):
return os.path.join(path, "std_" + var_name + ".tsv")
def _get_singleton_slice_along_axis(array: np.ndarray, axis: int, index: int):
slc = [slice(None)] * array.ndim
slc[axis] = index
return slc
def write_meanfield_sample_specific_params(sample_index: int,
sample_posterior_path: str,
approx_var_name_set: Set[str],
approx_mu_map: Dict[str, np.ndarray],
approx_std_map: Dict[str, np.ndarray],
model: GeneralizedContinuousModel,
extra_comment_lines: Optional[List[str]] = None):
"""Writes sample-specific parameters contained in an instance of PyMC3 mean-field approximation
to disk.
Args:
sample_index: sample integer index
sample_posterior_path: output path (must be writable)
approx_var_name_set: set of all variable names in the model
approx_mu_map: a map from variable names to their respective Gaussian means
approx_std_map: a map from variable names to their respective Gaussian standard deviations
model: the generalized model corresponding to the provided mean-field approximation
extra_comment_lines: (optional) additional comment lines to write to the header of each output file
"""
sample_specific_var_registry = model.sample_specific_var_registry
for var_name, var_sample_axis in sample_specific_var_registry.items():
assert var_name in approx_var_name_set, "A model variable named \"{0}\" could not be found in the " \
"meanfield posterior while trying to write sample-specific " \
"variables to disk".format(var_name)
mu_all = approx_mu_map[var_name]
std_all = approx_std_map[var_name]
mu_slice = mu_all[_get_singleton_slice_along_axis(mu_all, var_sample_axis, sample_index)]
        std_slice = std_all[_get_singleton_slice_along_axis(std_all, var_sample_axis, sample_index)]
mu_out_file_name = _get_mu_tsv_filename(sample_posterior_path, var_name)
write_ndarray_to_tsv(mu_out_file_name, mu_slice, extra_comment_lines=extra_comment_lines)
std_out_file_name = _get_std_tsv_filename(sample_posterior_path, var_name)
write_ndarray_to_tsv(std_out_file_name, std_slice, extra_comment_lines=extra_comment_lines)
def write_meanfield_global_params(output_path: str,
approx: pm.MeanField,
model: GeneralizedContinuousModel):
"""Writes global parameters contained in an instance of PyMC3 mean-field approximation to disk.
Args:
output_path: output path (must be writable)
approx: an instance of PyMC3 mean-field approximation
model: the generalized model corresponding to the provided mean-field approximation
"""
# parse meanfield posterior parameters
approx_var_set, approx_mu_map, approx_std_map = extract_meanfield_posterior_parameters(approx)
for var_name in model.global_var_registry:
assert var_name in approx_var_set, "A model variable named \"{0}\" could not be found in the " \
"meanfield posterior while trying to write global variables " \
"to disk".format(var_name)
_logger.info("Writing {0}...".format(var_name))
var_mu = approx_mu_map[var_name]
var_mu_out_path = _get_mu_tsv_filename(output_path, var_name)
write_ndarray_to_tsv(var_mu_out_path, var_mu)
var_std = approx_std_map[var_name]
var_std_out_path = _get_std_tsv_filename(output_path, var_name)
write_ndarray_to_tsv(var_std_out_path, var_std)
def read_meanfield_global_params(input_model_path: str,
approx: pm.MeanField,
model: GeneralizedContinuousModel) -> None:
"""Reads global parameters of a given model from saved mean-field posteriors and injects them
into a provided mean-field instance.
Args:
input_model_path: input model path
approx: an instance of PyMC3 mean-field approximation to be updated
model: the generalized model corresponding to the provided mean-field approximation and the saved
instance
"""
vmap_list = get_var_map_list_from_meanfield_approx(approx)
def _update_param_inplace(param, slc, dtype, new_value):
param[slc] = new_value.astype(dtype).flatten()
return param
model_mu = approx.params[0]
model_rho = approx.params[1]
for var_name in model.global_var_registry:
var_mu_input_file = _get_mu_tsv_filename(input_model_path, var_name)
var_std_input_file = _get_std_tsv_filename(input_model_path, var_name)
assert os.path.exists(var_mu_input_file) and os.path.exists(var_std_input_file), \
"Model parameter values for \"{0}\" could not be found in the saved model path while trying " \
"to read global mean-field parameters".format(var_name)
_logger.info("Reading model parameter values for \"{0}\"...".format(var_name))
var_mu = read_ndarray_from_tsv(var_mu_input_file)
var_std = read_ndarray_from_tsv(var_std_input_file)
# convert std to rho, see pymc3.dist_math.sd2rho
var_rho = np.log(np.exp(var_std) - 1)
del var_std
for vmap in vmap_list:
if vmap.var == var_name:
assert var_mu.shape == vmap.shp,\
"Loaded mean for \"{0}\" has an unexpected shape; loaded: {1}, " \
"expected: {2}".format(var_name, var_mu.shape, vmap.shp)
                assert var_rho.shape == vmap.shp, \
                    "Loaded standard deviation for \"{0}\" has an unexpected shape; loaded: {1}, " \
                    "expected: {2}".format(var_name, var_rho.shape, vmap.shp)
model_mu.set_value(_update_param_inplace(
model_mu.get_value(borrow=True), vmap.slc, vmap.dtyp, var_mu), borrow=True)
model_rho.set_value(_update_param_inplace(
model_rho.get_value(borrow=True), vmap.slc, vmap.dtyp, var_rho), borrow=True)
def read_meanfield_sample_specific_params(input_sample_calls_path: str,
sample_index: int,
sample_name: str,
approx: pm.MeanField,
model: GeneralizedContinuousModel):
"""Reads sample-specific parameters of a given sample from saved mean-field posteriors and injects them
into a provided mean-field instance.
Args:
input_sample_calls_path: path to saved sample-specific posteriors
sample_index: index of the sample in the current instance of model/approximation
sample_name: name of the sample in the current instance of model/approximation
(used to check whether `input_sample_calls_path` actually corresponds to the sample)
approx: an instance of PyMC3 mean-field approximation corresponding to the provided model
model: the generalized model corresponding to the provided mean-field approximation
Returns:
None
"""
path_sample_name = get_sample_name_from_txt_file(input_sample_calls_path)
assert path_sample_name == sample_name, \
"The sample name in \"{0}\" does not match the sample name at index {1}; " \
"found: {2}, expected: {3}. Make sure that the saved posteriors and the current " \
"task correspond to the same datasets and with the same order/name of samples.".format(
input_sample_calls_path, sample_index, path_sample_name, sample_name)
vmap_list = get_var_map_list_from_meanfield_approx(approx)
def _update_param_inplace(_param: np.ndarray,
_var_slice: slice,
_var_shape: Tuple,
_sample_specific_loaded_value: np.ndarray,
_var_sample_axis: int,
_sample_index: int) -> np.ndarray:
"""Updates the ndarray buffer of the shared parameter tensor according to a given sample-specific
parameter for a given sample index.
Args:
_param: ndarray buffer of the shared parameter tensor (i.e. `mu` or `rho`)
            _var_slice: the slice of `_param` that yields the full view of the sample-specific
                parameter to be updated
_var_shape: full shape of the sample-specific parameter to be updated
_sample_specific_loaded_value: new single-sample slice of the sample-specific parameter
to be updated
_var_sample_axis: the sample-index axis in the full view of the sample-specific
                parameter to be updated
_sample_index: sample index
Returns:
updated `_param`
"""
sample_specific_var = _param[_var_slice].reshape(_var_shape)
sample_specific_var[_get_singleton_slice_along_axis(
sample_specific_var, _var_sample_axis, _sample_index)] = _sample_specific_loaded_value[:]
return _param
# reference to meanfield posterior mu and rho
model_mu = approx.params[0]
model_rho = approx.params[1]
for var_name, var_sample_axis in model.sample_specific_var_registry.items():
var_mu_input_file = _get_mu_tsv_filename(input_sample_calls_path, var_name)
var_std_input_file = _get_std_tsv_filename(input_sample_calls_path, var_name)
assert os.path.exists(var_mu_input_file) and os.path.exists(var_std_input_file), \
"Model parameter values for \"{0}\" could not be found in the provided calls " \
"path \"{1}\"".format(var_name, input_sample_calls_path)
var_mu = read_ndarray_from_tsv(var_mu_input_file)
var_std = read_ndarray_from_tsv(var_std_input_file)
# convert std to rho, see pymc3.dist_math.sd2rho
var_rho = np.log(np.exp(var_std) - 1)
del var_std
# update mu and rho
for vmap in vmap_list:
if vmap.var == var_name:
model_mu.set_value(_update_param_inplace(
model_mu.get_value(borrow=True), vmap.slc, vmap.shp, var_mu,
var_sample_axis, sample_index), borrow=True)
model_rho.set_value(_update_param_inplace(
model_rho.get_value(borrow=True), vmap.slc, vmap.shp, var_rho,
var_sample_axis, sample_index), borrow=True)
def write_gcnvkernel_version(output_path: str):
"""Writes the current gcnvkernel version as a JSON file to a given path.
Args:
output_path: path to write the gcnvkernel version
Returns:
None
"""
# write gcnvkernel version
write_dict_to_json_file(
os.path.join(output_path, io_consts.default_gcnvkernel_version_json_filename),
{'version': gcnvkernel_version}, set())
def assert_mandatory_columns(mandatory_columns_set: Set[str],
found_columns_set: Set[str],
input_tsv_file: str):
"""Asserts that a given .tsv file contains a set of mandatory header columns.
Note:
The set of header columns found in the .tsv file must be provided. `input_tsv_file` is only used
for generating exception messages.
Args:
mandatory_columns_set: set of mandatory header columns
found_columns_set: set of header columns found in the .tsv file
input_tsv_file: path to the .tsv file in question
Returns:
None
"""
not_found_set = mandatory_columns_set.difference(found_columns_set)
assert len(not_found_set) == 0, "The following mandatory columns could not be found in \"{0}\"; " \
"cannot continue: {1}".format(input_tsv_file, not_found_set)
def assert_files_are_identical(input_file_1: str, input_file_2: str):
"""Asserts that two given files are bit identical."""
assert os.path.isfile(input_file_1), "Cannot find {0}.".format(input_file_1)
assert os.path.isfile(input_file_2), "Cannot find {0}.".format(input_file_2)
assert filecmp.cmp(input_file_1, input_file_2, shallow=False), \
"The following two files are expected to be identical: {0}, {1}".format(input_file_1, input_file_2)
|
magicDGS/gatk
|
src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_commons.py
|
Python
|
bsd-3-clause
| 22,930
|
[
"Gaussian"
] |
66ac1d8288883cbff7309aaf650061df4acf392d54d6ecfee84982aebcfdafe2
|
import json
class Element(object):
"""
Abstract class for Cytoscape.js graph element with common properties for nodes and edges
"""
def __init__(self, _id):
self.__metaclass__ = Element
self.group = 'nodes' # 'nodes' for a node, 'edges' for an edge
self.data = { # element data (put json serializable dev data here)
# define 'source' and 'target' for an edge
'id': small_hash(self) if _id is None else _id
}
self.scratch = {} # scratchpad data (usually temp or non-serializable data)
self.selected = False # whether the element is selected (default false)
self.selectable = True # whether the selection state is mutable (default true)
self.classes = '' # a space separated list of class names that the element has
def add_class(self, cls):
if self.classes.__len__() > 0:
self.classes += ' '
self.classes += cls
def add_data(self, key, value):
# self.data.update({key : value})
self.data[key] = value
def set_edge_data(self, source_el, target_el):
self.group = 'edges'
self.add_data('source', source_el.data['id'])
self.add_data('target', target_el.data['id'])
def to_json(self):
return json.dumps(self, default=serialize_json)
class Node(Element):
"""
Class which represents Cytoscape.js nodes with node specific properties
"""
def __init__(self, _id=None):
super().__init__(_id)
self.group = 'nodes'
self.position = { # the model position of the node (optional on init, mandatory after)
'x': 0,
'y': 0
}
self.locked = False # when locked a node's position is immutable (default false)
self.grabbable = True # whether the node can be grabbed and moved by the user
class Edge(Element):
"""
Class which represents Cytoscape.js edges with edge specific properties
"""
def __init__(self, source_el, target_el, _id=None):
super().__init__(_id)
self.set_edge_data(source_el, target_el)
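# Editorial usage sketch (not part of the original file): building a tiny
# two-node graph and serializing it for Cytoscape.js.
#
#   n1, n2 = Node('a'), Node('b')
#   n1.add_class('entity')
#   e = Edge(n1, n2)                 # data gets 'source': 'a', 'target': 'b'
#   elements_json = [el.to_json() for el in (n1, n2, e)]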
class ViewStyle(object):
"""
Class which represents Cytoscape.js style definition.
Contains css styles and selector for elements for which those styles should be applied.
"""
def __init__(self, selector):
# self.selector = element.__class__.__name__.lower()
# if element.classes.__len__() > 0:
# self.selector += '.' + element.classes.replace(' ', '.')
self.selector = selector
self.style = {}
def to_json(self):
return json.dumps(self, default=serialize_json)
def serialize_json(obj):
"""
A helper method for serializing Cytoscape.js elements in desired json form.
:param obj: Object to serialize
:return: JSON string representation of obj
"""
# handle concrete class serialization
if hasattr(obj, '__metaclass__') and obj.__metaclass__.__name__ == 'Element':
json = {} # { '__classname__' : type(obj).__name__ }
json.update(vars(obj))
json.pop('__metaclass__', None) # remove __metaclass__ from json
# handle abstract class serialization
elif obj.__class__.__name__ == 'type' and obj.__name__ == 'Element':
json = obj.__name__
elif obj.__class__.__name__ == 'ViewStyle':
json = {}
json.update(vars(obj))
else:
json = obj.__str__()
return json
def deserialize_json(json):
"""
A helper method for deserializing json into Cytoscape.js elements.
:param json: json representation of Cytoscape.js element
:return: Cytoscape.js element object if json is valid, else json
"""
class_name = json.pop('__classname__', None)
if class_name == 'Element': # type(self).__name__:
obj = Element.__new__(Element) # Make instance without calling __init__
for key, value in json.items():
setattr(obj, key, value)
return obj
else:
return json
def small_hash(instance, digits=9):
"""
    Cytoscape.js has trouble dealing with large or negative integers.
Returns positive and n-digit hash of the passed instance.
    :param instance: object whose positive n-digit hash is returned
    :param digits: number of digits of the resulting hash. Default is 9.
    :return: positive n-digit hash of the instance
"""
# take positive value
hash = instance.__hash__().__abs__()
hash_str = str(hash)
# reduce hash to n digit
hash = int(hash_str[-digits:]) if hash_str.__len__() > digits else hash
    # if the leading digit was 0 (hash now shorter than n digits), pad back up to n digits
hash = hash * 10 if str(hash).__len__() < digits else hash
return hash
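# Editorial note (not part of the original file): for an object whose
# __hash__() returns -8735942185736491, small_hash() takes the absolute value,
# keeps the last 9 digits (185736491) and returns that; if the truncated value
# had started with a 0 and therefore lost a digit, it would be multiplied by 10
# to restore the requested width.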
|
danielkupco/viewX-vscode
|
src/python/cytoscape_helper.py
|
Python
|
mit
| 4,728
|
[
"Cytoscape"
] |
854a912c41335f12132d358e3f2dd8e9b408ad91019121d62c88786d43570b64
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
class AssertTransformer(gast.NodeTransformer):
"""
    A class that transforms Python assert statements into convert_assert calls.
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of AssertTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
def transform(self):
self.visit(self.root)
def visit_Assert(self, node):
convert_assert_node = gast.parse(
'paddle.jit.dy2static.convert_assert({test}, {msg})'.format(
test=ast_to_source_code(node.test),
msg=ast_to_source_code(node.msg)
if node.msg else "")).body[0].value
return gast.Expr(value=convert_assert_node)
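# Editorial sketch (not part of the original file): the rewrite performed by
# visit_Assert, shown on a toy statement.
#
#   source:      assert x > 0, "x must be positive"
#   transformed: paddle.jit.dy2static.convert_assert(x > 0, 'x must be positive')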
|
luotao1/Paddle
|
python/paddle/fluid/dygraph/dygraph_to_static/assert_transformer.py
|
Python
|
apache-2.0
| 1,648
|
[
"VisIt"
] |
bfdbef8c9e2b10eb955712157596f7b15beeaa1ce83eb770b37adba5ed2be8a8
|
# Copyright 2014 the National Renewable Energy Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides utilities to work with the following mesh types:
* WAMIT
* Nemoh
* VTK PolyData (*.vtp)
* STL files
The functionality provided includes:
* Ability to read the mesh formats listed above
* Ability to convert between the mesh formats listed above
* Utilities to calculate volume, surface area, linear spring stiffness, and
other related mesh parameters.
.. Note::
"""
import numpy as np
import imp
import os
from sys import platform as _platform
from copy import copy
from platform import system as _system
try:
import vtk
from vtk.util.numpy_support import vtk_to_numpy
except:
    print 'The VTK Python module is required for a significant amount of functionality in this module. Many functions will not be available for use.'
class VTK_Exception(Exception):
pass
class PanelMesh(object):
''' Class to store mesh data. All mesh data is currently read and stored
as quad elements. Tri elements are supported, but are stored as quad
elements with a repeated point.
Parameters:
file_name : str
Name of mesh file to read. Currently WAMIT (.gdf), Stereolithography
            (.stl), VTK PolyData (.vtp), and NEMOH (.dat) mesh formats are
supported
    Attributes:
        files : dictionary
            Dictionary containing input and output file names
orig_type : str
Mesh type of input file
points : list
List of points that define the mesh. `points[n] = [x coord, y coord, z coord]`.
faces : list
List of points that define connectivity for each face. `face[n] = [point 1 index
, point 2 index, point 3 index, point 4 index]`, where 'point 1-4'
            are integers that correspond to the point index in the `points`
            attribute.
center_of_gravity : np.array
Center of gravity of floating body
center_of_buoyancy : np.array
Center of buoyancy
volume_vtk : np.array
Mesh volume determined using VTK
volume_x, y, and z : np.array
Mesh volume determined using internal bemio calculations
surface_area_vtk : float
Surface area determined using VTK
surface_area : float
Surface area determined using internal bemio calculations
normals : np.array
            Cell normals. This array is size `[faces.shape[0], 3]`.
`normals[n] = [x, y, z]` is a vector that defines the normal vector
for face `n`
cell_surface_area : np.array
            Cell surface area. This array is size `[faces.shape[0]]`.
            `cell_surface_area[n]` is the surface area of face[n].
centroid : np.array
Cell centroid. This array is size `[faces.shape[0], 3]`.
            `centroid[n]` is the centroid of face[n].
hydrostatic_stiffness : np.array
The linear hydrostatic stiffness matrix of the mesh assuming the
water surface is at z=0
bounds : dictionary
The bounds of the mesh. `bounds['min']` and `bounds['max']` are the
minimum and maximum mesh dimensions, respectively
'''
def __init__(self,file_name):
self.files = {}
self.files['input_file'] = file_name
self.orig_type = None
self.points = []
self.faces = []
self.center_of_gravity = np.array([0., 0., 0.])
self._center_of_buoyancy = None
self._volume_vtk = None
self._volume_x = None
self._volume_y = None
self._volume_z = None
self._surface_area = None
self._surface_area_vtk = None
self._normals = None
self._cell_surface_area = None
self._centroid = None
self._hydrostatic_stiffness = None
self._bounds = None
self.zero_tol = -1.e-3
try:
imp.find_module('vtk')
self.VTK_installed = True
except :
self.VTK_installed = False
if os.path.isfile(file_name) is False:
raise Exception('The file ' + file_name + ' does not exist')
def __repr__(self):
out_string = 'Object type: bemio.mesh_utilities.mesh.PanelMesh' + \
'\nFile name: ' + str(self.files['input_file']) + \
'\nNumber of points: ' + str(self.points.shape[0]) + \
'\nNumber of faces: ' + str(self.faces.shape[0]) + \
'\nOriginal mesh type: ' + str(self.orig_type) + \
'\nMesh bounds:' + \
'\n\tMax: ' + str(self.bounds['max']) + \
'\n\tMin: ' + str(self.bounds['min']) + \
'\nCenter of mass: ' + str(self.center_of_gravity) + \
'\nCenter of buoyancy: ' + str(self.center_of_buoyancy) + \
'\nMesh volume [volume_x, volume_y, volume_z]: [' + str(self.volume_x) + ', ' + str(self.volume_y) + ', ' + str(self.volume_z) + ']' + \
'\nMesh surface area: ' + str(self.surface_area) + \
'\nHydrostatic stiffness: ' + \
'\n\tC[3,3], C[3,4], C[3,5]: ' + str(self.hydrostatic_stiffness[2,2]) + ', ' + str(self.hydrostatic_stiffness[2,3]) + ', ' + str(self.hydrostatic_stiffness[2,4]) + \
'\n\tC[4,4], C[4,5], C[4,6]: ' + str(self.hydrostatic_stiffness[3,3]) + ', ' + str(self.hydrostatic_stiffness[3,4]) + ', ' + str(self.hydrostatic_stiffness[3,5]) + \
'\n\tC[5,5], C[5,6]: ' + str(self.hydrostatic_stiffness[4,4]) + ', ' + str(self.hydrostatic_stiffness[4,5])
return out_string
    @property
def bounds(self, ):
if self._bounds is None:
self._bounds = {}
self._bounds['max'] = self.points.max(axis=0)
self._bounds['min'] = self.points.min(axis=0)
return self._bounds
@property
def hydrostatic_stiffness(self, ):
'''Getter for the `hydrostatic_stiffness` variable.
Calculated as defined in Section 3.1 of the WAMIT v7.0 users manual.
'''
if self._hydrostatic_stiffness is None:
self._hydrostatic_stiffness = np.zeros([6,6])
for face_n,face in enumerate(self.faces):
if self.points[face[0]][2] <= self.zero_tol or \
self.points[face[1]][2] <= self.zero_tol or \
self.points[face[2]][2] <= self.zero_tol or \
self.points[face[3]][2] <= self.zero_tol:
self._hydrostatic_stiffness[2,2] += -self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[2,3] += -self.centroid[face_n][1] * self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[2,4] += self.centroid[face_n][0] * self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[3,3] += -self.centroid[face_n][1]**2 * self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[3,4] += self.centroid[face_n][0] * self.centroid[face_n][1] * self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[4,4] += -self.centroid[face_n][0]**2 * self.normals[face_n][2] * self.cell_surface_area[face_n]
self._hydrostatic_stiffness[3,3] += self.volume_x * self.center_of_buoyancy[2] - self.volume_x * self.center_of_gravity[2]
self._hydrostatic_stiffness[3,5] += -self.volume_x * self.center_of_buoyancy[0] + self.volume_x * self.center_of_gravity[0]
self._hydrostatic_stiffness[4,4] += self.volume_x * self.center_of_buoyancy[2] - self.volume_x * self.center_of_gravity[2]
self._hydrostatic_stiffness[4,5] += -self.volume_x * self.center_of_buoyancy[1] + self.volume_x * self.center_of_gravity[1]
            print 'Calculated hydrostatic stiffness'
return self._hydrostatic_stiffness
@property
def center_of_buoyancy(self, ):
'''Getter for the `center_of_buoyancy` variable.
Calculated as defined in Section 3.1 of the WAMIT v7.0 users manual.
'''
if self._center_of_buoyancy is None:
x_b = 0.
y_b = 0.
z_b = 0.
self._center_of_buoyancy = 0.
for face_n,face in enumerate(self.faces):
if self.points[face[0]][2] <= self.zero_tol or \
self.points[face[1]][2] <= self.zero_tol or \
self.points[face[2]][2] <= self.zero_tol or \
self.points[face[3]][2] <= self.zero_tol:
x_b += self.normals[face_n][0]*self.centroid[face_n][0]**2*self.cell_surface_area[face_n]
y_b += self.normals[face_n][1]*self.centroid[face_n][1]**2*self.cell_surface_area[face_n]
z_b += self.normals[face_n][2]*self.centroid[face_n][2]**2*self.cell_surface_area[face_n]
self._center_of_buoyancy = 1./(2.*self.volume_x )*np.array([x_b, y_b, z_b])
print 'Calculated the center of buoyancy'
return self._center_of_buoyancy
@property
def normals(self, ):
if self._normals is None:
self._normals = {}
for face_n in xrange(self.faces.shape[0]):
a = self.points[self.faces[face_n][1]] - self.points[self.faces[face_n][0]]
b = self.points[self.faces[face_n][2]] - self.points[self.faces[face_n][1]]
self._normals[face_n] = np.cross(a,b)
if self._normals[face_n][0] == 0. and self._normals[face_n][1] == 0. and self._normals[face_n][2] == 0.:
a = self.points[self.faces[face_n][2]] - self.points[self.faces[face_n][1]]
b = self.points[self.faces[face_n][3]] - self.points[self.faces[face_n][2]]
self._normals[face_n] = np.cross(a,b)
if self._normals[face_n][0] == 0. and self._normals[face_n][1] == 0. and self._normals[face_n][2] == 0.:
a = self.points[self.faces[face_n][2]] - self.points[self.faces[face_n][0]]
b = self.points[self.faces[face_n][3]] - self.points[self.faces[face_n][2]]
self._normals[face_n] = np.cross(a,b)
self._normals[face_n] /= np.linalg.norm(self._normals[face_n])
print 'Calculated mesh cell normals'
return self._normals
@property
def cell_surface_area(self):
'''Getter for `cell_surface_area`
        Calculated as the sum of the areas of the two triangles obtained by splitting each quad face along a diagonal.
'''
if self._cell_surface_area is None:
self._cell_surface_area = {}
for face_n in xrange(self.faces.shape[0]):
a = self.points[self.faces[face_n][1]] - self.points[self.faces[face_n][0]]
b = self.points[self.faces[face_n][2]] - self.points[self.faces[face_n][1]]
c = self.points[self.faces[face_n][3]] - self.points[self.faces[face_n][2]]
d = self.points[self.faces[face_n][0]] - self.points[self.faces[face_n][3]]
self._cell_surface_area[face_n] = 1./2. * ( np.linalg.norm(np.cross(a,b)) + np.linalg.norm(np.cross(c,d)) )
print 'Calculated surface cell area'
return self._cell_surface_area
@property
def surface_area(self):
if self._surface_area is None:
self._surface_area = sum(self.cell_surface_area.values())
print 'Calculated surface area'
return self._surface_area
@property
def volume_vtk(self):
if self.VTK_installed is False:
raise VTK_Exception('VTK must be installed to access the volume_vtk property')
if self._volume_vtk is None:
tri_converter = vtk.vtkTriangleFilter()
tri_converter.SetInputDataObject(self.vtp_mesh)
tri_converter.Update()
tri_mesh = tri_converter.GetOutput()
mass_props = vtk.vtkMassProperties()
mass_props.SetInputDataObject(tri_mesh)
self._volume_vtk = mass_props.GetVolume()
print 'Calculated mesh volume using VTK library'
return self._volume_vtk
@property
def centroid(self):
if self._centroid is None:
self._centroid = {}
for face_n,face in enumerate(self.faces):
points = [self.points[face[0]],
self.points[face[1]],
self.points[face[2]],
self.points[face[3]]]
points = map(np.asarray, set(map(tuple, points))) # This removes duplicate points... somehow
self._centroid[face_n] = np.mean(points,axis=0)
return self._centroid
@property
def volume_x(self):
if self._volume_x is None:
self._calc_component_vol()
return self._volume_x
@property
def volume_y(self):
if self._volume_y is None:
self._calc_component_vol()
return self._volume_y
@property
def volume_z(self):
if self._volume_z is None:
self._calc_component_vol()
return self._volume_z
@property
def surface_area_vtk(self):
if self.VTK_installed is False:
raise VTK_Exception('VTK must be installed to access the surface_area_vtk property')
if self._surface_area_vtk is None:
tri_converter = vtk.vtkTriangleFilter()
tri_converter.SetInputDataObject(self.vtp_mesh)
tri_converter.Update()
tri_mesh = tri_converter.GetOutput()
mass_props = vtk.vtkMassProperties()
mass_props.SetInputDataObject(tri_mesh)
self._surface_area_vtk = mass_props.GetSurfaceArea()
print 'Calculated mesh surface area using VTK Python bindings'
return self._surface_area_vtk
def write(self,mesh_format='VTP'):
'''Function to write NEMOH, WAMIT, or VTK PolyData formats.
Parameters:
mesh_format : string {'VTP', 'WAMIT', 'NEMOH'}
Variable that specifies the mesh format to write.
Examples:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
Here is how a WAMIT mesh would be written
            >>> mesh.write(mesh_format='WAMIT')
'''
if mesh_format == 'VTK' or mesh_format == 'VTP':
self._write_vtp()
if mesh_format == 'WAMIT' or mesh_format == 'GDF':
self._write_gdf()
if mesh_format == 'NEMOH':
self._write_nemoh()
def calculate_center_of_gravity_vtk(self, ):
'''Function to calculate the center of gravity
.. Note::
            The VTK Python bindings must be installed to use this function
Examples:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
>>> mesh.calculate_center_of_gravity_vtk()
'''
if self.VTK_installed is False:
raise VTK_Exception('VTK must be installed to access the calculate_center_of_gravity_vtk function')
com = vtk.vtkCenterOfMass()
if vtk.VTK_MAJOR_VERSION >= 6:
com.SetInputData(self.vtp_mesh)
else:
com.SetInput(self.vtp_mesh)
com.Update()
self.center_of_gravity = com.GetCenter()
print 'Calculated center of gravity assuming uniform material density'
# def cut(self,plane=2,value=0.0,direction=1):
'''This function is not currently working 100%
'''
# self.collapse(plane,value,direction)
#
# tempFaces = []
# count = 0
#
# for i in xrange(self.faces.shape[0]):
#
# delete_face = 0
#
# for j in xrange(4):
#
# p = self.faces[i][j]
# z = float(self.cords[int(p)][2])
#
# if z == 0.:
# delete_face += 1
#
# if delete_face != 4:
# tempFaces.append(self.faces[i])
# count += 1
#
# print 'removed ' + str(count) + ' surface faces'
# self.faces = tempFaces
# self.faces.shape[0] = self.faces.shape[0]
def view(self, color=[0.5,1,0.5], opacity=1.0, save_png=False, camera_pos=[50,50,50], interact=True):
'''Function to view the mesh using the VTK library
Parameters:
color : list, optional
VTK color specification for the mesh
            opacity : float, optional
                VTK opacity for the mesh. Must be between 0. and 1.
            save_png : bool
                Boolean operator that determines if a .png image of the mesh is
                saved.
            interact : bool, optional
                Boolean operator that determines if the user can interact with
the geometry (e.g. zoom and rotate) after it is displayed
            camera_pos : list, optional
                Initial [x, y, z] position of the VTK camera
Examples:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
>>> mesh.view()
The mesh view window must be closed in order to return command to
the Python shell
'''
if self.VTK_installed is False:
raise VTK_Exception('VTK must be installed to use the view function')
# Create a mapper and load VTP data into the mapper
mapper=vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION >= 6:
mapper.SetInputData(self.vtp_mesh)
else:
mapper.SetInput(self.vtp_mesh)
# Create an actor that contains the data in the mapper
actor=vtk.vtkActor()
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
actor.SetMapper(mapper)
actor.GetProperty().EdgeVisibilityOn()
# Camera
camera = vtk.vtkCamera();
camera.SetPosition(camera_pos)
camera.SetFocalPoint(0, 0, 0)
# Add axes
axes = vtk.vtkAxesActor()
# Render the data
ren = vtk.vtkRenderer()
ren.AddActor(actor)
ren.AddActor(axes)
ren.SetActiveCamera(camera)
# Create a render window
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(800, 800)
        # Start the visualization
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.SetBackground(0,0,0)
renWin.Render()
vtk.vtkPolyDataMapper().SetResolveCoincidentTopologyToPolygonOffset()
if save_png is True:
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
            writer.SetFileName(self.files['png'])
writer.SetInputDataObject(w2if.GetOutput())
writer.Write()
            print 'Wrote mesh image to: ' + self.files['png']
if interact is True:
iren.Start()
def scale(self, scale_vect):
'''Function used to scale mesh objects in the x, y, and z directions.
Parameters:
scale_vect : list
A list that contains the x, y, and z scale factors for the mesh
Examples:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
Here is how to scale a mesh by a factor of 2 in the x direction and
.5 in the y direction:
>>> mesh.scale(scale_vect=[2, 0.5, 1])
'''
scale_vect = np.array(scale_vect)
if scale_vect.size != 3:
raise Exception('The scale_vect input must be a length 3 vector')
self.points = self.points*scale_vect
self.scale_vect = scale_vect
self._create_vtp_mesh()
print 'Scaled mesh by: ' + str(scale_vect)
def translate(self,translation_vect,translate_cog=True):
        '''Function used to translate mesh objects in the x, y, and z directions.
Parameters:
translation_vect : list
A list that contains the desired x, y, and z translation for the
mesh
Examples:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
Here is how to translate a mesh by 2 in the x direction and
.5 in the y direction:
            >>> mesh.translate(translation_vect=[2, 0.5, 0])
'''
translation_vect = np.array(translation_vect)
if translation_vect.size != 3:
raise Exception('The translation_vect input must be a length 3 vector')
self.points += translation_vect
self.translation_vect = translation_vect
if translate_cog is True:
self.center_of_gravity += translation_vect
print 'Translated mesh by: ' + str(translation_vect) + '\nCenter of gravity is: ' + str(self.center_of_gravity)
def open(self):
'''Function to open a VTK PolyData object in the default viewer of your
operating system.
.. Note::
This function is only available for OSX and Linux systems and
            requires you to have a program installed that has the ability to
open VTK PolyData (.vtp) files.
Example:
This example assumes that a mesh has been read by bemio and mesh
data is contained in a `PanelMesh` object called `mesh`
>>> mesh.open()
'''
self.write(mesh_format='VTP')
if _system() == 'Darwin':
os.system('open ' + self.files['vtp'])
elif _system() == 'Linux':
            os.system('xdg-open ' + self.files['vtp'])
else:
            raise Exception('The open function is only supported on OSX and Linux')
def _create_vtp_mesh(self):
        '''Internal function to create a VTP mesh from the imported mesh data
'''
if self.VTK_installed is True:
self.vtp_mesh = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
for i in range(self.points.shape[0]):
x= self.points[i]
#print type(x)
# type could be either numpy.ndarray or list
if type(x).__module__ == np.__name__ :
#print "[DEBUG:]type is ndarray, converting into float"
y = x.astype(np.float)
points.InsertPoint(i, y)
elif isinstance(x,list):
#print "[DEBUG:]type is list, converting list int float!"
y= [float(j) for j in x]
if len(y)>3:
#print "[DEBUG:]size of list > 3"
y=y[:3]
elif len(y)<3:
                        # Pad the list up to 3 elements with zeros.
for n in range(len(y), 3):
y.append(0)
points.InsertPoint(i, y)
else :
points.InsertPoint(i, self.points[i])
for i in range(self.faces.shape[0]):
polys.InsertNextCell(_mk_vtk_id_list(self.faces[i]))
self.vtp_mesh.SetPoints(points)
self.vtp_mesh.SetPolys(polys)
# def _collapse(self,plane=2,value=0.0,direction=1):
#This function is not yet working 100%
# '''Collapse points
# '''
# for face,face_n in xrange(self.faces.shape[0]):
#
# for j in xrange(self.faces[i].size):
#
# p = int(self.faces[i][j])
#
# if self.points[p][plane] > value*direction:
#
# self.points[p][plane] = value
def _write_vtp(self):
'''Internal function to write VTK PolyData mesh files
'''
if self.VTK_installed is False:
raise VTK_Exception('VTK must be installed write VTP/VTK meshes, please select a different output mesh_format')
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(self.files['vtp'])
if vtk.VTK_MAJOR_VERSION >= 6:
writer.SetInputData(self.vtp_mesh)
else:
writer.SetInput(self.vtp_mesh)
writer.SetDataModeToAscii()
writer.Write()
print 'Wrote VTK PolyData mesh to: ' + str(self.files['vtp'])
def _write_nemoh(self):
'''Internal function to write NEMOH mesh files
'''
with open(self.files['nemoh'],'w') as fid:
fid.write('2 0') # This should not be hard coded
fid.write('\n')
for i in xrange(self.points.shape[0]):
fid.write(str(i+1) + ' ' +str(self.points[i]).replace('[','').replace(']',''))
fid.write('\n')
fid.write('0 0 0 0')
fid.write('\n')
for i in xrange(self.faces.shape[0]):
fid.write(str(self.faces[i]+1).replace('[','').replace(']','').replace('.',''))
fid.write('\n')
fid.write('0 0 0 0')
print 'Wrote NEMOH mesh to: ' + str(self.files['nemoh'])
def _write_gdf(self):
'''Internal function to write WAMIT mesh files
'''
with open(self.files['wamit'],'w') as fid:
fid.write('Mesh file written by meshio.py')
fid.write('\n')
fid.write('1 9.80665 ULEN GRAV')
fid.write('\n')
fid.write('0 0 ISX ISY')
fid.write('\n')
fid.write(str(self.faces.shape[0]))
fid.write('\n')
for i,face in enumerate(self.faces):
                if np.size(face) == 4: # if the mesh element is a quad
for j,pointKey in enumerate(face):
fid.write(str(self.points[pointKey]).replace(',','').replace('[','').replace(']','') + '\n')
                if np.size(face) == 3: # if the mesh element is a tri
faceMod = np.append(face,face[-1])
for j,pointKey in enumerate(faceMod):
fid.write(str(self.points[pointKey]).replace(',','').replace('[','').replace(']','') + '\n')
print 'Wrote WAMIT mesh to: ' + str(self.files['wamit'])
def _calc_component_vol(self, ):
'''Internal function to calculate mesh volume using the methods
described in Section 3.1 the WAMIT v7.0 users manual.
'''
self._volume_x = 0.
self._volume_y = 0.
self._volume_z = 0.
volume = 0.
for face_n in xrange(self.faces.shape[0]):
volume += self.normals[face_n]*self.centroid[face_n]*self.cell_surface_area[face_n]
self._volume_x = volume[0]
self._volume_y = volume[1]
self._volume_z = volume[2]
print 'Calculated x y and z mesh volumes'
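    # Editorial note (not part of the original file): the loop above evaluates
    # the divergence-theorem estimate described in Section 3.1 of the WAMIT
    # manual, V_i ~ sum over faces of n_i * c_i * A, where n is the face
    # normal, c the face centroid and A the face area; the three components
    # should agree for a closed mesh.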
def _read_gdf(file_name):
'''Internal function to read gdf wamit meshes
'''
with open(file_name,'r') as fid:
lines = fid.readlines()
mesh_data = PanelMesh(file_name)
mesh_data.orig_type = 'WAMIT (.gdf)'
mesh_data.gdfLines = lines
mesh_data.uLen = float(lines[1].split()[0])
mesh_data.gravity = float(lines[1].split()[1])
mesh_data.isx = float(lines[2].split()[0])
mesh_data.isy = float(lines[2].split()[1])
mesh_data.num_faces = int(lines[3].split()[0])
mesh_data.num_points = mesh_data.num_faces * 4
mesh_data.points = np.array([temp.split() for temp in lines[4:]])
    mesh_data.pointsString = [str(temp).replace("," ,'').replace('\r','') for temp in lines[4:]] # Output string for NEMOH mesh file
for panelNum,i in enumerate(np.arange(4,4+mesh_data.num_points,4)):
mesh_data.faces.append(np.array([i-4,i-3,i-2,i-1]))
mesh_data.faces = np.array(mesh_data.faces)
return mesh_data
def _read_stl(file_name):
'''Internal function to read stl mesh files
'''
reader = vtk.vtkSTLReader()
reader.SetFileName(file_name)
reader.Update()
mesh_data = PanelMesh(file_name)
mesh_data.orig_type = 'Stereolithography (.stl)'
mesh_data.num_faces = int(reader.GetOutput().GetNumberOfCells())
mesh_data.num_points = mesh_data.num_faces * 3
for i in range(mesh_data.num_faces):
n = i*3
mesh_data.faces.append(np.array([n,n+1,n+2,n+2]))
mesh_data.points.append(np.array(vtk_to_numpy(reader.GetOutput().GetCell(i).GetPoints().GetData())))
mesh_data.points = np.array(mesh_data.points).reshape([mesh_data.num_faces*3,3])
return mesh_data
def _read_vtp(file_name):
'''Internal function to read vtp mesh files
'''
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
mesh_data = PanelMesh(file_name)
mesh_data.orig_type = 'VTK Polydata (.vtp)'
readerOut = reader.GetOutput()
mesh_data.num_faces = int(readerOut.GetNumberOfCells())
mesh_data.num_points = int(readerOut.GetNumberOfPoints())
for i in xrange(mesh_data.num_points):
mesh_data.points.append(readerOut.GetPoint(i))
mesh_data.points = np.array(mesh_data.points)
for i in xrange(mesh_data.num_faces):
c = readerOut.GetCell(i)
numCellPoints = int(c.GetNumberOfPoints())
idsTemp = []
for i in xrange(numCellPoints):
idsTemp.append(int(c.GetPointId(i)))
mesh_data.faces.append(np.array(idsTemp))
mesh_data.faces = np.array(mesh_data.faces)
return mesh_data
def _read_nemoh(file_name):
'''Internal function to read nemoh mesh
'''
with open(file_name,'r') as fid:
lines = fid.readlines()
temp = np.array([np.array(str(lines[i]).split()).astype(float) for i in range(1,np.size(lines))])
count = 0
mesh_data = PanelMesh(file_name)
mesh_data.orig_type = 'NEMOH (.dat)'
while temp[count,0] != 0.:
mesh_data.points.append(temp[count,1:])
count += 1
count += 1
while sum(temp[count,:]) != 0.:
mesh_data.faces.append(temp[count,:])
count += 1
mesh_data.points = np.array(mesh_data.points)
mesh_data.faces = np.array(mesh_data.faces)-1
mesh_data.num_points = np.shape(mesh_data.points)[0]
mesh_data.num_faces = np.shape(mesh_data.faces)[0]
return mesh_data
def read(file_name):
    '''Function to read surface mesh files. Currently VTK PolyData (.vtp),
WAMIT (.gdf), NEMOH (.dat), and Stereolithography (.stl) mesh formats are
supported
Parameters:
file_name : str
Name of the mesh file
Returns:
mesh_data : PanelMesh
A PanelMesh object that contains the mesh data
    Examples:
        This example assumes that a VTK PolyData mesh named mesh.vtp exists
in the current working directory
>>> mesh = read('mesh.vtp')
The mesh can then be converted to another format using the `write`
function. In this case a wamit mesh is created.
>>> mesh.write(mesh_format='WAMIT')
If the VTK python bindings are installed the mesh can be viewed using
the following command:
>>> mesh.view()
        If you are using OSX or Linux and have ParaView installed you can
        view the file using the following command:
>>> mesh.open()
'''
print 'Reading mesh file: ' + str(file_name)
file_name = os.path.abspath(file_name)
(f_name,f_ext) = os.path.splitext(file_name)
if f_ext == '.GDF' or f_ext == '.gdf':
mesh_data = _read_gdf(file_name)
elif f_ext == '.stl':
mesh_data = _read_stl(file_name)
elif f_ext == '.vtp':
mesh_data = _read_vtp(file_name)
elif f_ext == '.dat':
mesh_data = _read_nemoh(file_name)
else:
raise Exception(f_ext + ' is an unsupported file mesh file type')
mesh_data.files_base = os.path.splitext(file_name)[0]
head, tail = os.path.split(mesh_data.files_base)
mesh_data.files['vtp'] = mesh_data.files_base + '.vtp'
mesh_data.files['wamit'] = mesh_data.files_base + '.gdf'
#mesh_data.files['nemoh'] = mesh_data.files_base + '.dat'
mesh_data.files['nemoh'] = os.path.dirname(file_name)+'/openwarp/'+ tail + '.dat'
mesh_data.files['png'] = os.path.splitext(file_name)[0] + '.png'
if mesh_data.VTK_installed is True:
mesh_data._create_vtp_mesh()
print 'Successfully read mesh file: ' + str(file_name)
return mesh_data
def _mk_vtk_id_list(it):
'''
Internal function to make vtk id list object
Parameters:
it : list
List of nodes that define a face
Returns:
vil: vtkIdList
A vtkIdList object
'''
vil = vtk.vtkIdList()
for i in it:
vil.InsertNextId(int(i))
return vil
def collapse_to_plane(mesh_obj, plane_ind=2, plane_loc=-1e-5, cut_dir=1.):
'''Function to collapse points to a given plane
.. Note::
This function is not yet implemented
'''
pass
def cut_mesh(mesh_obj, plane_ind=2, plane_loc=-1e-5, cut_dir=1.):
'''Function to remove cells on one side of plane
.. Note::
This function is still early in the stages of development and needs
to be improved and made more robust.
Parameters:
mesh_obj : PanelMesh
Mesh object to cut
plane_ind : int, optional
Index of plane along which to cut the mesh, 0 == x, 1 == y, 2 == z
plane_loc : float
Location of the mesh cut
cut_dir : int, {1, -1}
Direction for the mesh cut
Returns:
cut_mesh : PanelMesh
Panel mesh object that has been cut as specified
Examples:
        None available to date
'''
cut_mesh = copy(mesh_obj)
tempFaces = []
cut_mesh.removed_faces = []
cut_mesh.removed_points = []
for face_n,face in enumerate(cut_mesh.faces):
if cut_mesh.points[face[0]][plane_ind] <= plane_loc*cut_dir or \
cut_mesh.points[face[1]][plane_ind] <= plane_loc*cut_dir or \
cut_mesh.points[face[2]][plane_ind] <= plane_loc*cut_dir or \
cut_mesh.points[face[3]][plane_ind] <= plane_loc*cut_dir:
tempFaces.append(face)
else:
cut_mesh.removed_faces.append(face)
cut_mesh.removed_points.append(cut_mesh.points[face[0]])
cut_mesh.removed_points.append(cut_mesh.points[face[1]])
cut_mesh.removed_points.append(cut_mesh.points[face[2]])
cut_mesh.removed_points.append(cut_mesh.points[face[3]])
cut_mesh.faces = np.array(tempFaces)
cut_mesh.removed_faces = np.array(cut_mesh.removed_faces)
cut_mesh.removed_points = np.array(cut_mesh.removed_points)
cut_mesh._create_vtp_mesh()
cut_mesh.files_base = os.path.splitext(cut_mesh.file_name)[0] + '_cut_mesh_bemio_output'
    print 'Cut mesh along axis [' + str(plane_ind) + '] in direction [' + str(cut_dir) + '] at location [' + str(plane_loc) + ']'
return cut_mesh
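# A minimal usage sketch for the readers and cut utility above, assuming a VTK
# PolyData mesh named mesh.vtp exists in the working directory and that
# PanelMesh exposes the `write` method referenced in the `read` docstring:
#
#   >>> mesh = read('mesh.vtp')
#   >>> cut = cut_mesh(mesh, plane_ind=2, plane_loc=0.0, cut_dir=1.)
#   >>> cut.write(mesh_format='WAMIT')   # export the cut mesh as a .gdf file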
|
NREL/OpenWARP
|
source/automated_test/Test1/mesh.py
|
Python
|
apache-2.0
| 35,411
|
[
"ParaView",
"VTK"
] |
a2eb4b47e648e6a17313c67b2f677d50f98fd70ecb9a88d146e1781c0b89d732
|
import os
import yaml
from yaml import CLoader as Loader, CDumper as Dumper
class MooseYaml(object):
"""
A utility to read the YAML data from MOOSE.
Args:
raw[str]: The raw yaml output from MOOSE.
"""
def __init__(self, raw):
raw = raw.split('**START YAML DATA**\n')[-1]
raw = raw.split('**END YAML DATA**')[0]
self._data = yaml.load(raw, Loader=Loader)
def get(self):
return self._data
def __str__(self):
output = []
for itr in self._data:
output += self._print(itr)
return '\n'.join(output)
def dump(self, **kwargs):
label = kwargs.pop('label', None)
output = []
for itr in self._data:
output += self._print(itr, 0, label)
print '\n'.join(output)
def find(self, key):
for itr in self._data:
output = self._search(key, itr, fuzzy=False)
if output:
return output[0]
return None
def __getitem__(self, key):
"""
Operator [] access to the yaml blocks.
Args:
key[str]: The yaml key to return.
"""
output = []
for itr in self._data:
output += self._search(key, itr)
return output
@staticmethod
def _search(key, data, **kwargs):
"""
A helper method for locating the desired yaml data.
"""
fuzzy = kwargs.pop('fuzzy', True)
output = []
if (fuzzy and data['name'].endswith(key)) or (key == data['name']):
output.append(data)
if data['subblocks']:
for child in data['subblocks']:
child_data = MooseYaml._search(key, child)
if child_data:
output += child_data
return output
@staticmethod
def _print(data, level=0, label=None):
output = []
        if (label is None) or (('labels' in data) and (label in data['labels'])):
output.append(' '*2*level + data['name'])
if data['subblocks']:
for child in data['subblocks']:
output += MooseYaml._print(child, level+1, label)
return output
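# A minimal usage sketch, assuming `raw` holds the `--yaml` output of a MOOSE
# executable (the constructor strips the **START/END YAML DATA** markers); the
# block name '/Kernels' and the label 'core' below are illustrative only:
#
#   moose = MooseYaml(raw)
#   matches = moose['/Kernels']      # fuzzy match: blocks whose name ends with the key
#   block = moose.find('/Kernels')   # exact match: first matching block, or None
#   moose.dump(label='core')         # print only blocks tagged with the given label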
|
katyhuff/moose
|
python/utils/MooseYaml.py
|
Python
|
lgpl-2.1
| 2,205
|
[
"MOOSE"
] |
e83f5b14d28270245fa7c02d5ca2b43d994a9d65b1bba90607a6717867621e5a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import appointmentresponse
from .fhirdate import FHIRDate
class AppointmentResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("AppointmentResponse", js["resourceType"])
return appointmentresponse.AppointmentResponse(js)
def testAppointmentResponse1(self):
inst = self.instantiate_from("appointmentresponse-example-req.json")
        self.assertIsNotNone(inst, "Must have instantiated an AppointmentResponse instance")
self.implAppointmentResponse1(inst)
js = inst.as_json()
self.assertEqual("AppointmentResponse", js["resourceType"])
inst2 = appointmentresponse.AppointmentResponse(js)
self.implAppointmentResponse1(inst2)
def implAppointmentResponse1(self, inst):
self.assertEqual(inst.comment, "can't we try for this time, can't do mornings")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T13:30:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T13:30:00Z")
self.assertEqual(inst.id, "exampleresp")
self.assertEqual(inst.identifier[0].system, "http://example.org/sampleappointmentresponse-identifier")
self.assertEqual(inst.identifier[0].value, "response123")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.participantStatus, "tentative")
self.assertEqual(inst.participantType[0].coding[0].code, "ATND")
self.assertEqual(inst.participantType[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T13:15:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T13:15:00Z")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Accept Brian MRI results discussion</div>")
self.assertEqual(inst.text.status, "generated")
def testAppointmentResponse2(self):
inst = self.instantiate_from("appointmentresponse-example.json")
        self.assertIsNotNone(inst, "Must have instantiated an AppointmentResponse instance")
self.implAppointmentResponse2(inst)
js = inst.as_json()
self.assertEqual("AppointmentResponse", js["resourceType"])
inst2 = appointmentresponse.AppointmentResponse(js)
self.implAppointmentResponse2(inst2)
def implAppointmentResponse2(self, inst):
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.participantStatus, "accepted")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Accept Brian MRI results discussion</div>")
self.assertEqual(inst.text.status, "generated")
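# How these tests are typically run (a sketch; the dotted path follows this
# repository's layout): the module uses package-relative imports, so invoke it
# through unittest with its package path, and point FHIR_UNITTEST_DATADIR at a
# directory containing appointmentresponse-example-req.json and
# appointmentresponse-example.json (otherwise the current directory is used):
#
#   FHIR_UNITTEST_DATADIR=/path/to/fhir/examples \
#   python -m unittest rdr_service.lib_fhir.fhirclient_4_0_0.models.appointmentresponse_tests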
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/appointmentresponse_tests.py
|
Python
|
bsd-3-clause
| 3,510
|
[
"Brian"
] |
69a6b354953287256d43b8723c7275b725bda29c95fb3bb4f692c142adea8ed5
|
import numpy as np
from multiphenotype_utils import (get_continuous_features_as_matrix, add_id, remove_id_and_get_mat,
partition_dataframe_into_binary_and_continuous, divide_idxs_into_batches)
import pandas as pd
import tensorflow as tf
from dimreducer import DimReducer
from scipy.special import expit
from general_autoencoder import GeneralAutoencoder
from standard_autoencoder import StandardAutoencoder
class VariationalAutoencoder(StandardAutoencoder):
"""
Implements a standard variational autoencoder (diagonal Gaussians everywhere).
"""
def __init__(self,
**kwargs):
super(VariationalAutoencoder, self).__init__(**kwargs)
self.sigma_scaling = .1 # keeps the sigmas (ie, the std of the normal from which Z is drawn) from getting too large.
def init_network(self):
self.weights = {}
self.biases = {}
if self.learn_continuous_variance:
# we exponentiate this because it has to be non-negative.
self.log_continuous_variance = tf.Variable(self.initialization_function([1]))
# Encoder layers.
for encoder_layer_idx, encoder_layer_size in enumerate(self.encoder_layer_sizes):
if encoder_layer_idx == 0:
input_dim = len(self.feature_names) + self.include_age_in_encoder_input # if we include age in input, need one extra feature.
else:
input_dim = self.encoder_layer_sizes[encoder_layer_idx - 1]
output_dim = self.encoder_layer_sizes[encoder_layer_idx]
print("Added encoder layer with input dimension %i and output dimension %i" % (input_dim, output_dim))
self.weights['encoder_h%i' % encoder_layer_idx] = tf.Variable(
self.initialization_function([input_dim, output_dim]))
self.biases['encoder_b%i' % encoder_layer_idx] = tf.Variable(
self.initialization_function([output_dim]))
self.weights['encoder_h%i_sigma' % encoder_layer_idx] = tf.Variable(
self.initialization_function([input_dim, output_dim]))
self.biases['encoder_b%i_sigma' % encoder_layer_idx] = tf.Variable(
self.initialization_function([output_dim]))
# Decoder layers.
self.decoder_layer_sizes.append(len(self.feature_names))
for decoder_layer_idx, decoder_layer_size in enumerate(self.decoder_layer_sizes):
if decoder_layer_idx == 0:
input_dim = self.k
else:
input_dim = self.decoder_layer_sizes[decoder_layer_idx - 1]
output_dim = self.decoder_layer_sizes[decoder_layer_idx]
print("Added decoder layer with input dimension %i and output dimension %i" % (input_dim, output_dim))
self.weights['decoder_h%i' % decoder_layer_idx] = tf.Variable(
self.initialization_function([input_dim, output_dim]))
self.biases['decoder_b%i' % decoder_layer_idx] = tf.Variable(
self.initialization_function([output_dim]))
def set_up_encoder_structure(self):
"""
This function sets up the basic encoder structure and return arguments.
We need to return Z, Z_mu, and Z_sigma.
"""
self.Z, self.Z_mu, self.Z_sigma = self.encode(self.X)
def encode(self, X):
num_layers = len(self.encoder_layer_sizes)
# Get mu
mu = X
for idx in range(num_layers):
mu = tf.matmul(mu, self.weights['encoder_h%i' % (idx)]) \
+ self.biases['encoder_b%i' % (idx)]
# No non-linearity on the last layer
if idx != num_layers - 1:
mu = self.non_linearity(mu)
Z_mu = mu
# Get sigma
sigma = X
for idx in range(num_layers):
sigma = tf.matmul(sigma, self.weights['encoder_h%i_sigma' % (idx)]) \
+ self.biases['encoder_b%i_sigma' % (idx)]
# No non-linearity on the last layer
if idx != num_layers - 1:
sigma = self.non_linearity(sigma)
sigma = sigma * self.sigma_scaling # scale so sigma doesn't explode when we exponentiate it.
sigma = tf.exp(sigma)
Z_sigma = sigma
# Sample from N(mu, sigma)
eps = tf.random_normal(tf.shape(Z_mu), dtype=tf.float32, mean=0., stddev=1.0, seed=self.random_seed)
Z = Z_mu + Z_sigma * eps
return Z, Z_mu, Z_sigma
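        # The draw above is the standard reparameterization trick: with
        # eps ~ N(0, I), Z = Z_mu + Z_sigma * eps is a sample from
        # N(Z_mu, Z_sigma^2) that remains differentiable with respect to the
        # encoder weights producing Z_mu and Z_sigma.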
def sample_X_given_Z(self, Z):
"""
given a Z, samples X. Adds noise for both binary and continuous features following the autoencoder model.
"""
Xr = self.sess.run(self.Xr, feed_dict = {self.Z:Z})
# for binary features, need to convert logits to 1s and 0s by sampling.
Xr[:, self.binary_feature_idxs] = np.random.random(Xr[:, self.binary_feature_idxs].shape) < \
expit(Xr[:, self.binary_feature_idxs])
# for continuous features, need to add noise.
if self.learn_continuous_variance:
continuous_variance = np.exp(self.sess.run(self.log_continuous_variance)[0])
std = np.sqrt(continuous_variance)
else:
std = 1
Xr[:, self.continuous_feature_idxs] = Xr[:, self.continuous_feature_idxs] + \
np.random.normal(loc=0, scale=std, size=Xr[:, self.continuous_feature_idxs].shape)
return Xr
def sample_X(self, age, n):
"""
samples X by first sampling Z from the autoencoder prior, then feeding it through the model.
Draws n samples for people of a given age.
Important note: in general, this function relies on sample_Z, which automatically applies the age preprocessing function
to the passed in age, so there is no need to transform age ahead of time (for either sample_X or sample_Z).
"""
Z = self.sample_Z(age, n)
return self.sample_X_given_Z(Z)
def sample_Z(self, age, n):
return np.random.multivariate_normal(mean = np.zeros([self.k,]), cov = np.eye(self.k), size = n)
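        # Note: this base implementation ignores the `age` argument and simply
        # draws Z from the standard-normal prior; age-aware subclasses are
        # expected to override sample_Z (and fast_forward_Z) so that sampling
        # and forward projection respect an age state.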
def set_up_regularization_loss_structure(self):
self.reg_loss = self.get_regularization_loss(self.Z_mu, self.Z_sigma)
def get_regularization_loss(self, Z_mu, Z_sigma):
kl_div_loss = -.5 * (
1 +
2 * tf.log(Z_sigma) - tf.square(Z_mu) - tf.square(Z_sigma))
kl_div_loss = tf.reduce_mean(
tf.reduce_sum(
kl_div_loss,
axis=1),
axis=0)
return kl_div_loss
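    # For reference, the expression above is the closed-form KL divergence
    # between the diagonal-Gaussian posterior and the standard-normal prior,
    #   KL( N(mu, sigma^2) || N(0, 1) ) = -0.5 * (1 + 2*log(sigma) - mu^2 - sigma^2),
    # summed over latent dimensions and averaged over the batch.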
def project_forward(self, train_df, years_to_move_forward, add_noise_to_Z, add_noise_to_X):
"""
given a df and an autoencoder model, projects the train_df down into Z-space, moves it
years_to_move_forward in Z-space, then projects it back up. years_to_move_forward can be an array or a scalar.
This will not make sense unless the model has some notion of an age state and how it evolves,
so to implement this method, you need to implement fast_forward_Z.
if add_noise_to_Z is False, Z is projected onto the mean; otherwise, it's sampled.
if add_noise_to_X is False, X is decoded directly from Z (ie, it is Xr); otherwise, it's sampled.
"""
# cast years_to_move_forward to an array (I think this should be fine even if it is a scalar?)
years_to_move_forward = np.array(years_to_move_forward)
# project down to latent state.
if add_noise_to_Z:
Z0 = self.get_projections(train_df, project_onto_mean=False)
else:
Z0 = self.get_projections(train_df, project_onto_mean=True)
if (years_to_move_forward == 0).all():
# if we're not moving forward at all, Z0 is just Z. This is equivalent to reconstruction.
# we shouldn't actually need this if-branch, but it makes what is happening a little more explicit.
Z0_projected_forward = remove_id_and_get_mat(Z0)
else:
# move age components forward following the model's evolution rule.
Z0_projected_forward = self.fast_forward_Z(Z0, train_df, years_to_move_forward)
Z0_projected_forward = remove_id_and_get_mat(Z0_projected_forward)
# sample X again.
if add_noise_to_X:
projected_trajectory = self.sample_X_given_Z(Z0_projected_forward)
else:
projected_trajectory = self.sess.run(self.Xr, feed_dict = {self.Z:Z0_projected_forward})
assert projected_trajectory.shape[1] == len(self.feature_names)
return projected_trajectory
def project_forward_by_sampling_Z_and_then_sampling_X(self, train_df, years_to_move_forward):
"""
alternate way of projecting forward: sample Z from p(Z | X), then sample X. Not using this at present.
"""
# cast years_to_move_forward to an array (I think this should be fine even if it is a scalar?)
years_to_move_forward = np.array(years_to_move_forward)
n_iterates = 100
for i in range(n_iterates):
Z0 = self.get_projections(train_df, project_onto_mean=False)
if (years_to_move_forward == 0).all():
# if we're not moving forward at all, Z0 is just Z. This is equivalent to reconstruction.
# we shouldn't actually need this if-branch, but it makes what is happening a little more explicit.
Z0_projected_forward = remove_id_and_get_mat(Z0)
else:
# move age components forward following the model's evolution rule.
Z0_projected_forward = self.fast_forward_Z(Z0, train_df, years_to_move_forward)
Z0_projected_forward = remove_id_and_get_mat(Z0_projected_forward)
sampled_X = self.sample_X_given_Z(Z0_projected_forward)
if i == 0:
projected_trajectory = sampled_X
else:
projected_trajectory = projected_trajectory + sampled_X
assert projected_trajectory.shape[1] == len(self.feature_names)
return projected_trajectory / n_iterates
def fast_forward_Z(self, Z0, train_df, years_to_move_forward):
"""
given a Z, evolve it following the model's aging rule for years_to_move_forward years.
"""
        raise NotImplementedError
def compute_elbo(self, df, continuous_variance=1):
if self.learn_continuous_variance:
continuous_variance = np.exp(self.sess.run(self.log_continuous_variance)[0])
print("Warning: ignoring continuous variance input because we have already learned continuous variance: %2.3f" % continuous_variance)
data, binary_feature_idxs, continuous_feature_idxs, feature_names = \
partition_dataframe_into_binary_and_continuous(df)
ages = None
age_adjusted_data = None
if self.need_ages:
# in general, self.need_ages is True for the variational age autoencoders
# (most of our interesting models). It is False for models that have nothing to do with age:
# eg, the simple variational autoencoder.
ages = self.get_ages(df)
# some models additionally require us to compute the age_adjusted_data, so we compute that just in case.
# eg, if we want to enforce sparse correlations between X (adjusted for age) and Z.
# age_adjusted_data is not actually used for most models.
age_adjusted_data = self.decorrelate_data_with_age(data, ages)
assert np.all(binary_feature_idxs == self.binary_feature_idxs)
assert np.all(continuous_feature_idxs == self.continuous_feature_idxs)
assert np.all(feature_names == self.feature_names)
print(("Computing ELBO with %i continuous features, %i binary features, "
"%i examples, continuous variance = %2.3f") %
(len(continuous_feature_idxs),
len(binary_feature_idxs),
len(data),
continuous_variance))
num_iter = 1 # set to more than one if you want to average runs together.
mean_binary_loss = 0
mean_continuous_loss = 0
mean_reg_loss = 0
for i in range(num_iter):
_, binary_loss, continuous_loss, reg_loss = self.minibatch_mean_eval(data,
ages,
age_adjusted_data=age_adjusted_data,
regularization_weighting=1)
mean_binary_loss += binary_loss / num_iter
mean_continuous_loss += continuous_loss / num_iter
mean_reg_loss += reg_loss / num_iter
# https://www.statlect.com/fundamentals-of-statistics/normal-distribution-maximum-likelihood
if not self.learn_continuous_variance:
# in this case, we have to add in the variance term since the mean_continuous_loss is just a squared-error term.
# if we learn the variance, the continuous loss is actually the full negative Gaussian log likelihood
# and there is no correction needed.
constant_offset_per_sample_and_feature = .5 * np.log(2 * np.pi) + .5 * np.log(continuous_variance)
mean_continuous_loss = constant_offset_per_sample_and_feature * len(continuous_feature_idxs) + mean_continuous_loss / continuous_variance
# note that we compute the elbo using a weight of 1 for the regularization loss regardless of the regularization weighting.
mean_combined_loss = mean_binary_loss + mean_continuous_loss + mean_reg_loss
elbo = -mean_combined_loss
print("Average ELBO per sample = %2.3f" % elbo)
return elbo
|
epierson9/multiphenotype_methods
|
variational_autoencoder.py
|
Python
|
mit
| 14,324
|
[
"Gaussian"
] |
598219599c177ea5765acdb9218f561f2325923ecf43a95c2583328490972d74
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
MAX_TEST = 10
for test_num in range(1,MAX_TEST+1):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 8000 # number of episodes
EPISODE_LENGTH = 500 # single episode length
HIDDEN_SIZE = 24
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
    CONST_LR = True # Constant or decaying learning rate
# Constant learning rate
const_learning_rate_in = 0.003
# Decay learning rate
start_learning_rate_in = 0.003
decay_steps_in = 100
decay_rate_in = 0.95
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
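    # The objective being minimized is the REINFORCE policy-gradient loss,
    # -E[expected_return * log pi(action)]: it raises the log-probability of
    # each sampled action in proportion to the return that followed it.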
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
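        # expected_R is the per-step "reward to go": the episode's total
        # discounted return minus the discounted reward accumulated before
        # each step, used as the weight on that step's log-probability.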
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
|
GitYiheng/reinforcement_learning_test
|
test03_monte_carlo/t28_rlvps01_hn24_clr0p003.py
|
Python
|
mit
| 7,660
|
[
"NEURON"
] |
dc9b6e6b66767ad2fd27a3f7f24b034358b5d45d12f128a864349793d1598148
|
import vigra
from vigra import graphs
from vigra import numpy
import pylab
# parameter
filepath = '12003.jpg' # input image path
sigmaGradMag = 2.0 # sigma Gaussian gradient
superpixelDiameter = 10 # super-pixel size
slicWeight = 10.0 # SLIC color - spatial weight
beta = 0.5 # node vs edge weight
nodeNumStop = 50 # desired num. nodes in result
# load image and convert to LAB
img = vigra.impex.readImage(filepath)
# get super-pixels with slic on LAB image
imgLab = vigra.colors.transform_RGB2Lab(img)
labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
superpixelDiameter)
labels = vigra.analysis.labelImage(labels)
# compute gradient on interpolated image
imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])
gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag)
# get 2D grid graph and edgeMap for grid graph
# from gradMag of interpolated image
gridGraph = graphs.gridGraph(img.shape[0:2])
gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph,
gradMag)
# get region adjacency graph from super-pixel labels
rag = graphs.regionAdjacencyGraph(gridGraph, labels)
# accumulate edge weights from gradient magnitude
edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator)
# accumulate node features from grid graph node map
# which is just a plain image (with channels)
nodeFeatures = rag.accumulateNodeFeatures(imgLab)
# do agglomerativeClustering
labels = graphs.agglomerativeClustering(graph=rag, edgeWeights=edgeWeights,
beta=beta, nodeFeatures=nodeFeatures,
nodeNumStop=nodeNumStop,wardness=0.0)
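# The call above greedily merges neighbouring superpixels in the region
# adjacency graph until roughly nodeNumStop regions remain; per the parameter
# comments at the top of the script, beta trades the node term (mean LAB
# colour per superpixel) off against the edge term (gradient magnitude
# accumulated along superpixel boundaries).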
# show result
f = pylab.figure()
ax1 = f.add_subplot(2, 2, 1)
vigra.imshow(gradMag,show=False)
ax1.set_title("Gradient Magnitude")
pylab.axis('off')
ax2 = f.add_subplot(2, 2, 2)
rag.show(img)
ax2.set_title("Over-Segmentation")
pylab.axis('off')
ax3 = f.add_subplot(2, 2, 3)
rag.show(img, labels)
ax3.set_title("Result-Segmentation")
pylab.axis('off')
ax4 = f.add_subplot(2, 2, 4)
rag.showNested(img, labels)
ax4.set_title("Result-Segmentation")
pylab.axis('off')
vigra.show()
|
timoMa/vigra
|
vigranumpy/examples/graph_agglomerative_clustering.py
|
Python
|
mit
| 2,297
|
[
"Gaussian"
] |
c696feaf37682d0c4b35b36c69836971508160b7c67c8f63a57f697bf03a105e
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" は、"field1=\'newvalue\'" のようなオプションです。"JOIN の結果を更新または削除することはできません。',
'# Houses Damaged': '損傷した家屋の数',
'# Houses Flooded': '浸水した家屋数',
'# People Needing Food': '食料が必要な人の数',
'# People at Risk From Vector-Borne Diseases': '生物が媒介する疾病の危険性がある人の数',
'# People without Access to Safe Drinking-Water': '安全な飲料水が確保されていない人の数',
'# of Houses Damaged': '損壊した家屋数',
'# of Houses Destroyed': '全壊した家屋数',
'# of International Staff': '国外スタッフ人数',
'# of National Staff': '国内スタッフの人数',
'# of People Affected': '被災者数',
'# of People Deceased': '死亡者数',
'# of People Injured': '負傷者数',
'# of Vehicles': '車両数',
'%(system_name)s - Verify Email': '%(system_name)s - Verify Email',
'%.1f km': '%.1f km',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%m-%d-%Y': '%m-%d-%Y',
'%m-%d-%Y %H:%M:%S': '%m-%d-%Y %H:%M:%S',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s 新しいサイトを作成するか既存のサイトに対する権限を持っているかどうか確認して下さい',
'%s rows deleted': '%s 行を削除しました',
'%s rows updated': '%s 行を更新しました',
'& then click on the map below to adjust the Lat/Lon fields': 'そして下の地図をクリックして、緯度 / 経度フィールドを調節してください',
"'Cancel' will indicate an asset log entry did not occur": "'Cancel' will indicate an asset log entry did not occur",
'* Required Fields': '* は必須項目です',
'0-15 minutes': '0-15 分間',
'1 Assessment': '1アセスメント',
'1 location, shorter time, can contain multiple Tasks': '1つの地域における短期間の活動を表し、1つの支援活動のなかで複数のタスクを実行します。',
'1-3 days': '1-3 日間',
'1. Fill the necessary fields in BLOCK letters.': '1. 太字の項目は必須項目です.',
'15-30 minutes': '15-30 分間',
'2 different options are provided here currently:': '現在は、2種類のオプションが提供されています。',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一マス一文字で、単語の間は一マス開けてください。',
'2x4 Car': '2x4 車両',
'30-60 minutes': '30-60 分間',
'4-7 days': '4-7 日間',
'4x4 Car': '四輪駆動車',
'8-14 days': '8-14 日間',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '機能クラスに設定したマーカーを上書きする必要があれば、個々のロケーションに設定したマーカーを設定します',
'A Reference Document such as a file, URL or contact person to verify this data.': 'A Reference Document such as a file, URL or contact person to verify this data.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'このデータ内容を確認できるファイルやURL情報、連絡先担当者などのリファレンスデータを記載します。最初の何文字かを入力することで、既存の類似文書にリンクすることが可能です。',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': '倉庫とは、救援物資の配布を行うことができる物理的な地点を意味します。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫 / サイトとは、物資の保管場所のことであり、住所とGIS情報が付帯します。特定の建物や、市内の特定地域などがあげられます。',
'A brief description of the group (optional)': 'グループの詳細(オプション)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'GPSからダウンロードしたファイルには、その地点に関する様々な情報がXML形式で保存されています。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'GPSから取得したGPX形式のファイル。タイムスタンプは画像と関連づけられ、地図上に配置することができます。',
'A file in GPX format taken from a GPS.': 'A file in GPX format taken from a GPS.',
'A library of digital resources, such as photos, documents and reports': '写真や文書、レポートなど、電子化された資料',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'A location group is a set of locations (often, a set of administrative regions representing a combined area).',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'ロケーションを取りまとめた単位はロケーショングループと呼称されます(たいていは、一定範囲内の管理対象地域をさします)。このページから、ロケーションをグループに追加することができます。ロケーショングループ単位で地図上に表示させたり、検索結果として表示させることが可能となります。グループを使用することで、1つの管理地域に縛られない被災地域定義が可能となります。ロケーショングループは、地域メニューから定義できます。',
'A location group must have at least one member.': 'ロケーショングループには、メンバーが最低一人必要です。',
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
'A place within a Site like a Shelf, room, bin number etc.': 'Site内に存在する施設。例えば棚、部屋、Binの番号など',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'binのスナップショットや追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID番号 %sに関するsurvey seriesは存在しません。「戻る」ボタンを押して、新規に作成してください。',
'A task is a piece of work that an individual or team can do in 1-2 days': 'A task is a piece of work that an individual or team can do in 1-2 days',
'ABOUT': '概要',
'ABOUT THIS MODULE': 'このモジュールについて',
'ACCESS DATA': 'アクセスデータ',
'ANY': '全て',
'API Key': 'API Key',
'API is documented here': 'APIに関する文書はこちら',
'ATC-20': 'ATC-20(建物の簡易安全性評価プロセス)',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ニュージーランド向けに変更したATC-20(建物の簡易安全性評価プロセス)',
'ATC-38': 'ATC-38',
'ATC-45': 'ATC-45',
'Abbreviation': '省略',
'Ability to Fill Out Surveys': '調査記入能力',
'Ability to customize the list of details tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズ可否',
'Ability to customize the list of human resource tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズの可否',
'Ability to customize the list of important facilities needed at a Shelter': '避難所で追跡する人的資源のリストのカスタマイズの可否',
'Ability to track partial fulfillment of the request': '支援要請の部分的な達成度の追跡可否',
'Ability to view Results of Completed and/or partially filled out Surveys': '完了または一部完了した聞き取り調査の結果をみる機能',
'About': '情報',
'About Sahana': 'Sahanaについて',
'About Sahana Eden': 'Sahana Edenについて',
'About this module': 'モジュールの詳細',
'Access denied': 'アクセスが拒否されました',
'Access to Shelter': '避難所へのアクセス',
'Access to education services': '学校へのアクセス',
'Accessibility of Affected Location': '被災地域へのアクセス方法',
'Accompanying Relative': 'Accompanying Relative',
'Account Registered - Please Check Your Email': 'Account Registered - Please Check Your Email',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '利用者登録の申請を受け付けました。所属団体またはサイト管理者による承認を待っています。',
'Acronym': '略称/イニシャル',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
'Actionable': '対応可能',
'Actionable by all targeted recipients': 'すべての対象受信者にとって実用的な',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '指定された参加者のみ実施可能です。<note>の中に行使するためのIDがあることが必要です。',
'Actioned?': '実施済み?',
'Actions': 'アクション',
'Actions taken as a result of this request.': '要請に対して行われるアクション',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).',
'Active': 'Active',
'Active Problems': '対処中の問題',
'Activities': '支援活動',
'Activities Map': '支援活動マップ',
'Activities are blue.': '支援活動(アクティビティ)は青色で表示されます。',
'Activities matching Assessments:': 'アセスメントに適合した支援活動',
'Activities of boys 13-17yrs before disaster': '災害発生前の13-17歳男子の活動状況',
'Activities of boys 13-17yrs now': '現在の13-17歳男子の活動状況',
'Activities of boys <12yrs before disaster': '災害発生前の12歳以下男子の活動状況',
'Activities of boys <12yrs now': '現在の12歳以下男子の活動状況',
'Activities of children': '子供たちの活動',
'Activities of girls 13-17yrs before disaster': '災害発生前の13-17歳女子の活動状況',
'Activities of girls 13-17yrs now': '現在の13-17歳女子の活動状況',
'Activities of girls <12yrs before disaster': '災害発生前の12歳以下女子の活動状況',
'Activities of girls <12yrs now': '現在の12歳以下女子の活動状況',
'Activities:': '支援活動(アクティビティ):',
'Activity': '支援活動',
'Activity Added': '支援活動を追加しました',
'Activity Deleted': '支援活動を削除しました',
'Activity Details': '支援活動の詳細',
'Activity Report': '支援活動レポート',
'Activity Reports': '支援活動レポート',
'Activity Type': '支援活動タイプ',
'Activity Updated': '支援活動を更新しました',
'Activity added': 'Activity added',
'Activity removed': 'Activity removed',
'Activity updated': 'Activity updated',
'Add': '追加',
'Add Activity': '支援活動を追加',
'Add Activity Report': '支援活動レポートを追加',
'Add Activity Type': '支援活動タイプを追加',
'Add Address': 'アドレスを追加',
'Add Aid Request': '治療要請を追加',
'Add Alternative Item': '代わりの物資を追加',
'Add Assessment': 'アセスメントを追加',
'Add Assessment Summary': 'アセスメントの要約を追加',
'Add Asset': '資産の追加',
'Add Asset Log Entry - Change Label': 'Add Asset Log Entry - Change Label',
'Add Availability': 'Add Availability',
'Add Baseline': '基準値の追加',
'Add Baseline Type': '基準値タイプの追加',
'Add Bed Type': 'ベッドの種類を追加',
'Add Bin Type': 'Bin Typeを追加',
'Add Bins': 'Binを追加',
'Add Brand': '銘柄を追加',
'Add Budget': '予算を追加',
'Add Bundle': 'Bundleを追加',
'Add Camp': 'Add Camp',
'Add Camp Service': 'Add Camp Service',
'Add Camp Type': 'Add Camp Type',
'Add Catalog': 'カタログを追加',
'Add Catalog Item': '物資カタログを追加',
'Add Catalog.': 'カタログを追加',
'Add Category': 'カテゴリを追加',
'Add Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係を追加',
'Add Certificate': 'Add Certificate',
'Add Certification': 'Add Certification',
'Add Cholera Treatment Capability Information': 'コレラ治療能力に関する情報の追加',
'Add Cluster': 'クラスタを追加',
'Add Cluster Subsector': 'クラスタのサブセクタを追加',
'Add Competency Rating': 'Add Competency Rating',
'Add Config': '設定を追加',
'Add Contact': '連絡先を追加',
'Add Contact Information': '連絡先情報を追加',
'Add Course': 'Add Course',
'Add Course Certificate': 'Add Course Certificate',
'Add Credential': '証明書の追加',
'Add Credentials': '証明書の追加',
'Add Dead Body Report': '遺体発見レポートを追加',
'Add Detailed Evaluation': '詳細な評価を追加',
'Add Disaster Victims': '被災者情報を追加',
'Add Distribution': '配給所を追加する',
'Add Distribution.': '配給所を追加',
'Add Document': 'Add Document',
'Add Donor': '資金提供組織を追加',
'Add Facility': 'Add Facility',
'Add Feature Class': 'Feature Classを追加',
'Add Feature Layer': 'Feature Layerを追加',
'Add Flood Report': '洪水レポートを追加',
'Add GPS data': 'Add GPS data',
'Add Group': 'グループを追加',
'Add Group Member': 'グループメンバを追加',
'Add Hospital': '病院情報を追加',
'Add Human Resource': 'Add Human Resource',
'Add Identification Report': 'IDレポートを追加',
'Add Identity': 'IDを追加',
'Add Image': '画像を追加',
'Add Impact': '被災状況の追加',
'Add Impact Type': '災害影響のタイプを追加',
'Add Incident': 'インシデントを追加',
'Add Incident Report': 'インシデントレポートを追加',
'Add Inventory Item': '備蓄物資を追加します',
'Add Inventory Store': '物資集積地点を追加',
'Add Item': '物資を追加',
'Add Item (s)': '物資を追加',
'Add Item Catalog': '物資カタログを追加',
'Add Item Catalog ': '物資カタログを追加',
'Add Item Catalog Category ': '救援物資カタログカテゴリを追加',
'Add Item Category': '物資カテゴリを追加',
'Add Item Pack': '救援物資パックの追加',
'Add Item Sub-Category': '救援物資サブカテゴリを追加',
'Add Item to Catalog': 'Add Item to Catalog',
'Add Item to Commitment': 'Add Item to Commitment',
'Add Item to Inventory': 'Add Item to Inventory',
'Add Item to Request': '要求する支援物資の登録',
'Add Item to Shipment': '輸送に物資を追加する',
'Add Job Role': 'Add Job Role',
'Add Key': 'Keyを追加',
'Add Kit': 'Kitを追加',
'Add Layer': 'レイヤを追加',
'Add Level 1 Assessment': 'レベル1アセスメントを追加',
'Add Level 2 Assessment': 'レベル2アセスメントを追加',
'Add Line': '行を追加',
'Add Location': 'ロケーションを追加',
'Add Location Group': 'ロケーショングループを追加',
'Add Locations': 'ロケーションを追加',
'Add Log Entry': 'ログエントリを追加',
'Add Map Configuration': '地図設定を追加',
'Add Marker': 'マーカーを追加',
'Add Member': 'メンバを追加',
'Add Membership': 'メンバシップを追加',
'Add Message': 'メッセージを追加',
'Add Mission': 'Add Mission',
'Add Need': '要求を追加',
'Add Need Type': '需要タイプを追加',
'Add New': '新規追加',
'Add New Activity': '支援活動を新規追加',
'Add New Address': 'アドレスを新規追加',
'Add New Aid Request': '援助要請を新規追加',
'Add New Alternative Item': '新しい代わりの物資を追加',
'Add New Assessment': 'アセスメントを新規追加',
'Add New Assessment Summary': '新規アセスメントの要約を追加',
'Add New Asset': '新規資産の追加',
'Add New Baseline': '新しい基準値を追加',
'Add New Baseline Type': '基準値タイプの新規追加',
'Add New Bin': 'Binを新規追加',
'Add New Bin Type': 'Bin Typeを新規追加',
'Add New Brand': '銘柄を新規追加',
'Add New Budget': '予算を新規追加',
'Add New Bundle': 'Bundleを新規追加',
'Add New Camp': 'Add New Camp',
'Add New Camp Service': 'Add New Camp Service',
'Add New Camp Type': 'Add New Camp Type',
'Add New Catalog': 'Add New Catalog',
'Add New Catalog Item': '物資カタログを新規追加',
'Add New Cluster': 'クラスタを新規追加',
'Add New Cluster Subsector': 'クラスタのサブセクタを新規作成',
'Add New Commitment Item': '物資コミットメントを新規追加',
'Add New Config': '設定を新規追加',
'Add New Contact': '連絡先を新規追加',
'Add New Credential': '証明書の新規追加',
'Add New Distribution': '配給所を新規追加',
'Add New Distribution Item': '配給物資を新規追加',
'Add New Document': '文書を新規追加',
'Add New Donor': '資金提供組織を新規追加',
'Add New Entry': 'エントリを新規追加',
'Add New Event': 'Add New Event',
'Add New Facility': 'Add New Facility',
'Add New Feature Class': 'Feature Classを新規追加',
'Add New Feature Layer': 'Feature Layerを新規追加',
'Add New Flood Report': '洪水情報を新規追加',
'Add New Group': 'グループを新規追加',
'Add New Home': 'Add New Home',
'Add New Hospital': '病院を新規追加',
'Add New Human Resource': 'Add New Human Resource',
'Add New Identity': 'IDを新規追加',
'Add New Image': '画像を新規追加',
'Add New Impact': '新規影響を追加',
'Add New Impact Type': '災害影響のタイプを新規追加',
'Add New Incident': 'インシデントを新規追加',
'Add New Incident Report': '災害影響範囲レポートを新規追加',
'Add New Inventory Item': '備蓄物資を新規追加',
'Add New Inventory Store': '物資集積場所を新規追加',
'Add New Item': '救援物資を新規追加',
'Add New Item Catalog': '物資カタログを新規追加',
'Add New Item Catalog Category': '物資カタログカテゴリを新規追加',
'Add New Item Category': '物資カテゴリを新規追加',
'Add New Item Pack': '救援物資パックを新規追加',
'Add New Item Sub-Category': '物資サブカテゴリを新規追加',
'Add New Item to Kit': 'キットに救援物資を新規追加',
'Add New Key': 'Keyを新規追加',
'Add New Kit': 'キットを新規追加',
'Add New Layer': 'レイヤを新規追加',
'Add New Level 1 Assessment': 'レベル1アセスメントを新規追加',
'Add New Level 2 Assessment': 'レベル2アセスメントを新規追加',
'Add New Location': 'ロケーションを新規追加',
'Add New Log Entry': 'ログエントリを新規追加',
'Add New Map Configuration': '新規地図の設定追加',
'Add New Marker': 'マーカーを新規追加',
'Add New Member': 'メンバを新規追加',
'Add New Membership': 'メンバシップを新規追加',
'Add New Metadata': 'メタデータを新規追加',
'Add New Need': '新しい要求を登録する',
'Add New Need Type': '需要タイプを新規追加',
'Add New Note': '追加情報を新規追加',
'Add New Office': 'オフィスを新規追加',
'Add New Organization': '団体を新規追加',
'Add New Patient': 'Add New Patient',
'Add New Peer': 'データ同期先を新規追加',
'Add New Person to Commitment': 'Add New Person to Commitment',
'Add New Photo': '写真を新規追加',
'Add New Population Statistic': 'Add New Population Statistic',
'Add New Position': '場所を新規追加',
'Add New Problem': '問題を新規追加',
'Add New Project': 'プロジェクトを新規追加',
'Add New Projection': 'Projectionを新規追加',
'Add New Rapid Assessment': '被災地の現況アセスメントを新規追加',
'Add New Received Item': '受領した物資を新規追加',
'Add New Record': 'レコードを新規追加',
'Add New Relative': 'Add New Relative',
'Add New Report': 'レポートを新規追加',
'Add New Request': '支援要請を新規追加',
'Add New Request Item': '特定物資の要請を新規追加',
'Add New Resource': 'リソースを新規追加',
'Add New Response': '支援要請を新規追加',
'Add New River': '河川情報を新規追加',
'Add New Role': '役割を新規追加',
'Add New Role to User': 'ユーザに役割を新規割り当て',
'Add New Room': 'Add New Room',
'Add New Scenario': 'Add New Scenario',
'Add New Sector': '活動分野を新規追加',
'Add New Sent Item': '送った物資の追加',
'Add New Setting': '設定を新規追加',
'Add New Shelter': '避難所を新規追加',
'Add New Shelter Service': '避難所での提供サービスを新規追加',
'Add New Shelter Type': '避難所タイプを新規追加',
'Add New Shipment to Send': '発送する輸送物資を新規追加',
'Add New Site': 'Siteを新規追加',
'Add New Skill': 'スキルを新規追加',
'Add New Skill Type': 'スキルタイプを新規追加',
'Add New Solution': '解決案を提示する',
'Add New Staff': 'スタッフを新規追加',
'Add New Staff Member': 'Add New Staff Member',
'Add New Staff Type': 'スタッフタイプを新規追加',
'Add New Storage Location': '備蓄場所を新規追加',
'Add New Subsector': 'Add New Subsector',
'Add New Survey Answer': '新しい調査の回答を追加しました',
'Add New Survey Question': '調査項目を新規追加',
'Add New Survey Section': '新しい調査セクションを追加',
'Add New Survey Series': '新しい一連の調査を追加します',
'Add New Survey Template': 'Survey Templateを新規追加',
'Add New Task': 'タスクを新規追加',
'Add New Team': 'チームを新規追加',
'Add New Theme': 'テーマを新規追加',
'Add New Ticket': 'チケットを新規追加',
'Add New Track': '追跡情報を新規追加',
'Add New Unit': '単位を新規追加',
'Add New User': 'ユーザの新規登録',
'Add New User to Role': '新規ユーザに役割を割り当て',
'Add New Vehicle': 'Add New Vehicle',
'Add New Volunteer': 'Add New Volunteer',
'Add New Warehouse': '新しい倉庫を追加',
'Add New Warehouse Item': '倉庫物資を新規追加',
'Add Note': 'ノートを追加',
'Add Office': 'オフィスを追加',
'Add Organization': '団体を追加',
'Add Peer': 'データ同期先を追加',
'Add Performance Evaluation': 'パフォーマンス評価を追加',
'Add Person': '人物情報を追加',
'Add Person to Commitment': 'Add Person to Commitment',
'Add Personal Effects': 'Personal Effectsを追加',
'Add Photo': '写真を追加',
'Add Point': 'ポイントを追加',
'Add Polygon': 'Polygonを追加',
'Add Population Statistic': 'Add Population Statistic',
'Add Position': '場所を追加',
'Add Problem': '問題を追加',
'Add Project': 'プロジェクトを追加',
'Add Projection': '地図投影法を追加',
'Add Projections': '地図投影法を追加',
'Add Question': '質問事項を追加',
'Add Rapid Assessment': '被災地の現況アセスメントを追加',
'Add Rapid Evaluation': '迅速評価を追加',
'Add Recipient': '受け取り担当者を追加',
'Add Recipient Site': '受け取りSiteを追加',
'Add Recipient Site.': '受け取り先サイトを追加',
'Add Record': 'レコードを追加',
'Add Recovery Report': '遺体回収レポートを追加',
'Add Reference Document': 'リファレンス文書を追加',
'Add Report': 'レポートを追加',
'Add Request': '支援要請を追加',
'Add Request Detail': '支援要請の詳細を追加',
'Add Request Item': '物資の要請を追加します',
'Add Resource': 'リソースを追加',
'Add Response': '返答を追加',
'Add River': '河川情報を追加',
'Add Role': '役割を追加',
'Add Room': 'Add Room',
'Add Section': 'Sectionを追加',
'Add Sector': '活動分野を追加',
'Add Sender Organization': '送付元団体を追加',
'Add Sender Site': '送付元Siteを追加',
'Add Sender Site.': '送付元サイトを追加',
'Add Service Profile': 'サービスプロファイルを追加',
'Add Setting': '設定を追加',
'Add Shelter': '避難所を追加',
'Add Shelter Service': '避難所における提供サービスを追加',
'Add Shelter Type': '避難所タイプを追加',
'Add Shipment Transit Log': '輸送履歴を追加',
'Add Shipment/Way Bills': '輸送費/渡航費を追加',
'Add Site': 'サイトを追加',
'Add Skill': 'スキルを追加',
'Add Skill Equivalence': 'Add Skill Equivalence',
'Add Skill Provision': 'Add Skill Provision',
'Add Skill Type': 'スキルタイプを追加',
'Add Skill Types': 'スキルタイプを追加',
'Add Skill to Request': 'Add Skill to Request',
'Add Solution': '解決案を追加',
'Add Staff': 'スタッフを追加',
'Add Staff Member': 'Add Staff Member',
'Add Staff Type': 'スタッフタイプを追加',
'Add Status': '状況を追加',
'Add Storage Bin ': 'Storage Binを追加 ',
'Add Storage Bin Type': 'Storage Bin Typeを追加',
'Add Storage Location': '備蓄地点を追加',
'Add Storage Location ': '備蓄地点を追加',
'Add Sub-Category': 'サブカテゴリを追加',
'Add Subscription': '寄付金情報を追加',
'Add Subsector': 'Add Subsector',
'Add Survey Answer': '調査の回答を追加',
'Add Survey Question': '聞き取り調査項目を追加',
'Add Survey Section': '調査セクションの追加',
'Add Survey Series': '一連の調査を追加',
'Add Survey Template': '調査テンプレートを追加',
'Add Task': 'タスクを追加',
'Add Team': 'チームを追加',
'Add Theme': 'テーマを追加',
'Add Ticket': 'チケットを追加',
'Add Training': 'Add Training',
'Add Unit': '単位を追加',
'Add User': 'ユーザを追加',
'Add Vehicle': 'Add Vehicle',
'Add Vehicle Detail': 'Add Vehicle Detail',
'Add Vehicle Details': 'Add Vehicle Details',
'Add Volunteer': 'ボランティアの追加',
'Add Volunteer Availability': 'Add Volunteer Availability',
'Add Volunteer Registration': 'ボランティア登録を追加',
'Add Warehouse': '倉庫を追加',
'Add Warehouse Item': '倉庫物資を追加',
'Add a Person': '人物情報を追加',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'ファイル、URL、あるいは、このデータの確認を行なう連絡先のような参照文書を追加します。参照文書を入力しない場合、代わりにあなたのメールが表示されます。',
'Add a Volunteer': 'ボランティアの追加',
'Add a new Relief Item.': '救援物資を新規追加',
'Add a new Site from where the Item is being sent.': 'この救援物資の送付先を新規サイトとして追加',
'Add a new Site where the Item is being sent to.': 'この物資の送付先サイトを新規追加',
'Add a new certificate to the catalog.': 'Add a new certificate to the catalog.',
'Add a new competency rating to the catalog.': 'Add a new competency rating to the catalog.',
'Add a new job role to the catalog.': 'Add a new job role to the catalog.',
'Add a new skill provision to the catalog.': 'Add a new skill provision to the catalog.',
'Add a new skill type to the catalog.': 'Add a new skill type to the catalog.',
'Add an Photo.': '写真を追加.',
'Add location': 'ロケーションを追加',
'Add main Item Category.': '主要なアイテムカテゴリを追加',
'Add main Item Sub-Category.': '主要な救援物資サブカテゴリを追加',
'Add new Group': 'グループを新規追加',
'Add new Individual': '個人を新規追加',
'Add new Patient': 'Add new Patient',
'Add new position.': '新しいポジションを追加してください。',
'Add new project.': 'プロジェクトを新規追加',
'Add new staff role.': 'スタッフの権限を新規追加',
'Add or Update': '追加、あるいは更新',
'Add staff members': 'Add staff members',
'Add the Storage Bin Type.': 'Storage Binタイプを追加します。',
'Add the Storage Location where this bin is located.': 'binが保存されている貯蔵場所を追加します。',
'Add the Storage Location where this this Bin belongs to.': 'このBinがある備蓄地点を追加します。',
'Add the main Warehouse/Site information where this Bin belongs to.': 'その物資の備蓄スペースとなっている倉庫/サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Item is to be added.': 'この物資が追加されることになっている主要な倉庫 / サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Storage location is.': 'その物資の備蓄場所となっている倉庫/サイトの情報を追加してください。',
'Add the unit of measure if it doesnt exists already.': '距離単位が未登録の場合、単位を追加します。',
'Add to Bundle': 'Bundleへの登録',
'Add to Catalog': 'カタログへ登録',
'Add to budget': '予算項目へ登録',
'Add volunteers': 'Add volunteers',
'Add/Edit/Remove Layers': 'レイヤを追加/編集/削除',
'Additional Beds / 24hrs': '追加ベッド予測数 / 24h',
'Additional Comments': '追加コメント',
'Additional quantity quantifier – i.e. “4x5”.': '数量を表す追記(例 「4x5」)',
'Address': '住所情報',
'Address Details': '住所情報の詳細',
'Address Type': '住所情報タイプ',
'Address added': '住所情報を追加しました',
'Address deleted': '住所情報を削除しました',
'Address updated': '住所情報を更新しました',
'Addresses': '住所',
'Adequate': '適正',
'Adequate food and water available': '適切な量の食料と水が供給されている',
'Adjust Item(s) Quantity': 'アイテム量の修正',
'Adjust Items due to Theft/Loss': 'アイテム量の修正(盗難/紛失のため)',
'Admin': '管理者',
'Admin Email': '管理者の電子メール',
'Admin Name': '管理者名',
'Admin Tel': '管理者の電話番号',
'Administration': '管理',
'Administrator': '管理者',
'Admissions/24hrs': '患者増加数/24h',
'Adolescent (12-20)': '青年(12-20)',
'Adolescent participating in coping activities': '未成年が災害対応に従事',
'Adult (21-50)': '成人(21-50)',
'Adult ICU': '成人 ICU',
'Adult Psychiatric': '精神病の成人',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '刑務所で服役中の成人がいる',
'Advanced': '詳細',
'Advanced Bin Search': 'Binの詳細検索',
'Advanced Catalog Search': 'カタログの詳細検索',
'Advanced Category Search': '詳細カテゴリー検索',
'Advanced Item Search': '詳細な物資検索',
'Advanced Location Search': '詳細な位置検索',
'Advanced Site Search': 'Siteの詳細検索',
'Advanced Sub-Category Search': 'サブカテゴリの詳細検索',
'Advanced Unit Search': '高度な単位検索',
'Advanced:': 'もっと正確に:',
'Advisory': '注意喚起',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'このボタンをクリックすると、解決法のペアが順に表示されます。各ペアから、最も適する項目を1つずつ選択してください。',
'Age Group': '年齢グループ',
'Age group': '年齢グループ',
'Age group does not match actual age.': '年齢グループが実際の年齢と一致しません。',
'Aggravating factors': '悪化要因',
'Aggregate Items': 'アイテムの集約',
'Agriculture': '農業',
'Aid Request': '治療要請',
'Aid Request Details': '援助要請の詳細',
'Aid Request added': '援助要請を追加しました',
'Aid Request deleted': '援助要請を削除しました',
'Aid Request updated': '援助要請を更新しました',
'Aid Requests': '援助要請',
'Air Transport Service': '物資空輸サービス',
'Air tajin': 'エア・タジン',
'Aircraft Crash': '飛行機事故',
'Aircraft Hijacking': '航空機ハイジャック',
'Airport Closure': '空港閉鎖',
'Airspace Closure': '離陸地点閉鎖',
'Alcohol': 'アルコール',
'Alert': 'アラート',
'All': '全て',
'All Inbound & Outbound Messages are stored here': '送受信した全てのメッセージはここに格納されます。',
'All Locations': '全てのロケーション',
'All Records': 'すべてのレコード',
'All Requested Items': '物資要請一覧',
'All Resources': 'すべての資源',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'このサイトのSahana Software Foundationで提供されるデータのライセンスは、CCA (Creative Commons Attribution licence)となります。しかし、すべてのデータの発生源が、このサイトであるとは限りません。詳細は、各エントリの情報ソースの項目に記載されています。',
'Allowed to push': 'プッシュが許可済みである',
'Allows a Budget to be drawn up': '予算の策定を行ないます',
'Allows authorized users to control which layers are available to the situation map.': '認証済みユーザーが「状況地図のどのレイヤが利用できるか」を制御することを許可します。',
'Alternative Item': '代わりの物資',
'Alternative Item Details': '代わりの品物についての詳細',
'Alternative Item added': '代わりの物資を追加しました',
'Alternative Item deleted': '代わりの品物が削除されました',
'Alternative Item updated': '代わりの物資を更新しました',
'Alternative Items': '代わりとなる物資',
'Alternative infant nutrition in use': '利用中の乳児用代替食',
'Alternative places for studying': '授業開設に利用可能な施設',
'Alternative places for studying available': '学校以外の場所を学習に利用可能である',
'Ambulance Service': '救急サービス',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '物資集積場所とは、救援物資の配給能力をもつ、物理的な場所を指します。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、調達、その他様々な資産やリソースの管理といった機能。',
'An item which can be used in place of another item': '他の物資の代わりに使う物資',
'Analysis of Completed Surveys': '完了したフィードバックの分析',
'Analysis of assessments': 'Analysis of assessments',
'Animal Die Off': '動物の死',
'Animal Feed': '動物のエサ',
'Animals': '動物',
'Answer Choices (One Per Line)': '選択肢(一行に一つ)',
'Anthropolgy': '人類学',
'Antibiotics available': '抗生物質が利用可能',
'Antibiotics needed per 24h': '24時間ごとに必要な抗生物質',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'ファイル内の利用可能なすべてのメタデータ(タイムスタンプ、作成者、緯度経度等)を自動的に読み込みます。',
'Any comments about this sync partner.': 'データの同期先に関するコメント',
'Apparent Age': '年齢(外見)',
'Apparent Gender': '性別(外見)',
'Application': '申請',
'Application Deadline': 'Application Deadline',
'Application Permissions': 'アプリケーションに対する権限',
'Applications': 'アプリケーション',
'Appropriate clothing available': '適切な衣料が利用可能である',
'Appropriate cooking equipment/materials in HH': '世帯内にて適切な調理器具/食材が利用可能である',
'Approved': '承認されました',
'Approver': '承認者',
'Approx. number of cases/48h': '事象の発生概数/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48時間以内に発生した、5歳未満小児の下痢症状発生件数を記載してください。概数でかまいません',
'Arabic': 'Arabic',
'Archive not Delete': 'Archiveを削除しない',
'Arctic Outflow': '北極気団の南下',
'Are basic medical supplies available for health services since the disaster?': '災害発生後、基本的な医療行為を行えるよう、ヘルスサービスに対して供給があったかどうかを記載します',
'Are breast milk substitutes being used here since the disaster?': '災害発生後、母乳代替品が使われているかどうかを記載します',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '日中時間帯、この地域での生活や遊び、通行によって、未成年や高齢者、障碍者に肉体的な危害が及ぶ可能性があるかを記載します',
'Are the chronically ill receiving sufficient care and assistance?': '慢性病の罹患者に対して、十分なケアと介護が行われているかを記載します',
'Are there adults living in prisons in this area?': 'この地域で刑務所に収容されている成人がいるかどうかを記載してください',
'Are there alternative places for studying?': '学校以外に学習を行える場所があるかどうかを記載してください',
'Are there cases of diarrhea among children under the age of 5?': '5歳未満の幼児に下痢症状が発生しているかどうかを記載してください',
'Are there children living in adult prisons in this area?': 'この地域で、成人用刑務所に収容されている未成年がいるかどうかを記載してください',
'Are there children living in boarding schools in this area?': 'この地域で、寄宿舎に居住している未成年がいるかどうかを記載してください',
'Are there children living in homes for disabled children in this area?': 'この地域で、障がいのある子供の世話をするために家にいる未成年がいるかどうかを記載してください',
'Are there children living in juvenile detention in this area?': 'この地域で、少年院に収容されている未成年がいるかどうかを記載してください',
'Are there children living in orphanages in this area?': 'この地域で、孤児となった子供は居ますか?',
'Are there children with chronical illnesses in your community?': '慢性疾患をもった子どもが共同体の中にいるかどうかを記載してください',
'Are there health services functioning for the community since the disaster?': '災害発生後、共同体で医療サービスが機能しているかどうかを記載してください',
'Are there older people living in care homes in this area?': 'この地域で、介護施設に居住している高齢者がいるかどうかを記載してください',
'Are there older people with chronical illnesses in your community?': 'この共同体のなかで、慢性疾患を患っている高齢者がいるかどうかを記載してください',
'Are there people with chronical illnesses in your community?': 'この共同体の中で、慢性疾患を患っている人物がいるかどうかを記載してください',
'Are there separate latrines for women and men available?': 'トイレが男女別になっているかどうかを記載してください',
'Are there staff present and caring for the residents in these institutions?': 'これら施設の居住者に対して、ケアと介護を行えるスタッフが存在するかどうかを記載してください',
'Area': 'エリア',
'Areas inspected': '調査済み地域',
'As of yet, no sections have been added to this template.': 'As of yet, no sections have been added to this template.',
'Assessment': 'アセスメント',
'Assessment Details': 'アセスメントの詳細',
'Assessment Reported': 'アセスメントを報告しました',
'Assessment Summaries': 'アセスメントの要約',
'Assessment Summary Details': 'アセスメント要約の詳細',
'Assessment Summary added': 'アセスメントの要約を追加しました',
'Assessment Summary deleted': 'アセスメントの要約を削除しました',
'Assessment Summary updated': 'アセスメントの要約を更新しました',
'Assessment Type': 'アセスメントタイプ',
'Assessment added': 'アセスメントを追加しました',
'Assessment admin level': 'アセスメントの管理レベル',
'Assessment and Activities Gap Analysis Map': 'アセスメントと活動のギャップについての解析マップ',
'Assessment and Activities Gap Analysis Report': 'アセスメントと支援活動のギャップ解析レポート',
'Assessment deleted': 'アセスメントを削除しました',
'Assessment timeline': 'アセスメントタイムライン',
'Assessment updated': 'アセスメントを更新しました',
'Assessments': 'アセスメント',
'Assessments Needs vs. Activities': '需要アセスメントと支援活動のギャップ',
'Assessments and Activities': 'アセスメントと支援活動',
'Assessments are shown as green, yellow, orange, red.': 'アセスメントは、緑・黄・オレンジ・赤のいずれかの色で表されます。',
'Assessments are structured reports done by Professional Organizations': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。データには、WFP(国連世界食糧計画)アセスメントも含まれます',
'Assessments:': 'アセスメント:',
'Assessor': '査定実施者',
'Asset': '資産',
'Asset Assigned': '資産割り当て',
'Asset Assignment Details': '資産割り当ての詳細',
'Asset Assignments': '資産割り当て',
'Asset Assignments deleted': '資産の割り当てを削除しました',
'Asset Assignments updated': '物資割り当てを更新しました',
'Asset Details': '資産の詳細',
'Asset Log': 'Asset Log',
'Asset Log Details': 'Asset Log Details',
'Asset Log Empty': 'Asset Log Empty',
'Asset Log Entry Added - Change Label': 'Asset Log Entry Added - Change Label',
'Asset Log Entry deleted': 'Asset Log Entry deleted',
'Asset Log Entry updated': 'Asset Log Entry updated',
'Asset Management': '資産管理',
'Asset Number': '資産番号',
'Asset added': '資産を追加しました',
'Asset deleted': '資産を削除しました',
'Asset removed': 'Asset removed',
'Asset updated': '資産を更新しました',
'Assets': '資産',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Assets are resources which are not consumable but are expected back, so they need tracking.',
'Assign': 'Assign',
'Assign Asset': '資産割り当て',
'Assign Storage Location': '蓄積地点の割り当て',
'Assign to Org.': '組織に割り当て',
'Assign to Organisation': 'Assign to Organisation',
'Assign to Organization': 'Assign to Organization',
'Assign to Person': 'Assign to Person',
'Assign to Site': 'Assign to Site',
'Assigned': '割り当てられた',
'Assigned By': 'Assigned By',
'Assigned To': '担当者',
'Assigned to': '担当者',
'Assigned to Organisation': 'Assigned to Organisation',
'Assigned to Person': 'Assigned to Person',
'Assigned to Site': 'Assigned to Site',
'Assignments': '割り当て',
'Assistance for immediate repair/reconstruction of houses': '緊急の修理/家屋復旧の手伝い',
'Assistant': 'アシスタント',
'At/Visited Location (not virtual)': '実際に訪問した/訪問中のロケーション',
'Attend to information sources as described in <instruction>': '<instruction>に記載されている情報源に注意を払う',
'Attribution': '帰属表示',
'Audit Read': '監査報告書の読み込み',
'Audit Write': '監査報告書の書き込み',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
'Author': '作者',
'Automotive': '車両',
'Availability': 'ボランティア期間',
'Available Alternative Inventories': 'Available Alternative Inventories',
'Available Alternative Inventory Items': '利用可能な他の物資',
'Available Beds': '利用可能なベッド数',
'Available Forms': 'Available Forms',
'Available Inventories': 'Available Inventories',
'Available Inventory Items': '利用可能な倉庫内の物資',
'Available Messages': '利用可能なメッセージ',
'Available Records': '利用可能なレコード',
'Available databases and tables': '利用可能なデータベースおよびテーブル',
'Available for Location': '活動可能な地域',
'Available from': 'ボランティア開始日',
'Available in Viewer?': 'ビューワ内で利用可能?',
'Available until': 'ボランティア終了日',
'Availablity': '活動期間',
'Avalanche': '雪崩',
'Avoid the subject event as per the <instruction>': '<instruction>に従って対象の事象を避ける',
'Babies who are not being breastfed, what are they being fed on?': '乳児に対して母乳が与えられない場合、どうやって乳幼児の食事を確保しますか?',
'Baby And Child Care': '乳幼児へのケア',
'Background Color': '背景色',
'Background Color for Text blocks': 'テキストブロックの背景色',
'Bahai': 'バハイ',
'Baldness': '禿部',
'Balochi': 'バロチ語',
'Banana': 'バナナ',
'Bank/micro finance': '銀行/マイクロファイナンス',
'Barricades are needed': 'バリケードが必要',
'Base Layer?': '基本レイヤ?',
'Base Layers': '基本レイヤ',
'Base Location': '基本となるロケーション',
'Base Site Set': 'Base Site Set',
'Base Unit': '基本単位',
'Baseline Data': 'Baseline Data',
'Baseline Number of Beds': '平常時のベッド設置数',
'Baseline Type': '基準値タイプ',
'Baseline Type Details': '基準値タイプの詳細',
'Baseline Type added': '基準値タイプを追加しました',
'Baseline Type deleted': '基準値のタイプを削除しました',
'Baseline Type updated': '基準値タイプを更新しました',
'Baseline Types': '基準値の種類',
'Baseline added': '基準値を追加しました',
'Baseline deleted': '基準値を削除しました',
'Baseline number of beds of that type in this unit.': 'この施設における、通常状態のベッド収容数です。',
'Baseline updated': '基準値を更新しました',
'Baselines': '基準値',
'Baselines Details': '基準値の詳細',
'Basic': '基本',
'Basic Assess.': '基本アセスメント',
'Basic Assessment': '基本アセスメント',
'Basic Assessment Reported': 'ベーシック・アセスメントを報告しました',
'Basic Details': '基本情報',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '支援要請と寄付に関する基本情報です。カテゴリ、単位、連絡先詳細および状態等が記載されています。',
'Basic medical supplies available prior to disaster': '災害発生以前 基本的な医療行為の提供',
'Basic medical supplies available since disaster': '災害発生後 基本的な医療行為の提供',
'Basic reports on the Shelter and drill-down by region': '避難所の基本レポートと、地域による絞り込み',
'Baud': 'ボー値',
'Baud rate to use for your modem - The default is safe for most cases': 'モデムを使用するためのボーレートです。大抵の場合はデフォルトが安全です。',
'Beam': '梁',
'Bed Capacity': 'ベッド最大収容数',
'Bed Capacity per Unit': '施設ごとのベッド最大収容数',
'Bed Type': 'ベッド種別',
'Bed type already registered': 'ベッドのタイプは既に登録済みです。',
'Bedding materials available': '寝具が利用可能である',
'Below ground level': '地下',
'Beneficiary Type': '受益者タイプ',
"Bing Layers cannot be displayed if there isn't a valid API Key": "Bing Layers cannot be displayed if there isn't a valid API Key",
'Biological Hazard': '生物災害',
'Biscuits': 'ビスケット',
'Blizzard': '吹雪',
'Blood Type (AB0)': '血液型 (AB0式)',
'Blowing Snow': '地吹雪',
'Boat': 'ボート',
'Bodies': 'Bodies',
'Bodies found': '未回収の遺体',
'Bodies recovered': '回収済みの遺体',
'Body': '本文',
'Body Recovery': 'Body Recovery',
'Body Recovery Reports': '遺体回収レポート',
'Body Recovery Request': '遺体回収の要請',
'Body Recovery Requests': '遺体回収の要請',
'Bomb': '爆発物',
'Bomb Explosion': '爆発が発生',
'Bomb Threat': '爆破予告',
'Border Color for Text blocks': 'テキストブロックの枠色',
'Bounding Box Insets': 'バウンディングボックスの内側余白',
'Bounding Box Size': 'バウンディングボックスのサイズ',
'Boys 13-18 yrs in affected area': '影響地域内の13-18歳の男子数',
'Boys 13-18 yrs not attending school': '学校に来ていなかった13-18歳の男子数',
'Boys 6-12 yrs in affected area': '影響地域内の6-12歳の男子数',
'Boys 6-12 yrs not attending school': '学校に来ていなかった6-12歳の男子数',
'Brand': '銘柄',
'Brand Details': '銘柄の詳細',
'Brand added': '銘柄を追加しました',
'Brand deleted': '銘柄が削除されました',
'Brand updated': '銘柄が更新されました',
'Brands': '銘柄',
'Breast milk substitutes in use since disaster': '災害発生後から母乳代替品を使用している',
'Breast milk substitutes used prior to disaster': '災害前から母乳代替品を使用していた',
'Bricks': 'レンガ',
'Bridge Closed': '橋梁(通行止め)',
'Bucket': 'バケツ',
'Buddhist': '仏教徒',
'Budget': '予算',
'Budget Details': '予算の詳細',
'Budget Updated': '予算を更新しました',
'Budget added': '予算を追加しました',
'Budget deleted': '予算を削除しました',
'Budget updated': '予算を更新しました',
'Budgeting Module': '予算編成モジュール',
'Budgets': '予算編成',
'Buffer': 'バッファ',
'Bug': 'バグ',
'Building Aide': '建設援助',
'Building Assessment': '建物のアセスメント',
'Building Assessments': '建築物アセスメント',
'Building Collapsed': '崩壊した建物',
'Building Name': '建物名',
'Building Safety Assessments': '建物の安全アセスメント',
'Building Short Name/Business Name': '建物の名前 / 会社名',
'Building or storey leaning': '建物または階層が傾いている',
'Built using the Template agreed by a group of NGOs working together as the': '例えばECB等、多くのNGOによって利用されている形式を使っての記録が可能です。',
'Bulk Uploader': 'まとめてアップロード',
'Bundle': 'バンドル',
'Bundle Contents': '小包の内容',
'Bundle Details': 'Bundleの詳細',
'Bundle Updated': 'バンドルを更新しました',
'Bundle added': 'バンドルを追加しました',
'Bundle deleted': 'バンドルを削除しました',
'Bundle updated': 'バンドル・セットを更新しました',
'Bundles': 'バンドル',
'Burn': '火傷(やけど)',
'Burn ICU': '熱傷 ICU',
'Burned/charred': '火傷/炭化',
'Business damaged': 'ビジネスへの損害が発生している',
'By Facility': 'By Facility',
'By Inventory': '物資の送付元',
'By Person': '人物ごと',
'By Site': 'サイト別',
'By Warehouse': '送付元倉庫',
'CBA Women': 'CBA 女性',
'CLOSED': 'CLOSED',
'CN': '貨物運送状',
'CSS file %s not writable - unable to apply theme!': 'CSS ファイル %s が書き込み不可になっているため、テーマを適用することができません。',
'Calculate': '計算',
'Camp': '仮泊施設',
'Camp Coordination/Management': '仮泊施設間の調整 / 管理',
'Camp Details': 'Camp Details',
'Camp Service': 'Camp Service',
'Camp Service Details': 'Camp Service Details',
'Camp Service added': 'Camp Service added',
'Camp Service deleted': 'Camp Service deleted',
'Camp Service updated': 'Camp Service updated',
'Camp Services': 'Camp Services',
'Camp Type': 'Camp Type',
'Camp Type Details': 'Camp Type Details',
'Camp Type added': 'Camp Type added',
'Camp Type deleted': 'Camp Type deleted',
'Camp Type updated': 'Camp Type updated',
'Camp Types': 'Camp Types',
'Camp Types and Services': 'Camp Types and Services',
'Camp added': 'Camp added',
'Camp deleted': 'Camp deleted',
'Camp updated': 'Camp updated',
'Camps': 'Camps',
'Can only disable 1 record at a time!': '一度に1つしか無効にできません!',
'Can only enable 1 record at a time!': 'Can only enable 1 record at a time!',
'Can users register themselves for authenticated login access?': '新規ユーザが、他者の承認なしに自分を新規ユーザとして登録できるか?',
"Can't import tweepy": 'tweepyをインポートできません',
'Cancel': 'キャンセル',
'Cancel Add': '追加を取り消す',
'Cancel Log Entry': 'Cancel Log Entry',
'Cancel Shipment': '輸送をキャンセルする',
'Canceled': 'キャンセル',
'Cancelled': 'キャンセルされました',
'Candidate Matches for Body %s': '遺体 %s に適合する候補者',
'Canned Fish': '魚の缶詰',
'Cannot be empty': '必ず入力してください。',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'リンクされたレコードがあるので削除できません。このレコードよりも先に、リンク先のレコードを削除してください。',
'Cannot disable your own account!': '自分自身のアカウントを無効にする事はできません',
'Capacity (Max Persons)': '収容可能数 (最大人数)',
'Capacity (W x D X H)': '収容可能面積 (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '被災者の個々のグループについて、情報を取得する (ツアー旅行者、滞在者、家族、など)',
'Capture Information on each disaster victim': '被災者情報を個別に把握する',
'Capturing organizational information of a relief organization and all the projects they have in the region': '個々の支援団体と、地域内で実行中の全てのプロジェクトを取得します',
'Capturing the essential services each Volunteer is providing and where': '各ボランティアの居場所と、提供している主要なサービスを取得する',
'Capturing the projects each organization is providing and where': '各団体の所在地と、提供している主要なサービスを取得します',
'Cardiology': '心臓病学',
'Cash available to restart business': '事業再開に必要な資金調達が可能',
'Cassava': 'キャッサバ',
'Casual Labor': '一般労働',
'Casualties': '犠牲者',
'Catalog': 'カタログ',
'Catalog Details': 'Catalog Details',
'Catalog Item': '救援物資カタログ',
'Catalog Item added': '救援物資カタログにアイテムを追加しました',
'Catalog Item deleted': 'カタログアイテムを削除しました',
'Catalog Item updated': '救援物資カタログを更新しました',
'Catalog Items': '物資カタログ',
'Catalog Name': 'カタログ名',
'Catalog added': 'Catalog added',
'Catalog deleted': 'Catalog deleted',
'Catalog updated': 'Catalog updated',
'Catalogs': 'Catalogs',
'Categories': 'Categories',
'Category': 'カテゴリ',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 間の関係',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog 間の関係を追加しました',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog 関係を削除しました',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog 間の関係を更新しました',
"Caution: doesn't respect the framework rules!": "Caution: doesn't respect the framework rules!",
'Ceilings, light fixtures': '天井、照明器具',
'Cell Phone': 'Cell Phone',
'Central point to record details on People': '被災者や支援者など、関係者情報の集積を行ないます',
'Certificate': 'Certificate',
'Certificate Catalog': 'Certificate Catalog',
'Certificate Details': 'Certificate Details',
'Certificate Status': '認証状態',
'Certificate added': 'Certificate added',
'Certificate deleted': 'Certificate deleted',
'Certificate updated': 'Certificate updated',
'Certificates': 'Certificates',
'Certification': '資格認定',
'Certification Details': 'Certification Details',
'Certification added': 'Certification added',
'Certification deleted': 'Certification deleted',
'Certification updated': 'Certification updated',
'Certifications': 'Certifications',
'Certifying Organization': 'Certifying Organization',
'Change Password': 'パスワードの変更',
'Check': '確認',
'Check Request': 'Check Request',
'Check for errors in the URL, maybe the address was mistyped.': '入力したURLに間違いがないか確認してください。',
'Check if the URL is pointing to a directory instead of a webpage.': 'URLがウェブページではなくディレクトリを指定しているか、確認してください。',
'Check outbox for the message status': '送信箱を調べてメッセージステータスを確認する',
'Check to delete': '削除項目にチェック',
'Check to delete:': '削除項目にチェック:',
'Check-In': 'チェックイン',
'Check-Out': 'チェックアウト',
'Check-in': 'チェックイン',
'Check-in at Facility': 'Check-in at Facility',
'Check-out': 'チェックアウト',
'Checked': 'Checked',
'Checklist': 'チェックリスト',
'Checklist created': 'チェックリストを作成しました',
'Checklist deleted': 'チェックリストを削除しました',
'Checklist of Operations': '作業項目チェックリスト',
'Checklist updated': 'チェックリストを更新しました',
'Chemical Hazard': '化学災害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '兵器による攻撃、脅威(化学兵器、生物兵器、放射能汚染、核兵器、高威力の爆発)',
'Chicken': 'ニワトリ',
'Child': '子供',
'Child (2-11)': '子供 (2-11歳)',
'Child (< 18 yrs)': '子供 (18歳未満)',
'Child Abduction Emergency': '未成年誘拐警報',
'Child headed households (<18 yrs)': '代表者が未成年 (18歳以下)の世帯数',
'Children (2-5 years)': '子供たち (2-5歳)',
'Children (5-15 years)': '子供たち(5-15歳)',
'Children (< 2 years)': '子供たち (2歳未満)',
'Children in adult prisons': '成人用刑務所に未成年がいる',
'Children in boarding schools': '寄宿制学校の児童がいる',
'Children in homes for disabled children': '障がい児施設にいる子ども',
'Children in juvenile detention': '少年院収容者がいる',
'Children in orphanages': '孤児院にいる子ども',
'Children living on their own (without adults)': '未成年のみで自活(成人無し)',
'Children not enrolled in new school': '新しい学校に入学していない子供',
'Children orphaned by the disaster': '被災のため孤児になった子供たち',
'Children separated from their parents/caregivers': '親(または親相当の後見人)とはぐれた子供の数',
'Children that have been sent to safe places': '安全な地域へ疎開済みの子供数',
'Children who have disappeared since the disaster': '災害発生後に行方不明の子供たち',
'Children with chronical illnesses': '慢性疾患をもつ子供がいる',
'Chinese (Simplified)': 'Chinese (Simplified)',
'Chinese (Taiwan)': '中国語 (台湾繁体字)',
'Cholera Treatment': 'コレラの治療',
'Cholera Treatment Capability': 'コレラ治療対応能力',
'Cholera Treatment Center': 'コレラ治療センター',
'Cholera-Treatment-Center': 'コレラ治療センター',
'Choose': '選択',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '新規の評価とチームの判定に基づいた新しいポスターを選択してください。建物全体が深刻な状態の場合「危険」を、一部は使える場合「制限あり」です。主要な出入口に「調査済み」プラカードを設置してください。全ての使用可能な出入口には他のプラカードを設置してください。',
'Choosing Skill and Resources of Volunteers': 'ボランティアのスキルとリソースを選択してください',
'Christian': 'キリスト教徒',
'Church': '教会',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '行方不明時の状況や、この人物の生存を最後に確認した人物についての情報を記載してください。',
'City': 'City',
'Civil Emergency': '市民緊急事態',
'Cladding, glazing': '被覆・外壁、ガラス板',
'Clear Selection': '選択をクリア',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
'Click on the link': 'リンクをクリックしてください',
'Click on the link ': 'リンクをクリック ',
'Client IP': 'クライアントIP',
'Climate': 'Climate',
'Clinical Laboratory': '臨床検査',
'Clinical Operations': '診療業務の状況',
'Clinical Status': '診療状況',
'Close map': '地図を閉じる',
'Closed': '閉鎖中',
'Closure': '閉鎖・通行止め',
'Clothing': '衣服',
'Cluster': 'クラスタ',
'Cluster Details': 'クラスタの詳細',
'Cluster Distance': 'クラスタ距離',
'Cluster Subsector': 'クラスタのサブセクタ',
'Cluster Subsector Details': 'クラスタのサブセクタの詳細',
'Cluster Subsector added': 'クラスタのサブセクタを追加しました',
'Cluster Subsector deleted': 'クラスタのサブセクタを削除しました',
'Cluster Subsector updated': 'クラスタのサブセクタを更新しました',
'Cluster Subsectors': 'クラスタのサブセクタ',
'Cluster Threshold': 'クラスタのしきい値',
'Cluster added': 'クラスタを追加しました',
'Cluster deleted': 'クラスタを削除しました',
'Cluster updated': 'クラスタを更新しました',
'Cluster(s)': 'クラスタ',
'Clusters': 'クラスタ',
'Code': 'プロジェクトコード',
'Cold Wave': '寒波',
'Collapse, partial collapse, off foundation': '全壊、一部損壊、基礎からのずれ',
'Collective center': '集団避難センター',
'Color for Underline of Subheadings': 'サブヘッダのアンダーラインの色',
'Color of Buttons when hovering': 'ホバー時のボタンの色',
'Color of bottom of Buttons when not pressed': '押されなかった時のボタンの下部の色',
'Color of bottom of Buttons when pressed': 'ボタン押下時の下部の色',
'Color of dropdown menus': 'ドロップダウンメニューの色',
'Color of selected Input fields': '選択中の入力フィールドの色',
'Color of selected menu items': '選択中のメニューアイテムの色',
'Column Choices (One Per Line': 'カラム選択 (一行に一つ',
'Columns, pilasters, corbels': '円柱、付け柱、コーベル',
'Combined Method': '複数証跡の組み合わせ',
'Come back later.': '復旧まで少々お待ちください',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '復旧まで少々お待ちください。あなた以外の閲覧者にも、この表示がされています。',
'Comments': 'コメント',
'Commercial/Offices': '商業 / オフィス',
'Commit': 'コミット',
'Commit Date': '受け入れ日',
'Commit from %s': '%sからのコミット',
'Commit. Status': '物資到着の見込み',
'Commiting a changed spreadsheet to the database': '変更後のスプレッドシートをデータベースに反映します',
'Commitment': 'コミットメント',
'Commitment Added': 'コミットメントを追加しました',
'Commitment Canceled': 'コミットをキャンセルしました',
'Commitment Details': 'コミットの詳細',
'Commitment Item': '物資のコミットメント',
'Commitment Item Details': 'コミットされた救援物資の詳細',
'Commitment Item added': 'コミットの物資を追加しました',
'Commitment Item deleted': 'コミットされた救援物資を削除しました',
'Commitment Item updated': 'コミット物資を更新しました',
'Commitment Items': 'コミットされた物資',
'Commitment Status': '支援の引き受け状況',
'Commitment Updated': 'コミットを更新しました',
'Commitments': 'コミット',
'Committed': 'コミット済み',
'Committed By': '受け入れ団体/人',
'Committed People': 'Committed People',
'Committed Person Details': 'Committed Person Details',
'Committed Person updated': 'Committed Person updated',
'Committing Inventory': '引き受け中の倉庫',
'Committing Organization': 'Committing Organization',
'Committing Person': 'Committing Person',
'Communication problems': 'コミュニケーションの問題',
'Community Centre': 'コミュニティセンター',
'Community Health Center': '地域の医療センター',
'Community Member': 'コミュニティの構成員',
'Competency': 'Competency',
'Competency Rating Catalog': 'Competency Rating Catalog',
'Competency Rating Details': 'Competency Rating Details',
'Competency Rating added': 'Competency Rating added',
'Competency Rating deleted': 'Competency Rating deleted',
'Competency Rating updated': 'Competency Rating updated',
'Competency Ratings': 'Competency Ratings',
'Complete': '完了',
'Complete Unit Label for e.g. meter for m.': '単位を表すラベル。例えばメートルなら m など。',
'Completed': '完了',
'Complexion': '人種、肌色',
'Compose': 'メッセージ作成',
'Compromised': '機能低下',
'Concrete frame': 'コンクリートのフレーム',
'Concrete shear wall': 'コンクリートせん断壁',
'Condition': 'Condition',
'Config': '設定',
'Config added': '設定を追加しました',
'Config deleted': '設定を削除しました',
'Config updated': '設定を更新しました',
'Configs': '設定',
'Configurations': '設定',
'Configure Run-time Settings': 'ランタイムの設定',
'Confirm Shipment Received': '配送物の受領を確認',
'Confirmed': '確認済み',
'Confirmed Incidents': '確認済みのインシデント',
'Confirming Organization': 'Confirming Organization',
'Conflict Details': 'コンフリクトの詳細',
'Conflict Resolution': 'データ競合の解決',
'Consignment Note': '出荷通知',
'Constraints Only': '制約のみ',
'Consumable': '消耗品',
'Contact': '連絡先',
'Contact Data': '連絡先データ',
'Contact Details': '連絡先の詳細',
'Contact Info': 'Contact Info',
'Contact Information': '連絡先情報',
'Contact Information Added': '連絡先情報を追加しました',
'Contact Information Deleted': '連絡先情報を削除しました',
'Contact Information Updated': '連絡先情報を更新しました',
'Contact Method': '問い合わせ方法',
'Contact Name': '連絡先名',
'Contact Person': '窓口担当者',
'Contact Phone': '連絡先電話番号',
'Contact details': '連絡先の詳細',
'Contact information added': '連絡先情報を追加しました',
'Contact information deleted': '連絡先情報を削除しました',
'Contact information updated': '連絡先情報を更新しました',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '詳細事項の質問や連絡を行なう際の連絡担当者を記載します(レポート報告者と異なる場合のみ)。電話番号、住所、電子メールなどを記載してください。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '情報伝達や追加質問を行う際の代表担当者(報告者と異なる場合のみ記載してください)。電話番号や住所、メールアドレスなどを指定できます。',
'Contact us': '問い合わせ',
'Contacts': '連絡先',
'Contents': '内容',
'Contradictory values!': '値が矛盾しています!',
'Contributor': '投稿者',
'Conversion Tool': '変換ツール',
'Cooking NFIs': '調理用器具',
'Cooking Oil': '調理油',
'Coordinate Conversion': '座標変換',
'Coping Activities': '一時対応活動',
'Copy': 'コピー',
'Copy any data from the one to be deleted into the one to keep': '削除する側の候補地から残す方の候補地へ、必要なデータを転載します。',
'Corn': 'とうもろこし',
'Cost Type': '料金種別',
'Cost per Megabyte': '1メガバイト毎に課金',
'Cost per Minute': '1分毎に課金',
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
'Country': '国',
'Country of Residence': '居住国',
'County': 'County',
'Course': 'Course',
'Course Catalog': 'Course Catalog',
'Course Certificate Details': 'Course Certificate Details',
'Course Certificate added': 'Course Certificate added',
'Course Certificate deleted': 'Course Certificate deleted',
'Course Certificate updated': 'Course Certificate updated',
'Course Certificates': 'Course Certificates',
'Course Details': 'Course Details',
'Course added': 'Course added',
'Course deleted': 'Course deleted',
'Course updated': 'Course updated',
'Courses': 'Courses',
'Create & manage Distribution groups to receive Alerts': 'アラートの送付先グループを作成・管理する',
'Create Checklist': 'チェックリストの作成',
'Create Group Entry': 'グループエントリの作成',
'Create Impact Assessment': '災害影響範囲アセスメントの作成',
'Create Import Job': 'Import Jobの作成',
'Create Mobile Impact Assessment': '災害影響範囲アセスメントをモバイル端末から作成',
'Create New Asset': 'Create New Asset',
'Create New Catalog Item': 'Create New Catalog Item',
'Create New Event': 'Create New Event',
'Create New Import Job': 'Import Jobの新規作成',
'Create New Item Category': 'Create New Item Category',
'Create New Location': '新規ロケーションの作成',
'Create New Request': 'Create New Request',
'Create New Scenario': 'Create New Scenario',
'Create New Vehicle': 'Create New Vehicle',
'Create Rapid Assessment': '被災地の現況アセスメントを作成',
'Create Request': '支援要請を作成',
'Create Task': 'タスクの作成',
'Create a group entry in the registry.': '登録簿にグループエントリを作成します。',
'Create new Office': 'Create new Office',
'Create new Organization': 'Create new Organization',
'Create, enter, and manage surveys.': '調査の作成、入力、管理を実施',
'Creation of Surveys': '聞き取り調査の新規作成',
'Creation of assessments': 'Creation of assessments',
'Credential Details': '証明書の詳細',
'Credential added': '証明書を追加しました',
'Credential deleted': '証明書を削除しました',
'Credential updated': '証明書を更新しました',
'Credentialling Organization': 'Credentialling Organization',
'Credentials': '証明書',
'Credit Card': 'Credit Card',
'Crime': '犯罪',
'Criteria': '基準',
'Currency': '通貨',
'Current Entries': 'Current Entries',
'Current Group Members': '現在のグループメンバ',
'Current Identities': '現在のID',
'Current Location': '現在のロケーション',
'Current Location Country': 'Current Location Country',
'Current Location Phone Number': 'Current Location Phone Number',
'Current Location Treating Hospital': 'Current Location Treating Hospital',
'Current Log Entries': '現在のログエントリ',
'Current Memberships': '現在のメンバシップ',
'Current Mileage': 'Current Mileage',
'Current Notes': '現在選択中の追加情報',
'Current Records': 'Current Records',
'Current Registrations': '現在の登録',
'Current Status': '現在の状況',
'Current Team Members': '現在のチームメンバ',
'Current Twitter account': '現在のTwitterアカウント',
'Current community priorities': '現在のコミュニティの優先順位',
'Current general needs': '現在の需要',
'Current greatest needs of vulnerable groups': '現在、被災者が最も必要としている物資/サービス',
'Current health problems': '現在の健康問題',
'Current main income sources': '現在の主な収入源',
'Current major expenses': '現在の主な支出項目',
'Current number of patients': '現在の患者数',
'Current problems, categories': '現在の問題、カテゴリ',
'Current problems, details': '現在の問題の詳細',
'Current request': '現在の要求',
'Current response': '現在の対応状況',
'Current session': '現在のセッション',
'Current type of health problems, adults': '現在発生中の健康問題(成人)',
'Current type of health problems, children': '現在発生中の健康問題(小児)',
'Current type of source for drinking water': '現在の飲料水確保方法',
'Current type of source for sanitary water': '現在の生活用水確保方法',
'Currently no Certifications registered': 'Currently no Certifications registered',
'Currently no Course Certificates registered': 'Currently no Course Certificates registered',
'Currently no Credentials registered': 'Currently no Credentials registered',
'Currently no Missions registered': 'Currently no Missions registered',
'Currently no Skill Equivalences registered': 'Currently no Skill Equivalences registered',
'Currently no Skills registered': 'Currently no Skills registered',
'Currently no Trainings registered': 'Currently no Trainings registered',
'Currently no entries in the catalog': 'Currently no entries in the catalog',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'カストマイズされたデータベースのリソース (例:Sahana 内のリソースとして定義された物)',
'Customisable category of aid': 'カスタマイズ可能な支援カテゴリ',
'DC': '寄付の証明(Donation Certificate)',
'DECISION': '決定',
'DNA Profile': 'DNAプロファイル',
'DNA Profiling': 'DNAプロファイリング',
'DVI Navigator': '被災者の検索',
'Daily': '日次',
'Dam Overflow': 'ダムの越流',
'Damage': '損傷',
'Dangerous Person': '危険人物',
'Dashboard': 'ダッシュボード',
'Data': 'Data',
'Data Type': 'Data Type',
'Data import policy': 'データのインポートポリシー',
'Data uploaded': 'データがアップロードされました',
'Database': 'データベース',
'Date': '日付',
'Date & Time': '日付と時刻',
'Date Avaialble': '利用可能日',
'Date Available': '利用可能日',
'Date Received': '物資受領日',
'Date Requested': '要請した日',
'Date Required': '物資が必要になる日',
'Date Sent': '送付日',
'Date Until': 'Date Until',
'Date and Time': '日付と時刻',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '物資を受領した日時を記録します。デフォルトでは現在の時間が入力されます。変更するには、ドロップダウンリストから選択してください。',
'Date and time this report relates to.': 'このレポートに関連する日付と時刻',
'Date of Birth': '生年月日',
'Date of Latest Information on Beneficiaries Reached': '恩恵を受ける人にたどり着いた最新の情報の日付',
'Date of Report': 'レポートの日付',
'Date of Treatment': 'Date of Treatment',
'Date/Time': '日付/時刻',
'Date/Time of Find': '日付/発見日時',
'Date/Time of disappearance': '行方不明になった日付/時刻',
'Date/Time when found': 'Date/Time when found',
'Date/Time when last seen': 'Date/Time when last seen',
'De-duplicator': '重複解消機能',
'Dead Bodies': 'Dead Bodies',
'Dead Body': '遺体の管理',
'Dead Body Details': '遺体の詳細',
'Dead Body Reports': '遺体情報レポート',
'Dead body report added': '遺体発見レポートを追加しました',
'Dead body report deleted': '遺体報告を削除しました',
'Dead body report updated': '遺体レポートを更新しました',
'Deaths in the past 24h': '過去24時間の死者',
'Deaths/24hrs': '死亡者数/24h',
'Debug': 'デバッグ',
'Deceased': '死亡',
'Decimal Degrees': '十進角',
'Decision': 'Decision',
'Decomposed': '腐乱',
'Default Height of the map window.': '地図ウィンドウの初期の高さ',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの縦高。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Location': 'Default Location',
'Default Map': 'Default Map',
'Default Marker': 'デフォルトマーカー',
'Default Width of the map window.': '地図ウィンドウの幅の初期値',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの幅。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default synchronization policy': 'データ同期ポリシーのデフォルト設定',
'Defaults': 'デフォルト値',
'Defaults updated': 'デフォルト値を更新しました',
'Defecation area for animals': '動物排便用の地域',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).',
'Defines the icon used for display of features on handheld GPS.': 'ハンドヘルドGPSに表示するアイコンを決定します。',
'Defines the icon used for display of features on interactive map & KML exports.': 'インタラクティブマップとKMLエクスポートで建物などの表示に使われるアイコン定義',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '対話型地図および KML の出力上で Feature の表示に使用するアイコンを定義します。Feature Class に割り当てられたマーカーを上書きする必要がある場合、個々の場所に割り当てられたマーカーが設定されます。どちらも定義されていない場合は、デフォルトのマーカーが使用されます。',
'Defines the marker used for display & the attributes visible in the popup.': 'ポップアップ時と通常時に表示されるマーカーを指定してください。',
'Degrees must be a number between -180 and 180': '度数は -180 から 180 の間にしてください。',
'Dehydration': '脱水症状',
'Delete': '削除',
'Delete Aid Request': '援助要請を削除',
'Delete Alternative Item': '代わりの物資を削除する',
'Delete Assessment': 'アセスメントを削除',
'Delete Assessment Summary': 'アセスメントの要約を削除',
'Delete Asset': '資産の削除',
'Delete Asset Assignments': '資産割り当ての削除',
'Delete Asset Log Entry': 'Delete Asset Log Entry',
'Delete Baseline': '基準値を削除',
'Delete Baseline Type': '基準値タイプを削除',
'Delete Brand': 'ブランドを削除してください',
'Delete Budget': '予算を削除',
'Delete Bundle': 'Bundleを削除',
'Delete Catalog': 'Delete Catalog',
'Delete Catalog Item': '救援物資カタログを削除',
'Delete Certificate': 'Delete Certificate',
'Delete Certification': 'Delete Certification',
'Delete Cluster': 'クラスタを削除',
'Delete Cluster Subsector': 'クラスタのサブセクタを削除',
'Delete Commitment': 'コミットメントの削除',
'Delete Commitment Item': 'コミットした物資の削除',
'Delete Competency Rating': 'Delete Competency Rating',
'Delete Config': '設定を削除',
'Delete Contact Information': '連絡先情報の削除',
'Delete Course': 'Delete Course',
'Delete Course Certificate': 'Delete Course Certificate',
'Delete Credential': '証明書の削除',
'Delete Distribution': '配給所を削除',
'Delete Distribution Item': '配給物資を削除',
'Delete Document': '文書を削除',
'Delete Donor': '資金提供組織を削除',
'Delete Entry': 'エントリを削除',
'Delete Event': 'Delete Event',
'Delete Feature Class': 'Feature Classを削除',
'Delete Feature Layer': '機能レイヤを削除',
'Delete GPS data': 'Delete GPS data',
'Delete Group': 'グループを削除',
'Delete Home': 'Delete Home',
'Delete Hospital': '病院を削除',
'Delete Image': '画像を削除',
'Delete Impact': '影響範囲の削除',
'Delete Impact Type': '影響範囲のタイプを削除',
'Delete Incident': 'インシデントを削除',
'Delete Incident Report': 'インシデントレポートを削除',
'Delete Inventory Item': '備蓄物資を削除',
'Delete Inventory Store': '物資集積地点を削除',
'Delete Item': '救援物資を削除',
'Delete Item Category': 'アイテムカテゴリを削除',
'Delete Item Pack': '救援物資パックの削除',
'Delete Job Role': 'Delete Job Role',
'Delete Key': 'Keyを削除',
'Delete Kit': 'Kitを削除',
'Delete Layer': 'レイヤーを削除',
'Delete Level 1 Assessment': 'レベル1アセスメントの削除',
'Delete Level 2 Assessment': 'レベル2アセスメントの削除',
'Delete Location': 'ロケーションを削除',
'Delete Map Configuration': '地図設定を削除',
'Delete Marker': 'マーカーを削除',
'Delete Membership': 'メンバシップを削除',
'Delete Message': 'メッセージを削除',
'Delete Metadata': 'メタデータを削除',
'Delete Mission': 'Delete Mission',
'Delete Need': '要求を削除',
'Delete Need Type': '需要タイプを削除',
'Delete Office': 'オフィスを削除',
'Delete Old': '古いものを削除',
'Delete Organization': '団体情報を削除',
'Delete Patient': 'Delete Patient',
'Delete Peer': 'データ同期先の削除',
'Delete Person': '人物情報を削除',
'Delete Photo': '写真を削除',
'Delete Population Statistic': 'Delete Population Statistic',
'Delete Position': 'Delete Position',
'Delete Project': 'プロジェクトを削除',
'Delete Projection': '地図投影法を削除',
'Delete Rapid Assessment': '被災地の現況アセスメントを削除',
'Delete Received Item': '受け取った物資の削除',
'Delete Received Shipment': '受け取った輸送の削除',
'Delete Record': 'レコードを削除',
'Delete Recovery Report': '遺体回収レポートを削除',
'Delete Relative': 'Delete Relative',
'Delete Report': 'レポートを削除',
'Delete Request': '支援要請を削除',
'Delete Request Item': '物資の要請を削除',
'Delete Resource': 'リソースを削除',
'Delete Room': 'Delete Room',
'Delete Scenario': 'Delete Scenario',
'Delete Section': 'Sectionを削除',
'Delete Sector': '活動分野を削除',
'Delete Sent Item': '送付物資を削除',
'Delete Sent Shipment': '輸送物資を削除',
'Delete Service Profile': 'サービスプロファイルを削除',
'Delete Setting': '設定を削除',
'Delete Skill': 'スキルを削除',
'Delete Skill Equivalence': 'Delete Skill Equivalence',
'Delete Skill Provision': 'Delete Skill Provision',
'Delete Skill Type': 'スキルタイプを削除',
'Delete Staff Type': 'スタッフタイプを削除',
'Delete Status': '状況を削除',
'Delete Subscription': '寄付申し込みを削除',
'Delete Subsector': 'Delete Subsector',
'Delete Survey Answer': '調査回答削除',
'Delete Survey Question': 'Survey Questionを削除',
'Delete Survey Section': '調査項目を削除',
'Delete Survey Series': '一連の調査を削除',
'Delete Survey Template': '調査用テンプレートを削除',
'Delete Training': 'Delete Training',
'Delete Unit': '単位を削除',
'Delete User': 'ユーザを削除',
'Delete Vehicle': 'Delete Vehicle',
'Delete Vehicle Details': 'Delete Vehicle Details',
'Delete Volunteer': 'ボランティアを削除',
'Delete Warehouse': '倉庫を削除',
'Delete Warehouse Item': '倉庫物資の削除',
'Delete from Server?': 'サーバから削除しますか?',
'Delivered': '配信済み',
'Delphi Decision Maker': 'Delphi意思決定',
'Demographic': '人口情報',
'Demonstrations': 'デモ発生',
'Dental Examination': '歯科検査',
'Dental Profile': '歯の欠損/治療跡',
'Department/Unit Name': '所属部課名',
'Deployment': '展開',
'Deployment Location': 'Deployment Location',
'Describe the condition of the roads to your hospital.': '病院までの道路状況を記載してください',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'このレコードに関連する手続きを説明してください。(例えば "検診" です。)',
'Description': '説明',
'Description of Bin Type': 'Binタイプを記載してください',
'Description of Contacts': '連絡先の説明',
'Description of defecation area': '排泄用地についての補足説明',
'Description of drinking water source': '飲料水に関する補足説明',
'Description of sanitary water source': '生活用水に関する説明',
'Description of water source before the disaster': '災害発生前の水の確保方法について補足説明',
'Descriptive Text (e.g., Prose, etc)': '説明文 (例: 文学、等)',
'Designated for': '指定済み',
'Desire to remain with family': '家族との残留を希望',
'Destination': '目的地',
'Destroyed': 'Destroyed',
'Detail': '詳細',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
'Details': '詳細',
'Details field is required!': 'Details field is required!',
'Dialysis': '透析',
'Diaphragms, horizontal bracing': '仕切り板、水平部材',
'Diarrhea': '下痢',
'Diarrhea among children under 5': '5歳未満の幼児に下痢が蔓延している',
'Dignitary Visit': '要人の訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Direction': '方向',
'Disable': '無効',
'Disabled': '無効',
'Disabled participating in coping activities': '障害者が災害対応に従事',
'Disabled?': '無効になっているか?',
'Disaster Victim Identification': '被災者の同定',
'Disaster Victim Registry': '被災者登録',
'Disaster clean-up/repairs': '災害の清掃活動や修復',
'Discharge (cusecs)': '流水量 (cusecs)',
'Discharges/24hrs': '退院者数/24h',
'Discussion Forum': 'フォーラム',
'Discussion Forum on item': 'フォーラム(物資について)',
'Disease vectors': '病原媒介者',
'Dispatch': '発送',
'Dispatch Items': 'アイテムの発送',
'Dispensary': '診療所',
'Displaced': '避難中',
'Displaced Populations': '避難者数',
'Display Polygons?': '多角形を表示しますか?',
'Display Routes?': 'ルートを表示しますか?',
'Display Tracks?': 'Tracksを表示しますか?',
'Display Waypoints?': 'ウェイポイントを表示しますか?',
'Dispose': '処分',
'Dispose Expired/Unusable Items': '期限切れ / 使用できない物資の処分',
'Distance between defecation area and water source': '水資源採取場所と排泄場所の間の距離',
'Distance between latrines and temporary shelter in meters': 'トイレと避難所の距離(m)',
'Distance between shelter and latrines': '簡易避難所と排泄場所との間の距離(メートル)',
'Distance from %s:': 'Distance from %s:',
'Distance(Kms)': '距離(Kms)',
'Distribution': '配給所',
'Distribution Details': '配給所の詳細',
'Distribution Item': '配給物資',
'Distribution Item Details': '配給物資の詳細',
'Distribution Item added': '配給物資を追加しました',
'Distribution Item deleted': '配給物資を削除しました',
'Distribution Item updated': '配給物資を更新しました',
'Distribution Items': '配給物資',
'Distribution added': '配給所を追加しました',
'Distribution deleted': '配給所を削除しました',
'Distribution groups': '配信グループ',
'Distribution updated': '配給所を更新しました',
'Distributions': '配給所',
'District': '地区(行政地区)',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の青年は、災害に対応するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '1つの世帯ごとに、少なくとも2つ以上の水貯蔵容器(10-20リットル/容器)があるかどうかを記載してください',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '調理や食事に必要となる道具や器材(コンロ、ポット、皿やプレート、マグカップ、飲料容器など)が世帯に存在するかを記載します',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'ベッド、あるいはベッド用部材(例:タープ、プラスチックマット、毛布)が世帯に存在するかを記載します',
'Do households have household water storage containers?': '水貯蔵容器が世帯に存在するかを記載します',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '地域にいるマイノリティ(社会的少数者)の人が、自助的な災害対処につながる活動に参加しているか記載してください。(例 打ち合わせ、宗教活動、地域の清掃ボランティアなど)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '災害復旧活動に従事している高齢者が、共同体の中にいるかどうかを記載してください(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '個人に対して、少なくとも2セット以上の衣服(シャツ、ズボン/腰巻、下着など)があるかどうか記載してください',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '十分な量のサニタリ / 衛生用品が、安定して供給されているかどうかを記載します(石鹸、シャンプー、歯ブラシ、洗濯用洗剤など)',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の障害者は、災害に対処するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do women and girls have easy access to sanitary materials?': '女性用生理用品の入手が容易かどうかを記載してください',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の女性は、災害対応のための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do you have access to cash to restart your business?': 'ビジネス再開に必要な現金が入手可能かどうかを記載してください',
'Do you know of any incidents of violence?': '暴力事件が発生したかどうかを記載してください',
'Do you know of children living on their own (without adults)?': '成人がおらず、未成年のみで生活しているグループがあるかどうかを記載してください',
'Do you know of children separated from their parents or caregivers?': '親や養育者とはぐれた未成年がいるかどうかを記載してください',
'Do you know of children that have been orphaned by the disaster?': '災害によって孤児となった未成年がいるかどうかを記載してください',
'Do you know of children that have been sent to safe places?': '安全な場所に疎開した未成年がいるかどうかを記載してください',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '災害発生後、行き先の説明ないまま連絡が取れなくなった未成年がいるかどうかを記載してください',
'Do you know of older people who are primary caregivers of children?': '未成年に対する介護経験がある高齢者がいるかどうかを記載してください',
'Do you know of parents/caregivers missing children?': '子供と連絡が取れなくなった親や養育者がいるかどうかを記載してください',
'Do you really want to delete these records?': '本当にこれらのデータを削除しますか?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'この輸送の受領をキャンセルしますか?キャンセルするとこの物資は備蓄から削除されます。この操作は *取り消せません!*',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '出荷された物資をキャンセルしますか?この物資は、在庫に返されます。このアクションは、元に戻せません。',
'Do you want to over-write the file metadata with new default values?': 'ファイルのメタデータを、新しいデフォルト値で上書きしますか?',
'Do you want to receive this shipment?': 'この輸送物資を受け取られますか?',
'Do you want to send these Committed items?': 'これらコミットされた物資を送付してよいですか?',
'Do you want to send this shipment?': 'この発送情報を送信しますか?',
'Document': '文書',
'Document Details': '文書の詳細',
'Document Scan': '文書のスキャン',
'Document added': '文書を追加しました',
'Document deleted': '文書を削除しました',
'Document removed': 'Document removed',
'Document updated': '文書を更新しました',
'Documents': '文書',
'Documents and Photos': '文書と写真',
'Does this facility provide a cholera treatment center?': 'コレラ治療センターの機能を提供可能かどうか',
'Doing nothing (no structured activity)': '活動なし(組織立った行動なし)',
'Dollars': 'ドル',
'Domain': 'ドメイン',
'Domestic chores': '家事手伝い',
'Donated': 'Donated',
'Donation Certificate': '寄付証明書',
'Donation Phone #': '寄付受付電話番号',
'Donor': '資金提供組織',
'Donor Details': '資金提供組織の詳細',
'Donor added': '資金提供組織を追加しました',
'Donor deleted': '資金提供組織を削除しました',
'Donor updated': '資金提供組織を更新しました',
'Donors': '資金提供組織',
'Donors Report': '資金提供レポート',
'Door frame': 'ドア枠',
'Download PDF': 'PDFをダウンロード',
'Download Template': 'Download Template',
'Draft': 'ドラフト',
'Draft Features': '草案(ドラフト)',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'ロケーションに対する、スタッフと備品の予算を作成します。',
'Drill Down by Group': 'グループで絞り込み',
'Drill Down by Incident': 'インシデントで絞り込み',
'Drill Down by Shelter': '避難所で絞り込み',
'Driving License': '運転免許',
'Drought': '干ばつ',
'Drugs': '医薬品',
'Dug Well': '丸井戸',
'Duplicate?': '重複?',
'Duration': '活動実施期間',
'Dust Storm': '粉塵嵐',
'Dwelling': '居住施設',
'Dwellings': '住居数',
'E-mail': '電子メール',
'EMS Reason': '緊急医療受け入れ状態',
'EMS Status': 'EMSステータス',
'EMS Status Reason': '救急医療状況の理由',
'EMS Traffic Status': '救急医療の混雑状況',
'ER Status': 'ER ステータス',
'ER Status Reason': 'ER医療状況の理由',
'EXERCISE': 'EXERCISE',
'Early Recovery': '早期復旧',
'Earth Enabled?': 'Earth Enabled?',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '女性用サニタリ用品の入手が容易である',
'Edit': '編集',
'Edit Activity': '支援活動を編集',
'Edit Address': '住所の編集',
'Edit Aid Request': '援助要請を編集',
'Edit Alternative Item': '代わりの物資を編集',
'Edit Application': 'アプリケーションの編集',
'Edit Assessment': 'アセスメントを編集',
'Edit Assessment Summary': 'アセスメントの要約を編集',
'Edit Asset': '資産を編集',
'Edit Asset Assignment': '資産割り当ての編集',
'Edit Asset Log Entry': 'Edit Asset Log Entry',
'Edit Baseline': 'Baselineの編集',
'Edit Baseline Type': '基準値のタイプを編集',
'Edit Brand': '銘柄の編集',
'Edit Budget': '予算の編集',
'Edit Bundle': 'Bundleの編集',
'Edit Camp': 'Edit Camp',
'Edit Camp Service': 'Edit Camp Service',
'Edit Camp Type': 'Edit Camp Type',
'Edit Catalog': 'Edit Catalog',
'Edit Catalog Item': '救援物資カタログの編集',
'Edit Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係の編集',
'Edit Certificate': 'Edit Certificate',
'Edit Certification': 'Edit Certification',
'Edit Cluster': 'クラスタを編集',
'Edit Cluster Subsector': 'クラスタのサブセクタの編集',
'Edit Commitment': 'コミットを編集',
'Edit Commitment Item': 'コミットされた物資の編集',
'Edit Committed Person': 'Edit Committed Person',
'Edit Competency Rating': 'Edit Competency Rating',
'Edit Config': '設定の編集',
'Edit Contact': '連絡先の編集',
'Edit Contact Information': '連絡先情報の編集',
'Edit Contents': '内容の編集',
'Edit Course': 'Edit Course',
'Edit Course Certificate': 'Edit Course Certificate',
'Edit Credential': '証明書の編集',
'Edit Dead Body Details': '遺体の詳細を編集',
'Edit Defaults': 'デフォルト値の編集',
'Edit Description': '説明の編集',
'Edit Details': '詳細の編集',
'Edit Disaster Victims': '被災者情報の編集',
'Edit Distribution': '配給所の編集',
'Edit Distribution Item': '配給物資の編集',
'Edit Document': '文書を編集',
'Edit Donor': '資金提供組織の編集',
'Edit Email Settings': '電子メール設定の編集',
'Edit Entry': 'Edit Entry',
'Edit Event': 'Edit Event',
'Edit Facility': 'Edit Facility',
'Edit Feature Class': 'Feature Classの編集',
'Edit Feature Layer': 'Feature Layerの編集',
'Edit Flood Report': '洪水レポートの編集',
'Edit GPS data': 'Edit GPS data',
'Edit Gateway Settings': 'ゲートウェイ設定の編集',
'Edit Group': 'グループの編集',
'Edit Home': 'Edit Home',
'Edit Hospital': '病院の編集',
'Edit Human Resource': 'Edit Human Resource',
'Edit Identification Report': 'IDレポートの編集',
'Edit Identity': 'IDの編集',
'Edit Image': '画像の編集',
'Edit Image Details': '画像の詳細の編集',
'Edit Impact': '被災影響の編集',
'Edit Impact Type': '災害影響のタイプを編集',
'Edit Import File': 'Edit Import File',
'Edit Incident': 'インシデントを編集',
'Edit Incident Report': 'インシデントレポートの編集',
'Edit Inventory Item': '備蓄物資の編集',
'Edit Inventory Store': '物資集積地点の編集',
'Edit Item': '物資の編集',
'Edit Item Catalog': '救援物資カタログの編集',
'Edit Item Catalog Categories': '救援物資カタログのカテゴリを編集',
'Edit Item Category': '救援物資カテゴリの編集',
'Edit Item Pack': '物資パックを編集',
'Edit Item Sub-Categories': '救援物資サブカテゴリの編集',
'Edit Job Role': 'Edit Job Role',
'Edit Key': 'Keyの編集',
'Edit Kit': 'Kitの編集',
'Edit Layer': 'レイヤの編集',
'Edit Level %d Locations?': 'Edit Level %d Locations?',
'Edit Level 1 Assessment': 'レベル1アセスメントを編集する',
'Edit Level 2 Assessment': 'レベル2アセスメントを編集',
'Edit Location': 'ロケーションの編集',
'Edit Log Entry': 'ログエントリの編集',
'Edit Map Configuration': '地図設定を編集する',
'Edit Map Services': '地図サービスの編集',
'Edit Marker': 'マーカーの編集',
'Edit Membership': 'メンバシップの編集',
'Edit Message': 'メッセージの編集',
'Edit Messaging Settings': 'メッセージ設定の編集',
'Edit Metadata': 'メタデータの編集',
'Edit Mission': 'Edit Mission',
'Edit Modem Settings': 'モデム設定の編集',
'Edit Need': 'ニーズを編集',
'Edit Need Type': '需要タイプの編集',
'Edit Note': '追加情報を編集',
'Edit Office': 'オフィスの編集',
'Edit Options': 'オプション編集',
'Edit Organization': '団体の編集',
'Edit Parameters': 'パラメータの編集',
'Edit Patient': 'Edit Patient',
'Edit Peer': 'データ同期先の編集',
'Edit Peer Details': 'データ同期先の詳細を編集',
'Edit Person Details': '人物情報の詳細を編集',
'Edit Personal Effects Details': '所持品の詳細の編集',
'Edit Photo': '写真の編集',
'Edit Pledge': '寄付の編集',
'Edit Population Statistic': 'Edit Population Statistic',
'Edit Position': '場所の編集',
'Edit Problem': '問題の編集',
'Edit Project': 'プロジェクトの編集',
'Edit Projection': '地図投影法の編集',
'Edit Rapid Assessment': '被災地の現況アセスメントの編集',
'Edit Received Item': '物資の受領を編集',
'Edit Received Shipment': '物資の輸送の受領報告を編集',
'Edit Record': 'レコードの編集',
'Edit Recovery Details': '遺体回収の詳細を編集',
'Edit Registration': '登録の編集',
'Edit Registration Details': '登録状況の詳細を編集',
'Edit Relative': 'Edit Relative',
'Edit Report': 'レポートの編集',
'Edit Request': '支援要請の編集',
'Edit Request Item': '物資の要請を編集',
'Edit Requested Skill': 'Edit Requested Skill',
'Edit Resource': 'リソースの編集',
'Edit Response': '返信を編集',
'Edit River': '河川の編集',
'Edit Role': '役割の編集',
'Edit Room': 'Edit Room',
'Edit SMS Settings': 'Edit SMS Settings',
'Edit SMTP to SMS Settings': 'Edit SMTP to SMS Settings',
'Edit Scenario': 'Edit Scenario',
'Edit Sector': '活動分野を編集',
'Edit Sent Item': '送付した物資の編集',
'Edit Setting': '設定の編集',
'Edit Settings': '設定の編集',
'Edit Shelter': '避難所の編集',
'Edit Shelter Service': '避難所提供サービスの編集',
'Edit Shelter Type': '避難所タイプの編集',
'Edit Shipment Transit Log': '輸送履歴の編集',
'Edit Shipment to Send': '送付する輸送を編集',
'Edit Shipment/Way Bills': '輸送/貨物運送状の編集',
'Edit Shipment<>Item Relation': '輸送<>物資の関係を編集',
'Edit Site': 'Siteを編集',
'Edit Skill': 'スキルの編集',
'Edit Skill Equivalence': 'Edit Skill Equivalence',
'Edit Skill Provision': 'Edit Skill Provision',
'Edit Skill Type': 'スキルタイプの編集',
'Edit Solution': '解決案の編集',
'Edit Staff': 'スタッフの編集',
'Edit Staff Type': 'スタッフタイプの編集',
'Edit Storage Bin Type(s)': 'Storage Binタイプを編集',
'Edit Storage Bins': 'Storage Binの編集',
'Edit Storage Location': '備蓄地点の編集',
'Edit Subscription': '寄付申し込みの編集',
'Edit Subsector': 'Edit Subsector',
'Edit Survey Answer': '調査回答の編集',
'Edit Survey Question': '調査の質問項目を編集',
'Edit Survey Section': '調査セクションの編集',
'Edit Survey Series': '一連の調査の編集',
'Edit Survey Template': '調査テンプレートを編集',
'Edit Task': 'タスクの編集',
'Edit Team': 'チームの編集',
'Edit Theme': 'テーマの編集',
'Edit Themes': 'テーマの編集',
'Edit Ticket': 'チケットの編集',
'Edit Track': '追跡情報の編集',
'Edit Training': 'Edit Training',
'Edit Tropo Settings': 'Tropo 設定の編集',
'Edit Unit': '単位の編集',
'Edit User': 'ユーザの編集',
'Edit Vehicle': 'Edit Vehicle',
'Edit Vehicle Details': 'Edit Vehicle Details',
'Edit Volunteer Availability': 'Edit Volunteer Availability',
'Edit Volunteer Details': 'ボランティアの詳細を編集する',
'Edit Volunteer Registration': 'ボランティア登録の編集',
'Edit Warehouse': '倉庫を編集',
'Edit Warehouse Item': '倉庫物資を編集',
'Edit Web API Settings': 'Edit Web API Settings',
'Edit current record': '現在のレコードの編集',
'Edit message': 'メッセージの編集',
'Edit the Application': 'アプリケーションの編集',
'Editable?': '編集可能?',
'Education': '教育',
'Education materials received': '教育資材を受領した',
'Education materials, source': '教育資材の送付元',
'Effects Inventory': '所持品の一覧',
'Eggs': '卵',
'Either a shelter or a location must be specified': '避難所かロケーションのどちらかを特定する必要があります',
'Either file upload or document URL required.': 'ファイルのアップロードか文書のURLのいずれかが必要です。',
'Either file upload or image URL required.': 'アップロードするファイルか、URLを指定してください。',
'Elderly person headed households (>60 yrs)': '代表者が60歳以上の世帯数',
'Electrical': '電動の',
'Electrical, gas, sewerage, water, hazmats': '電気、ガス、下水道、水、有害物',
'Elevated': '上昇',
'Elevators': 'エレベーター',
'Email': '電子メール',
'Email Address': 'メールアドレス',
'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address',
'Email Settings': '電子メール設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子メールの認証は完了しましたが、登録はまだ完了していません。確認が完了するまで少々お待ちください。',
'Email settings updated': '電子メールの設定を更新しました',
'Email verification': '利用者登録の確認',
'Embalming': '遺体防腐処理',
'Embassy': '大使館',
'Emergency Capacity Building project': 'ECB (緊急対応能力構築プロジェクト)',
'Emergency Department': '救急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援施設',
'Emergency Support Service': '緊急支援サービス',
'Emergency Telecommunications': '緊急通信',
'Enable': 'Enable',
'Enable/Disable Layers': 'レイヤの有効化/無効化',
'Enabled': '有効',
'Enabled?': 'Enabled?',
'Enabling MapMaker layers disables the StreetView functionality': 'Enabling MapMaker layers disables the StreetView functionality',
'End Date': 'End Date',
'End date': '終了日',
'End date should be after start date': '終了日付は開始日付より後にしてください',
'End of Period': '終了期間',
'English': 'English 英語',
'Enter Coordinates': '緯度経度を入力',
'Enter Coordinates:': '座標入力:',
'Enter a GPS Coord': 'GPS Coordを入力',
'Enter a GPS Coordinate': 'GPS座標を入力してください',
'Enter a date before': '以前の日時を入力',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': '最初の数文字を入力して既存の項目から選ぶか、あるいは新しいロケーション名を入力して、ロケーションを特定してください。',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'アップロードするスプレッドシートの名前を入力してください。(必須項目)',
'Enter a name for the spreadsheet you are uploading.': 'Enter a name for the spreadsheet you are uploading.',
'Enter a new support request.': '新規の支援要請を登録',
'Enter a summary of the request here.': '要求事項の概要を入力',
'Enter a unique label!': 'そのラベル名は使われています。一意のラベル名を入力してください。',
'Enter a valid date before': 'より前の正しい日付を入力してください',
'Enter a valid email': '正しいメールアドレスを入力してください',
'Enter a valid future date': '正しい未来の日付を入力してください',
'Enter a valid past date': 'Enter a valid past date',
'Enter some characters to bring up a list of possible matches': '文字を入力することで、候補の一覧が表示されます',
'Enter some characters to bring up a list of possible matches.': '検索文字列を入力してください',
'Enter tags separated by commas.': 'タグはカンマで区切って入力してください。',
'Enter the data for an assessment': 'Enter the data for an assessment',
'Enter the same password as above': '確認のため、パスワードを再入力',
'Enter your firstname': 'あなたの名前を入力',
'Enter your organization': 'Enter your organization',
'Entered': '入力された',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '電話番号の入力は任意です。入力すると、SMS メッセージの受け取り登録ができます。',
'Entering an Organisation is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions.': '団体の入力は任意ですが、入力すると申請が適切な承認者に送られ、適切な権限が自動的に付与されます。',
'Entry deleted': 'エントリを削除しました',
'Environment': '環境',
'Equipment': '備品',
'Error encountered while applying the theme.': 'テーマ適用時にエラーが発生しました。',
'Error in message': 'エラーメッセージ',
'Error logs for "%(app)s"': '"%(app)s" に関するエラーログ',
'Errors': 'エラー',
'Est. Delivery Date': 'Est. Delivery Date',
'Estimated # of households who are affected by the emergency': '非常事態の影響を受けた世帯の推定数',
'Estimated # of people who are affected by the emergency': '非常事態の影響を受けた住民の推定数',
'Estimated Overall Building Damage': '建物全体の被害見積り',
'Estimated total number of people in institutions': 'なんらかの施設に収容されている住民の推定数',
'Euros': 'ユーロ',
'Evacuating': '退避中',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'このメッセージの情報を評価します。(この値は、公開される警告アプリケーションで使用してはなりません)',
'Event': 'Event',
'Event Details': 'Event Details',
'Event Time': 'イベント発生時刻',
'Event Type': 'イベントタイプ',
'Event added': 'Event added',
'Event deleted': 'Event deleted',
'Event type': 'イベントタイプ',
'Event updated': 'Event updated',
'Events': 'Events',
'Example': '例',
'Exceeded': '超過',
'Excellent': 'Excellent',
'Exclude contents': 'コンテンツを除く',
'Excreta disposal': 'し尿処理',
'Execute a pre-planned activity identified in <instruction>': '事前に準備していた計画 <instruction>を実行する',
'Exercise': 'Exercise',
'Exercise?': 'Exercise?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Exercises mean all screens have a watermark & all notifications have a prefix.',
'Existing Placard Type': '設置済みプラカードのタイプ',
'Existing Sections': 'Existing Sections',
'Existing food stocks': '食糧備蓄あり',
'Existing food stocks, main dishes': '備蓄中の食料(主菜)',
'Existing food stocks, side dishes': '備蓄中の食料(副菜)',
'Existing location cannot be converted into a group.': 'Existing location cannot be converted into a group.',
'Exits': '出口',
'Expected In': '到着予定',
'Expected Out': '退出予定',
'Expected Return Home': 'Expected Return Home',
'Experience': '経験',
'Expiry Date': '有効期限',
'Expiry Time': '有効期限',
'Expiry_Date': '有効期限',
'Explosive Hazard': '爆発災害',
'Export': 'エクスポート',
'Export Data': 'データのエクスポート',
'Export Database as CSV': 'データベースをCSV形式でエクスポート',
'Export in GPX format': 'GPXフォーマットでエクスポート',
'Export in KML format': 'KMLフォーマットでエクスポート',
'Export in OSM format': 'OSMフォーマットでエクスポート',
'Export in PDF format': 'PDFフォーマットでエクスポート',
'Export in RSS format': 'RSSフォーマットでエクスポート',
'Export in XLS format': 'XLSフォーマットでエクスポート',
'Exterior Only': '外装のみ',
'Exterior and Interior': '外装と内装',
'External Features': '外部機能',
'Eye Color': '目の色',
'Facebook': 'Facebook',
'Facial hair, color': 'ヒゲ, 色',
'Facial hair, type': 'ヒゲ, 形状',
'Facial hear, length': 'ヒゲ, 長さ',
'Facilities': 'Facilities',
'Facility': 'Facility',
'Facility Details': 'Facility Details',
'Facility Operations': '施設の運用',
'Facility Status': '施設の状態',
'Facility Type': '施設タイプ',
'Facility added': 'Facility added',
'Facility or Location': 'Facility or Location',
'Facility removed': 'Facility removed',
'Facility updated': 'Facility updated',
'Factors affecting school attendance': '生徒の就学に影響する要因',
'Fail': 'Fail',
'Failed to send mail to Approver - see if you can notify them manually!': '承認者へのメール送信に失敗しました。手動で通知できるかご確認ください!',
'Failed!': '失敗しました!',
'Fair': 'Fair',
'Falling Object Hazard': '落下/墜落による災害',
'Families/HH': '家族/世帯',
'Family': '家族',
'Family tarpaulins received': 'タープ(家族用簡易テント)を受領した',
'Family tarpaulins, source': 'タープ(家族用簡易テント)の送付元',
'Family/friends': '家族/友人',
'Farmland/fishing material assistance, Rank': '農業 / 漁業用物資の補助、ランク',
'Fatalities': '死亡者',
'Fax': 'ファックス',
'Feature': '機能',
'Feature Class': 'Feature クラス',
'Feature Class Details': 'Feature Classの詳細',
'Feature Class added': '機能クラスを追加しました',
'Feature Class deleted': '機能クラスを削除しました',
'Feature Class updated': '機能クラスを更新しました',
'Feature Classes': 'Feature クラス',
'Feature Classes are collections of Locations (Features) of the same type': 'Feature クラスは同じタイプの位置(Features)を集めた物です。',
'Feature Layer Details': '機能レイヤの詳細',
'Feature Layer added': '機能レイヤを追加しました',
'Feature Layer deleted': '機能レイヤを削除しました',
'Feature Layer updated': '機能レイヤを更新しました',
'Feature Layers': '機能レイヤ',
'Feature Namespace': 'Feature 名前空間',
'Feature Request': '機能の要求',
'Feature Type': 'Feature タイプ',
'Features Include': '含まれる機能',
'Female': '女性',
'Female headed households': '代表者が女性の世帯数',
'Few': '少数',
'Field': 'Field',
'Field Hospital': '野外病院',
'File': 'ファイル',
'File Imported': 'File Imported',
'File Importer': 'File Importer',
'File name': 'File name',
'Fill in Latitude': '緯度を記入',
'Fill in Longitude': '経度を記入',
'Fill out Rapid Evaluation Forms': '迅速評価フォームに記入します',
'Fill out detailed Evaluation Forms': '詳細な評価フォームに入力する',
'Filter': 'フィルタ',
'Filter Field': 'フィールドをフィルタする',
'Filter Value': '値をフィルタ',
'Filtered search of aid pledges and requests': '援助の申し出と要請の絞り込み検索',
'Find': '検索',
'Find All Matches': 'すべての一致を検索',
'Find Dead Body Report': '遺体レポートの発見',
'Find Hospital': '病院を探す',
'Find Person Record': '人物情報を検索',
'Find Recovery Report': '遺体発見レポート',
'Find Volunteers': 'ボランティアを探す',
'Find a Person Record': '人物情報を検索する',
'Find by Name': '名前で検索',
'Finder': '発見者',
'Fingerprint': '指紋',
'Fingerprinting': '指紋',
'Fingerprints': '指紋',
'Finish': '完了',
'Finished Jobs': '完了したジョブ',
'Fire': '火災',
'Fire suppression and rescue': '消火・救出活動',
'First Name': '名',
'First name': '名',
'Fishing': '漁業',
'Flash Flood': '鉄砲水',
'Flash Freeze': '瞬間凍結',
'Fleet Management': '車両管理',
'Flexible Impact Assessments': '災害影響範囲アセスメント',
'Flood': '洪水',
'Flood Alerts': '洪水警報',
'Flood Alerts show water levels in various parts of the country': '洪水警報では、国内各所の水位情報を確認することができます。',
'Flood Report': '洪水レポート',
'Flood Report Details': '洪水レポートの詳細',
'Flood Report added': '洪水レポートを追加しました',
'Flood Report deleted': '洪水レポートを削除しました',
'Flood Report updated': '洪水レポートを更新しました',
'Flood Reports': '洪水レポート',
'Flow Status': '流れの状況',
'Focal Point': '代表者',
'Fog': '濃霧',
'Food': '食料',
'Food Supply': '食料の供給',
'Food assistance': '食糧援助',
'Food assistance available/expected': '食糧援助が利用可能 / 期待できる',
'Footer': 'フッタ',
'Footer file %s missing!': 'フッターファイル%sが見つかりません。',
'For': ' ',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden の場合はベースURL(例えば http://sync.sahanfoundation.org/eden)、他のシステムの場合は同期インターフェースのURL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3では通常110 (SSLでは995)で、IMAPでは通常143 (IMAPSでは993)。',
'For Warehouse': '倉庫向け',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '国の場合は ISO2 コード、町の場合は 空港コード(Airport Locode)',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'それぞれの同期パートナーについて、指定した間隔で実行する同期ジョブがデフォルトで存在します。必要に応じて、さらなる同期ジョブを設定し、カスタマイズすることができます。開始するには、リンクをクリックしてください。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'セキュリティ向上のため、ユーザー名とパスワードを入力し、団体の他端末の管理者にユーザー名とパスワードを通知して「データ同期」 -> 「データ同期パートナー」であなたのUUIDに追加してもらうことを推奨します。',
'For live help from the Sahana community on using this application, go to': 'Sahanaの使い方について Sahanaコミュニティからライブヘルプを希望する際は、以下に進んでください。',
'For messages that support alert network internal functions': '警戒(alert)ネットワークの内部機能をサポートするメッセージの場合',
'For more details on the Sahana Eden system, see the': 'Sahana Edenに関する詳細は、以下をごらんください。',
'For more information, see ': '詳細は、以下を参照してください。',
'For other types, the next screen will allow you to enter the relevant details...': 'その他の種類については、次の画面で関連する詳細情報を入力できます…',
'For:': '対象:',
'Forest Fire': '森林火災',
'Formal camp': '指定避難所',
'Format': 'フォーマット',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}",
'Forms': 'フォーム',
'Found': '発見された',
'Foundations': '構造基礎',
'Freezing Drizzle': '凍結霧雨',
'Freezing Rain': '着氷性の雨',
'Freezing Spray': '飛沫の凍結(着氷)',
'French': 'フランス語',
'Friday': '金曜日',
'From': '輸送元',
'From Facility': 'From Facility',
'From Inventory': '送付元',
'From Location': '送付元ロケーション',
'From Organisation': '送付元団体',
'From Organization': '送付元団体',
'From Person': '送付元の担当者',
'From Warehouse': '倉庫から',
'Frost': '霜',
'Fulfil. Status': '確保量は十分か',
'Fulfillment Status': '充足状況',
'Full': '満員',
'Full beard': 'もみあげまでのアゴヒゲ、口髭あり',
'Fullscreen Map': 'フルスクリーン表示',
'Function': '機能',
'Function Permissions': '機能に対する権限',
'Functional Tests': '機能テスト',
'Functions available': '利用可能な機能',
'Funding Organization': '資金提供団体',
'Funeral': '葬儀',
'Further Action Recommended': '更なる対応が推奨されている',
'GIS Reports of Shelter': '避難所のGISレポート',
'GIS integration to view location details of the Shelter': '避難所のロケーション詳細を閲覧するGISインテグレーション',
'GPS': 'GPS',
'GPS Data': 'GPS Data',
'GPS ID': 'GPS ID',
'GPS Marker': 'GPSマーカー',
'GPS Track': 'GPS トラック',
'GPS Track File': 'GPS Track ファイル',
'GPS data': 'GPS data',
'GPS data added': 'GPS data added',
'GPS data deleted': 'GPS data deleted',
'GPS data updated': 'GPS data updated',
'GPX Layers': 'GPX レイヤ',
'GPX Track': 'GPX形式の追跡情報',
'GRN': 'GRN',
'GRN Status': 'GRNステータス',
'Gale Wind': '強風',
'Gantt Chart': 'ガントチャート',
'Gap Analysis': 'ギャップ解析',
'Gap Analysis Map': 'ギャップ解析マップ',
'Gap Analysis Report': 'ギャップ解析報告',
'Gap Map': '需給ギャップマップ',
'Gap Report': '需給ギャップの報告',
'Gateway Settings': 'ゲートウェイ設定',
'Gateway settings updated': 'ゲートウェイ設定を更新しました',
'Gender': '性別',
'General Comment': '包括コメント',
'General Medical/Surgical': '一般医学/外科',
'General emergency and public safety': '一般的緊急事態と公共の安全',
'General information on demographics': '人口統計の情報',
'Generator': '発電機',
'Geocode': 'Geocode',
'Geocoder Selection': 'Geocoder 選択',
'Geometry Name': 'Geometry名',
'Geonames.org search requires Internet connectivity!': 'Geonames.org の検索を行うには、インターネットに接続している必要があります。',
'Geophysical (inc. landslide)': '地球物理 (地滑りを含む)',
'Geotechnical': '地質工学',
'Geotechnical Hazards': '地盤災害',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでGeraldoモジュールが利用できません。PDF出力に必要です。',
'Geraldo not installed': 'Geraldoがインストールされていません',
'German': 'German',
'Get incoming recovery requests as RSS feed': '遺体回収要請をRSSフィードとして取得する',
'Girls 13-18 yrs in affected area': '影響地域内の13-18歳の女子数',
'Girls 13-18 yrs not attending school': '学校に来ていなかった13-18歳の女子数',
'Girls 6-12 yrs in affected area': '影響地域内の6-12歳の女子数',
'Girls 6-12 yrs not attending school': '学校に来ていなかった6-12歳の女子数',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '画像に関する説明。特に、写真のどの箇所に何が確認できるかを記載します (オプション)',
'Give information about where and when you have seen the person': '人物を見かけた場所や時間の情報を提供してください',
'Give information about where and when you have seen them': 'どこで、いつ、彼らを見かけたのか、情報をください',
'Global Messaging Settings': 'メッセージの全般設定',
'Glossary': '用語集',
'Go': 'Go',
'Go to Request': '支援要請に行く',
'Goatee': 'やぎヒゲ',
'Good': 'Good',
'Good Condition': 'Good Condition',
'Goods Received Note': '物資受領書(GRN)',
"Google Layers cannot be displayed if there isn't a valid API Key": "Google Layers cannot be displayed if there isn't a valid API Key",
'Government': '政府・行政機関',
'Government UID': '政府UID',
'Government building': '政府所管の建物',
'Grade': '学年',
'Greek': 'ギリシャ語',
'Green': '緑',
'Ground movement, fissures': '地盤移動、亀裂',
'Ground movement, settlement, slips': '地盤移動、沈下、がけ崩れ',
'Group': 'グループ',
'Group %(group_id)s created': 'グループ %(group_id)s を作成しました',
'Group Description': 'グループの説明',
'Group Details': 'グループの詳細',
'Group ID': 'グループID',
'Group Member added': 'グループメンバを追加しました',
'Group Members': 'グループメンバ',
'Group Memberships': 'グループメンバシップ',
'Group Name': 'グループ名',
'Group Title': 'グループのタイトル',
'Group Type': 'グループのタイプ',
'Group added': 'グループを追加しました',
'Group deleted': 'グループを削除しました',
'Group description': 'グループの説明',
'Group name': 'グループ名',
'Group type': 'グループタイプ',
'Group updated': 'グループを更新しました',
'Groups': 'グループ',
'Groups removed': 'グループを削除しました',
'Guest': 'ゲスト',
'HR Data': '人的資源の情報',
'HR Manager': '人的資源マネージャー',
'Hail': 'あられ',
'Hair Color': '頭髪の色',
'Hair Length': '頭髪の長さ',
'Hair Style': 'ヘアスタイル',
'Has additional rights to modify records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを変更するための権限を追加します',
'Has data from this Reference Document been entered into Sahana?': 'リファレンス文書の内容が Sahanaに登録してあるかどうかを記載してください。',
'Has only read-only access to records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを閲覧のみに制限します',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Has the Certificate for receipt of the shipment been given to the sender?',
'Has the GRN (Goods Received Note) been completed?': 'Has the GRN (Goods Received Note) been completed?',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事態以来、女性や未成年の生活の危険度が変化したかどうかを記載してください',
'Has your business been damaged in the course of the disaster?': '災害の過程で、ビジネス上の損害を受けているかどうかを記載してください',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '世帯に対して避難所用品や生活必需品が配布されている、あるいは数日以内に配布を実施できるかを記載してください',
'Have normal food sources been disrupted?': '平常時の食料調達源が利用不可能になったかどうかを記載してください',
'Have schools received or are expecting to receive any assistance?': '学校に対してなんらかの支援が行われた、あるいは行われる予定であるかどうかを記載してください',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '医療品や食糧支援を、被災者、あるいはあなたが受領したかどうか、あるいは数日以内に受領できそうかどうかを記載してください。',
'Hazard Pay': '危険手当',
'Hazardous Material': '危険物',
'Hazardous Road Conditions': '危険な道路状況',
'Header Background': 'ヘッダー背景',
'Header background file %s missing!': 'ヘッダー背景ファイル%sが存在しません。',
'Headquarters': '本部・本社',
'Health': '保健・医療',
'Health care assistance, Rank': '医療 / 介護支援、ランク',
'Health center': '保健所',
'Health center with beds': '保健所(ベッドあり)',
'Health center without beds': '保健所(ベッドなし)',
'Health services functioning prior to disaster': '災害発生以前 ヘルスサービスの提供',
'Health services functioning since disaster': '災害発生後 ヘルスサービスの提供',
'Health services status': '医療サービス状況',
'Healthcare Worker': 'ヘルスケア要員',
'Heat Wave': '熱波',
'Heat and Humidity': '熱と湿度',
'Height': '身長',
'Height (cm)': '身長 (cm)',
'Height (m)': 'Height (m)',
'Help': ' ヘルプ ',
'Helps to monitor status of hospitals': '病院の現状把握に役立つ情報を管理します',
'Helps to report and search for Missing Persons': '行方不明者の報告と検索を支援します。',
'Helps to report and search for missing persons': 'Helps to report and search for missing persons',
'Here are the solution items related to the problem.': '問題に関連する解決案です。',
'Heritage Listed': '遺産登録',
'Hide Details': '詳細を隠す',
'Hierarchy Level 0 Name (e.g. Country)': '階層レベル0の名前(例: 国)',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierarchy Level 0 Name (i.e. Country)',
'Hierarchy Level 1 Name (e.g. Province)': '階層レベル1の名前 (例: 都道府県)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchy Level 1 Name (e.g. State or Province)',
'Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchy Level 2 Name (e.g. District or County)',
'Hierarchy Level 3 Name': '階層レベル3の名前',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchy Level 3 Name (e.g. City / Town / Village)',
'Hierarchy Level 4 Name': '階層レベル4の名前',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchy Level 4 Name (e.g. Neighbourhood)',
'Hierarchy Level 5 Name': 'Hierarchy Level 5 Name',
'High': '高',
'High Water': '最高水位',
'Hindu': 'ヒンズー教徒',
'History': '履歴',
'Hit the back button on your browser to try again.': 'ブラウザの「戻る」ボタンを押して、やり直してください。',
'Holiday Address': '休日の住所',
'Home': 'ホーム',
'Home Address': '自宅住所',
'Home City': 'Home City',
'Home Country': '所属国',
'Home Crime': '住居犯罪',
'Home Details': 'Home Details',
'Home Phone Number': 'Home Phone Number',
'Home Relative': 'Home Relative',
'Home added': 'Home added',
'Home deleted': 'Home deleted',
'Home updated': 'Home updated',
'Homes': 'Homes',
'Hospital': '病院',
'Hospital Details': '病院の詳細',
'Hospital Status Report': '病院ステータスレポート',
'Hospital information added': '病院情報を追加しました',
'Hospital information deleted': '病院情報を削除しました',
'Hospital information updated': '病院情報を更新しました',
'Hospital status assessment.': '病院ステータスアセスメント',
'Hospitals': '病院情報',
'Hot Spot': 'ホットスポット',
'Hour': '時間',
'Hourly': '1時間毎',
'Hours': 'Hours',
'Household kits received': '家事用品を受領しました',
'Household kits, source': '家事用品の送付元',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の男子がよく集まっていた場所と活動は?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の男子がよく集まっていた場所と活動は?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の女子がよく集まっていた場所と活動は?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の女子がよく集まっていた場所と活動は?',
'How do boys 13-17yrs spend most of their time now?': '現在、13-17歳の男子は普段何をして過ごしていますか?',
'How do boys <12yrs spend most of their time now?': '現在、12歳以下の男子は普段何をして過ごしていますか?',
'How do girls 13-17yrs spend most of their time now?': '現在、13-17歳の女子は普段何をして過ごしていますか?',
'How do girls <12yrs spend most of their time now?': '現在、12歳以下の女子は普段何をして過ごしていますか?',
'How does it work?': 'どのように動きますか?',
'How is this person affected by the disaster? (Select all that apply)': 'この人物の被災状況を記載してください(該当する項目を全て選択)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '水資源を確保できる地点までの距離を記載します。徒歩で往復し、待ち時間も含めた時間を記載してください。',
'How long does it take you to walk to the health service?': '医療サービスが提供されている場所まで、徒歩で必要な時間を記載します。',
'How long will the food last?': '食料がなくなるまでの期間',
'How long will this water resource last?': '水の供給が枯渇する時期',
'How many Boys (0-17 yrs) are Dead due to the crisis': '災害で死亡した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Injured due to the crisis': '災害で負傷した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Missing due to the crisis': '災害で行方不明となった少年の数(0-17歳)',
'How many Girls (0-17 yrs) are Dead due to the crisis': '災害で死亡した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Injured due to the crisis': '災害で負傷した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Missing due to the crisis': '災害で行方不明になった少女の数(0-17歳)',
'How many Men (18 yrs+) are Dead due to the crisis': '災害で死亡した男性の数(18歳以上)',
'How many Men (18 yrs+) are Injured due to the crisis': '災害で負傷した男性の数(18歳以上)',
'How many Men (18 yrs+) are Missing due to the crisis': '災害で行方不明となった男性の数(18歳以上)',
'How many Women (18 yrs+) are Dead due to the crisis': '災害で死亡した女性の数(18歳以上)',
'How many Women (18 yrs+) are Injured due to the crisis': '災害で負傷した女性の数(18歳以上)',
'How many Women (18 yrs+) are Missing due to the crisis': '災害で行方不明となった女性の数(18歳以上)',
'How many days will the supplies last?': '支援物資がなくなるまでの日数',
'How many doctors in the health centers are still actively working?': 'ヘルスセンター内の医師の人数を記載してください',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '居住不可になった家屋数を記載してください(居住不可 = 基礎構造や土台部分の破壊など)',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '災害によって破損したが、まだ利用が可能である住居の数を記載してください(利用可能 = 窓の破壊、壁のヒビ、屋根の軽微な破損など)',
'How many latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレの数を記載してください',
'How many midwives in the health centers are still actively working?': '医療センター内の助産師の人数を記載してください',
'How many new cases have been admitted to this facility in the past 24h?': '過去24時間でこの施設で受け入れたケースの数は?',
'How many nurses in the health centers are still actively working?': '保健所で活動可能な看護師は何人居ますか?',
'How many of the patients with the disease died in the past 24h at this facility?': 'この施設で過去24時間で何人の患者がこの病気で亡くなりましたか?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'この地域の、登校していない学童期男児(6-12歳)の数を記載してください。',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'この地域の、登校していない学童期女児(6-12歳)の数を記載してください。',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '平常通りの授業を実施できている小学校・中学校・高校の数を記入してください',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'この地域の、登校していない中高校生年齢男子(13-18歳)の数を記載してください。',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'この地域の、登校していない女子中高生(13-18歳)の数を記載してください。',
'How many patients with the disease are currently hospitalized at this facility?': 'この病気のためにこの施設に入院している患者は現在何人ですか?',
'How many primary school age boys (6-12) are in the affected area?': '被災地域内の学童期男児(6-12歳)の数を記載してください',
'How many primary school age girls (6-12) are in the affected area?': '被災地域内の学童期女児(6-12歳)の数を記載してください。',
'How many primary/secondary schools were opening prior to the disaster?': '災害発生前に授業が行われていた小学校・中学校・高校の数を記載してください',
'How many secondary school age boys (13-18) are in the affected area?': '被災地域内の男子中学生・男子高校生(13-18歳)の数を記載してください',
'How many secondary school age girls (13-18) are in the affected area?': '被災地域内の中高生年齢女子(13-18歳)の数を記載してください。',
'How many teachers have been affected by the disaster (affected = unable to work)?': '被災し、授業ができない状態の教師の人数を記載してください',
'How many teachers worked in the schools prior to the disaster?': '災害発生前の教師の人数を記載してください',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'どの程度詳細な情報が表示されるかを定義します。ズームすることで詳細が表示されるようになりますが、そのかわり、広域を見渡すことができなくなります。逆に、ズームしないことで広域を表示できますが、詳細情報の確認は行えなくなります。',
'Human Resource': '人的資源',
'Human Resource Details': 'Human Resource Details',
'Human Resource Management': '人的資源マネージメント',
'Human Resource added': 'Human Resource added',
'Human Resource removed': 'Human Resource removed',
'Human Resource updated': 'Human Resource updated',
'Human Resources': '人的資源',
'Human Resources Management': '人的資源管理',
'Humanitarian NGO': '人道支援NGO',
'Hurricane': 'ハリケーン',
'Hurricane Force Wind': 'ハリケーンの風力',
'Hybrid Layer': 'Hybrid Layer',
'Hygiene': '衛生',
'Hygiene NFIs': '衛生用品',
'Hygiene kits received': '衛生用品を受領した',
'Hygiene kits, source': '衛生用品の送付元',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生上の問題',
'I accept. Create my account.': 'I accept. Create my account.',
'I am available in the following area(s)': '以下の地域を担当できます',
'ID Label': 'IDラベル',
'ID Label: ': 'IDラベル: ',
'ID Tag': 'ID タグ',
'ID Tag Number': 'IDタグ番号',
'ID type': 'IDタイプ',
'Ice Pressure': '氷結圧力',
'Iceberg': 'アイスバーグ',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'できればソースファイルの完全なURLを記載します。難しい場合はデータ入手元のメモでも構いません。',
'Identification': 'ID',
'Identification Report': 'IDレポート',
'Identification Reports': 'IDレポート',
'Identification Status': 'IDステータス',
'Identification label of the Storage bin.': '備蓄コンテナの区別用ラベル番号。',
'Identified as': '判明した身元',
'Identified by': '身元確認者',
'Identity': '身元確認',
'Identity Details': '身元確認の詳細',
'Identity added': '身元情報を追加しました',
'Identity deleted': '身元確認を削除しました',
'Identity updated': '身元確認を更新しました',
'If Staff have login accounts then they are given access to edit the details of the': 'スタッフがログイン用アカウントを有している場合、以下項目の詳細を編集することができます:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '「Unit = m, Base Unit = Km」の場合、「1m = 0.001 km」なので乗数は0.001 です。',
'If a ticket was issued then please provide the Ticket ID.': 'If a ticket was issued then please provide the Ticket ID.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'このドメインの電子メールアドレスを所有するユーザーを認証する場合は、承認がさらに必要かどうか、必要なら誰が承認するか、を決めるのに承認者フィールドを使用します。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーがアクセスしたときに、全てのレコードがログに保存されます。無効にすると、モジュール毎に有効にすることができます。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーが編集したすべてのレコードを記録します。無効にすると、モジュール毎に有効にできます。',
'If it is a URL leading to HTML, then this will downloaded.': 'If it is a URL leading to HTML, then this will downloaded.',
'If neither are defined, then the Default Marker is used.': 'もし両方共定義されていない場合、デフォルトマーカーが使われます。',
'If no marker defined then the system default marker is used': 'マーカーが定義されていない場合は、システムのデフォルトマーカーを使用します。',
'If no, specify why': 'いいえ、の場合はその理由を記載してください',
'If none are selected, then all are searched.': 'もしなにも選択しなければ、全てを検索します',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.",
'If the location is a geographic area, then state at what level here.': '場所が地理的に確定できる場所ならば、その場所のレベルを記載してくだい。',
'If the request is for %s, please enter the details on the next screen.': 'If the request is for %s, please enter the details on the next screen.',
'If the request is for type "Other", you should enter a summary of the request here.': '支援要請が"その他"の場合、概要をここに入力する必要があります',
'If the request type is "Other", please enter request details here.': 'If the request type is "Other", please enter request details here.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとして登録されるように指定することができます',
'If this is set to True then mails will be deleted from the server after downloading.': 'Trueに設定されている場合は、メールはダウンロード後にサーバーから削除されます。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
'If this record should be restricted then select which role is required to access the record here.': 'このレコードへのアクセスを制限する際には、アクセスに必要となる権限を選択してください',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'このレコードを制限したい場合、アクセスを許可する権限を指定してください。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
'If yes, specify what and by whom': '「はい」の場合、供給される食料と供給元',
'If yes, which and how': '「はい」の場合、混乱している場所や原因を記載',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '参照文書を入力しない場合は、データ検証のために入力者の電子メールが表示されます。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Add Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
'If you know what the Geonames ID of this location is then you can enter it here.': 'このロケーションの Geonames ID がある場合、ここに入力してください。',
'If you know what the OSM ID of this location is then you can enter it here.': 'このロケーションの OSM ID がある場合、ここに入力してください。',
'If you need to add a new document then you can click here to attach one.': '文書の添付はこのページから可能です。',
'If you want several values, then separate with': '複数の値を入力したい場合、この文字で分割してください : ',
'If you would like to help, then please': 'ご協力いただける方は登録をお願いします',
'Illegal Immigrant': '不法移民',
'Image': '画像',
'Image Details': '画像の詳細',
'Image File(s), one image per page': 'Image File(s), one image per page',
'Image Tags': '画像のタグ',
'Image Type': '画像のタイプ',
'Image Upload': '画像のアップロード',
'Image added': '画像を追加しました',
'Image deleted': '画像を削除しました',
'Image updated': '画像を更新しました',
'Image/Attachment': '画像/添付資料',
'Image/Other Attachment': '画像/その他の添付ファイル',
'Imagery': '画像',
'Images': '画像',
'Immediate reconstruction assistance, Rank': '建築物の緊急修理 / 再建築支援、ランク',
'Impact Assessment Summaries': '災害影響範囲アセスメントの概要',
'Impact Assessments': '災害影響範囲アセスメント',
'Impact Baselines': '影響範囲の基準値',
'Impact Details': '被害の詳細',
'Impact Type': '災害影響タイプ',
'Impact Type Details': '災害影響のタイプ詳細',
'Impact Type added': '災害の影響タイプを追加しました',
'Impact Type deleted': '影響範囲タイプを削除しました',
'Impact Type updated': '災害影響のタイプを更新しました',
'Impact Types': '災害影響のタイプ',
'Impact added': '被災影響を追加しました',
'Impact deleted': '影響範囲を削除しました',
'Impact updated': '被災状況を更新しました',
'Impacts': '影響',
'Import': 'インポート',
'Import & Export Data': 'データのインポートとエクスポート',
'Import Data': 'データのインポート',
'Import File': 'Import File',
'Import File Details': 'Import File Details',
'Import File deleted': 'Import File deleted',
'Import Files': 'Import Files',
'Import Job': 'Jobのインポート',
'Import Job Count': 'Import Job Count',
'Import Jobs': 'Jobsのインポート',
'Import New File': 'Import New File',
'Import and Export': 'インポートとエクスポート',
'Import from Ushahidi Instance': 'Ushahidi インスタンスから設定をインポート',
'Import if Master': 'マスターなら取り込む',
'Import job created': 'Import jobを作成しました',
'Import multiple tables as CSV': '複数のテーブルをCSVとしてインポート',
'Import/Export': 'インポート/エクスポート',
'Important': '重要',
'Importantly where there are no aid services being provided': '救護サービスが提供されていない地域において重要となります',
'Imported': 'インポートしました',
'Importing data from spreadsheets': 'スプレッドシートからデータをインポートしています',
'Improper decontamination': '不適切な汚染の除去',
'Improper handling of dead bodies': '誤った扱いをされている遺体',
'In Catalogs': 'In Catalogs',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'GeoServerでは、これはレイヤ名です。WFS getCapabilitiesでは、これはコロン(:)の後のFeatureType名の部分です。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'GeoServer では、これはワークスペース名です。WFS getCapabilities では、これはコロン「:」の前の FeatureType の部分となります。',
'In Inventories': 'この物資の在処',
'In Process': '実行中',
'In Progress': '実行中',
'In Transit': '輸送中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'ウィンドウレイアウトでは地図がウィンドウ全体に最大化されるため、ここで大きな値を設定する必要はありません。',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般的に、コミュニティ内の高齢者、障がい者、子供、青年、女性たちが最も必要としている物資やサービスがなんであるかを記載してください',
'Inbound Mail Settings': '着信メール設定',
'Inbox': '受信箱',
'Incident': 'インシデント',
'Incident Categories': 'インシデントカテゴリ',
'Incident Details': 'インシデントの詳細',
'Incident Report': 'インシデントレポート',
'Incident Report Details': 'インシデントレポートの詳細',
'Incident Report added': '災害影響範囲レポートを追加しました',
'Incident Report deleted': 'インシデントレポートを削除しました',
'Incident Report updated': 'インシデントレポートを更新しました',
'Incident Reporting': 'インシデントレポート',
'Incident Reporting System': 'インシデントの報告を行ないます',
'Incident Reports': 'インシデントレポート',
'Incident added': 'インシデントを追加しました',
'Incident deleted': 'インシデントを削除しました',
'Incident updated': 'インシデントを更新しました',
'Incidents': 'インシデント',
'Include any special requirements such as equipment which they need to bring.': 'Include any special requirements such as equipment which they need to bring.',
'Incoming': '入荷',
'Incoming Shipment canceled': '到着する配送が取消しされました',
'Incoming Shipment updated': '入荷した物資が更新されました',
'Incomplete': '未完了',
'Individuals': '個人',
'Industrial': '産業',
'Industrial Crime': '産業犯罪',
'Industry Fire': '工場から出火',
'Industry close to village/camp': '村落/仮泊施設の周辺に工場が存在',
'Infant (0-1)': '乳児(0-1歳)',
'Infectious Disease': '感染症',
'Infectious Disease (Hazardous Material)': 'Infectious Disease (Hazardous Material)',
'Infectious Diseases': '感染症',
'Infestation': '感染',
'Informal Leader': '非公式なリーダー',
'Informal camp': '非指定避難所',
'Information gaps': '情報のギャップ',
'Infusion catheters available': '注入カテーテルが利用可能',
'Infusion catheters need per 24h': '24時間毎に必要な注入カテーテル数',
'Infusion catheters needed per 24h': '24時間ごとに、注入カテーテルが必要',
'Infusions available': '点滴が利用可能',
'Infusions needed per 24h': '24時間毎に必要な点滴の数',
'Input Job': 'Jobのインポート',
'Inspected': '調査済み',
'Inspection Date': '調査した日付',
'Inspection date and time': '調査日時',
'Inspection time': '調査した時刻',
'Inspector ID': '調査者ID',
'Instance Type': 'インスタンスタイプ',
'Instant Porridge': 'インスタント粥',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
'Institution': 'その他の組織',
'Insufficient': '不足',
'Insufficient Privileges': '権限が足りません',
'Insufficient privileges': 'Insufficient privileges',
'Insufficient vars: Need module, resource, jresource, instance': '不十分な変数: module, resource, jresource, instance が必要です',
'Insurance Renewal Due': 'Insurance Renewal Due',
'Intake Items': 'アイテムの受け入れ',
'Intergovernmental Organization': '国際政府間組織',
'Interior walls, partitions': '室内の壁、仕切り',
'Internal Features': '内部機能',
'Internal State': '内部状態',
'International NGO': '国際NGO',
'International Organization': '国際機関',
'International Staff': '国外からのスタッフ',
'Intervention': '介入',
'Interview taking place at': 'インタビュー実施場所',
'Invalid': '無効な',
'Invalid Query': '無効なクエリ',
'Invalid email': '無効な電子メール',
'Invalid login': '無効なログイン',
'Invalid phone number': 'Invalid phone number',
'Invalid phone number!': 'Invalid phone number!',
'Invalid request!': 'リクエストは無効です。',
'Invalid ticket': '無効なチケット',
'Inventories': '在庫管理',
'Inventories with Item': '在庫アイテム',
'Inventory': '在庫',
'Inventory Item': '備蓄物資',
'Inventory Item Details': '救援物資の在庫詳細',
'Inventory Item added': '救援物資の在庫を追加しました',
'Inventory Item deleted': '備蓄物資を削除しました',
'Inventory Item updated': '備蓄物資を更新しました',
'Inventory Items': '備蓄物資',
'Inventory Items Available for Request Item': '要求された物資に適合する、倉庫内の物資',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.',
'Inventory Management': '物資の管理',
'Inventory Stock Position': 'Inventory Stock Position',
'Inventory Store': '物資集積地点',
'Inventory Store Details': '物資集積地点の詳細',
'Inventory Store added': '物資集積地点を追加しました',
'Inventory Store deleted': '物資集積地点を削除しました',
'Inventory Store updated': '物資集積地点を更新しました',
'Inventory Stores': '物資集積地点',
'Inventory functionality is available for:': '備蓄機能を利用可能:',
'Inventory of Effects': '携帯品の目録',
'Inventory/Ledger': '在庫 / 元帳',
'Is adequate food and water available for these institutions?': '関係者に対して十分な水と食料が供給されていますか?',
'Is editing level L%d locations allowed?': 'Is editing level L%d locations allowed?',
'Is it safe to collect water?': '水の確保は安全に行えるか?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '村落/集落の近くに、工場あるいは農業化学プラントなどが存在しますか?',
'Is this a strict hierarchy?': 'これは厳密な階層構造ですか?',
'Issuing Authority': '発行機関',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'It is built using the Template agreed by a group of NGOs working together as the': '聞き取り項目のテンプレートは、以下リンクのNGO組織と協同で作成されています。',
'Italian': 'Italian',
'Item': '物資',
'Item Added to Shipment': '輸送情報に物資を追加する',
'Item Catalog Categories': '物資カタログカテゴリ',
'Item Catalog Category': '救援物資カタログのカテゴリ',
'Item Catalog Category Details': '救援物資カタログのカテゴリ詳細',
'Item Catalog Category added': '救援物資カタログのカテゴリを追加しました',
'Item Catalog Category deleted': '救援物資カタログのカテゴリを削除しました',
'Item Catalog Category updated': '物資カタログカテゴリを更新しました',
'Item Catalog Details': '物資カタログの詳細',
'Item Catalog added': '救援物資カタログを追加しました',
'Item Catalog deleted': '物資カタログを削除しました',
'Item Catalog updated': '物資カタログを更新しました',
'Item Catalogs': '救援物資カタログ',
'Item Categories': '物資カテゴリ',
'Item Category': '物資カテゴリ',
'Item Category Details': '物資カテゴリの詳細',
'Item Category added': '救援物資カテゴリを追加しました',
'Item Category deleted': '救援物資カテゴリを削除しました',
'Item Category updated': '物資カテゴリを更新しました',
'Item Details': '救援物資の詳細',
'Item Pack Details': '救援物資パックの詳細',
'Item Pack added': '物資パックを追加しました',
'Item Pack deleted': '救援物資のパックを削除しました',
'Item Pack updated': '救援物資パックを更新しました',
'Item Packs': '物資パック',
'Item Sub-Categories': '救援物資のサブカテゴリ',
'Item Sub-Category': '物資サブカテゴリ',
'Item Sub-Category Details': '物資サブカテゴリの詳細',
'Item Sub-Category added': '救援物資のサブカテゴリを追加しました',
'Item Sub-Category deleted': '物資サブカテゴリを削除しました',
'Item Sub-Category updated': '救援物資サブカテゴリを更新しました',
'Item added': '救援物資を追加しました',
'Item added to Inventory': 'Item added to Inventory',
'Item added to shipment': '物資が輸送に回りました',
'Item already in Bundle!': '物資がすでにバンドルに存在しています。',
'Item already in Kit!': '救援物資は既にキットに存在しています',
'Item already in budget!': '物資は既に予算に登録されています',
'Item deleted': '物資を削除しました',
'Item removed from Inventory': 'Item removed from Inventory',
'Item updated': '救援物資を更新しました',
'Items': '救援物資',
'Items in Category can be Assets': 'Items in Category can be Assets',
'Japan': '日本',
'Japanese': '日本語',
'Jerry can': 'ジェリ缶',
'Jew': 'ユダヤ教徒',
'Job Market': '求人',
'Job Role': 'Job Role',
'Job Role Catalog': 'Job Role Catalog',
'Job Role Details': 'Job Role Details',
'Job Role added': 'Job Role added',
'Job Role deleted': 'Job Role deleted',
'Job Role updated': 'Job Role updated',
'Job Roles': 'Job Roles',
'Job Title': '肩書き',
'Jobs': '職業',
'Journal': 'Journal',
'Journal Entry Details': 'Journal Entry Details',
'Journal entry added': 'Journal entry added',
'Journal entry deleted': 'Journal entry deleted',
'Journal entry updated': 'Journal entry updated',
'Just Once': '一度だけ',
'KPIs': 'KPI',
'Key': 'キー',
'Key Details': 'Keyの詳細',
'Key added': 'キーを追加しました',
'Key deleted': 'キーを削除しました',
'Key updated': 'キーを更新しました',
'Keys': 'キー',
'Kit': 'キット',
'Kit Contents': 'Kitの内容',
'Kit Details': 'Kitの詳細',
'Kit Updated': 'キットを更新しました',
'Kit added': 'キットを追加しました',
'Kit deleted': 'キットを削除しました',
'Kit updated': 'キットを更新しました',
'Kits': 'キット',
'Known Identities': '既知のID',
'Known incidents of violence against women/girls': '女性に対する暴力行為が発生した',
'Known incidents of violence since disaster': '災害発生後に暴力行為が発生した',
'Korean': 'Korean',
'LICENSE': 'ライセンス',
'LMS Administration': 'LMSの管理',
'Label': 'ラベル',
'Lack of material': '資材不足',
'Lack of school uniform': '学校制服が不足',
'Lack of supplies at school': '学校用物資の不足',
'Lack of transport to school': '学校への輸送手段の不足',
'Lactating women': '授乳中の女性の数',
'Lahar': 'ラハール',
'Landslide': '地すべり',
'Language': '言語',
'Last Name': '名前',
'Last known location': '最後に目撃された場所',
'Last name': '名前',
'Last synchronization time': 'データ同期の最終実施時刻',
'Last updated ': '最終更新日',
'Last updated by': '最終更新者',
'Last updated on': '直近のアップデート実施時刻',
'Latitude': '緯度',
'Latitude & Longitude': '緯度&経度',
'Latitude is North-South (Up-Down).': '緯度は南北(上下)です',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は南北方向(上下)を定義します。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は赤道では0、北半球ではプラス、南半球ではマイナスになります',
'Latitude of Map Center': 'Latitude of Map Center',
'Latitude of far northern end of the region of interest.': 'Latitude of far northern end of the region of interest.',
'Latitude of far southern end of the region of interest.': 'Latitude of far southern end of the region of interest.',
'Latitude should be between': '緯度の値として有効な値は',
'Latrines': 'トイレ',
'Law enforcement, military, homeland and local/private security': '法執行機関、自衛隊、警察および警備会社',
'Layer': 'レイヤ',
'Layer Details': 'レイヤの詳細',
'Layer ID': 'Layer ID',
'Layer Name': 'Layer Name',
'Layer Type': 'Layer Type',
'Layer added': 'レイヤを追加しました',
'Layer deleted': 'レイヤを削除しました',
'Layer has been Disabled': 'Layer has been Disabled',
'Layer has been Enabled': 'Layer has been Enabled',
'Layer updated': 'レイヤを更新しました',
'Layers': 'レイヤ',
'Layers updated': 'レイヤを更新しました',
'Layout': 'レイアウト',
'Leader': 'Leader',
'Leave blank to request an unskilled person': 'Leave blank to request an unskilled person',
'Legend Format': '凡例形式',
'Length': '長さ',
'Length (m)': 'Length (m)',
'Level': 'レベル',
'Level 1': 'レベル1',
'Level 1 Assessment Details': 'レベル1アセスメントの詳細',
'Level 1 Assessment added': 'レベル1アセスメントを追加しました',
'Level 1 Assessment deleted': 'レベル1のアセスメントを削除しました',
'Level 1 Assessment updated': 'レベル1アセスメントを更新しました',
'Level 1 Assessments': 'レベル1 アセスメント',
'Level 2': 'レベル2',
'Level 2 Assessment Details': 'レベル2アセスメントの詳細',
'Level 2 Assessment added': 'レベル2アセスメントを追加しました',
'Level 2 Assessment deleted': 'レベル2アセスメントを削除しました',
'Level 2 Assessment updated': 'レベル2アセスメントを更新しました',
'Level 2 Assessments': 'レベル2アセスメント',
'Level 2 or detailed engineering evaluation recommended': 'レベル2あるいは詳細な技術的評価を行うことを推奨します',
"Level is higher than parent's": '親情報よりも高いレベルです',
'Library support not available for OpenID': 'OpenIDのライブラリサポートが利用できません',
'License Number': 'License Number',
'License Plate': 'ナンバープレート',
'Line': '行',
'LineString': '折れ線',
'Link Item & Shipment': 'アイテムと輸送を紐付ける',
'Link an Item & Shipment': 'アイテムと出荷を結び付ける',
'Linked Records': '参照しているレコード',
'Linked records': '関連しているレコード',
'List': '一覧',
'List / Add Baseline Types': '基準値タイプの一覧 / 追加',
'List / Add Impact Types': '災害影響のタイプを表示 / 追加',
'List / Add Services': 'サービスの一覧表示 / 追加',
'List / Add Types': 'タイプの一覧表示 / 追加',
'List Activities': '支援活動一覧',
'List Aid Requests': '援助要請の一覧',
'List All': '全項目一覧',
'List All Assets': 'List All Assets',
'List All Catalog Items': 'List All Catalog Items',
'List All Commitments': 'List All Commitments',
'List All Entries': '全てのエントリ一覧',
'List All Item Categories': 'List All Item Categories',
'List All Memberships': '全てのメンバシップ一覧',
'List All Received Shipments': 'List All Received Shipments',
'List All Records': 'List All Records',
'List All Reports': '報告すべての一覧',
'List All Requested Items': 'List All Requested Items',
'List All Requested Skills': 'List All Requested Skills',
'List All Requests': 'List All Requests',
'List All Sent Shipments': 'List All Sent Shipments',
'List All Vehicles': 'List All Vehicles',
'List Alternative Items': '代わりの物資一覧',
'List Assessment Summaries': 'アセスメント要約の一覧',
'List Assessments': 'アセスメント一覧',
'List Asset Assignments': '資産割り当ての一覧',
'List Assets': '資産一覧',
'List Availability': 'List Availability',
'List Baseline Types': '基準値タイプ一覧',
'List Baselines': '基準値一覧',
'List Brands': '銘柄の一覧',
'List Budgets': '予算の一覧',
'List Bundles': 'Bundleの一覧',
'List Camp Services': 'List Camp Services',
'List Camp Types': 'List Camp Types',
'List Camps': 'List Camps',
'List Catalog Items': '物資カタログの一覧',
'List Catalogs': 'List Catalogs',
'List Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係一覧',
'List Certificates': 'List Certificates',
'List Certifications': 'List Certifications',
'List Checklists': 'チェックリスト一覧',
'List Cluster': 'クラスタ一覧',
'List Cluster Subsectors': 'クラスタのサブセクタ一覧',
'List Clusters': 'クラスタ一覧',
'List Commitment Items': 'コミットされた救援物資の一覧',
'List Commitments': 'コミットメントの一覧',
'List Committed People': 'List Committed People',
'List Competency Ratings': 'List Competency Ratings',
'List Configs': '設定一覧',
'List Conflicts': 'データ競合一覧',
'List Contact Information': '連絡先情報の一覧',
'List Contacts': '連絡先一覧',
'List Course Certificates': 'List Course Certificates',
'List Courses': 'List Courses',
'List Credentials': '証明書一覧',
'List Current': '現在の一覧',
'List Distribution Items': '配給物資リスト',
'List Distributions': '配給所リスト',
'List Documents': '文書の一覧',
'List Donors': '資金提供組織一覧',
'List Events': 'List Events',
'List Facilities': 'List Facilities',
'List Feature Classes': '機能クラス一覧',
'List Feature Layers': 'Featureレイヤリスト',
'List Flood Reports': '洪水レポート一覧',
'List GPS data': 'List GPS data',
'List GPX Layers': 'GPXレイヤ一覧',
'List Groups': 'グループ一覧',
'List Groups/View Members': 'グループを一覧/メンバーを表示',
'List Homes': 'List Homes',
'List Hospitals': '病院の一覧',
'List Human Resources': 'List Human Resources',
'List Identities': 'ID一覧',
'List Images': '画像の一覧',
'List Impact Assessments': '災害影響範囲アセスメント一覧',
'List Impact Types': '災害影響のタイプ一覧',
'List Impacts': '被害一覧',
'List Import Files': 'List Import Files',
'List Incident Reports': 'インシデントレポート一覧',
'List Incidents': 'インシデント一覧',
'List Inventory Items': '備蓄物資リスト',
'List Inventory Stores': '物資集積地点リスト',
'List Item Catalog Categories': '救援物資カタログのカテゴリ一覧',
'List Item Catalogs': '救援物資カタログ一覧',
'List Item Categories': '物資カテゴリ一覧',
'List Item Packs': '物資パックの一覧',
'List Item Sub-Categories': '物資サブカテゴリ一覧',
'List Items': '救援物資一覧',
'List Items in Inventory': 'List Items in Inventory',
'List Job Roles': 'List Job Roles',
'List Keys': 'Keyの一覧',
'List Kits': 'Kit一覧',
'List Layers': 'レイヤ一覧',
'List Level 1 Assessments': 'レベル1アセスメントの一覧',
'List Level 1 assessments': 'レベル1アセスメント一覧',
'List Level 2 Assessments': 'レベル2のアセスメント一覧',
'List Level 2 assessments': 'レベル2アセスメント一覧',
'List Locations': 'ロケーション一覧',
'List Log Entries': 'ログエントリ一覧',
'List Map Configurations': '地図設定の一覧',
'List Markers': 'マーカー一覧',
'List Members': 'メンバ一覧',
'List Memberships': 'メンバシップ一覧',
'List Messages': 'メッセージ一覧',
'List Metadata': 'メタデータ一覧',
'List Missing Persons': '行方不明者リストを表示',
'List Missions': 'List Missions',
'List Need Types': '需要タイプ一覧',
'List Needs': 'ニーズ一覧',
'List Notes': '追加情報一覧',
'List Offices': 'オフィス一覧',
'List Organizations': '団体一覧',
'List Patients': 'List Patients',
'List Peers': 'データ同期先一覧',
'List Personal Effects': '携帯品のリスト',
'List Persons': '人物情報一覧',
'List Photos': '写真リスト',
'List Population Statistics': 'List Population Statistics',
'List Positions': '場所一覧',
'List Problems': '問題一覧',
'List Projections': '地図投影法リスト',
'List Projects': 'プロジェクト一覧',
'List Rapid Assessments': '被災地の現況アセスメント一覧',
'List Received Items': '受領された物資の一覧',
'List Received Shipments': '受領された輸送一覧',
'List Records': 'レコード一覧',
'List Registrations': '登録証明書の一覧',
'List Relatives': 'List Relatives',
'List Reports': 'レポート一覧',
'List Request Items': '物資要請リスト',
'List Requested Skills': 'List Requested Skills',
'List Requests': '支援要請の一覧',
'List Resources': 'リソース一覧',
'List Responses': '回答の一覧',
'List Rivers': '河川リスト',
'List Roles': '役割一覧',
'List Rooms': 'List Rooms',
'List Scenarios': 'List Scenarios',
'List Sections': 'Section一覧',
'List Sectors': '活動分野の一覧',
'List Sent Items': '送付した物資一覧',
'List Sent Shipments': '送付済み物資一覧',
'List Service Profiles': 'サービスプロファイル一覧',
'List Settings': '設定一覧',
'List Shelter Services': '避難所での提供サービス一覧',
'List Shelter Types': '避難所タイプ一覧',
'List Shelters': '避難所の一覧',
'List Shipment Transit Logs': '物資輸送履歴の一覧',
'List Shipment/Way Bills': '輸送伝票/貨物運送状の一覧',
'List Shipment<>Item Relation': '輸送と物資の関連性一覧',
'List Shipments': '配送の一覧',
'List Sites': 'Site一覧',
'List Skill Equivalences': 'List Skill Equivalences',
'List Skill Provisions': 'List Skill Provisions',
'List Skill Types': 'スキルタイプを一覧表示',
'List Skills': 'スキルを一覧表示',
'List Solutions': '解決案一覧',
'List Staff': 'スタッフ一覧',
'List Staff Types': 'スタッフタイプ一覧',
'List Status': '状況一覧',
'List Storage Bin Type(s)': 'Storage Binタイプ一覧',
'List Storage Bins': 'Storage Bin一覧',
'List Storage Location': '備蓄地点の一覧',
'List Subscriptions': '寄付申し込み一覧',
'List Subsectors': 'List Subsectors',
'List Support Requests': '支援要求のリスト',
'List Survey Answers': '調査の回答の一覧',
'List Survey Questions': 'Survey Question一覧',
'List Survey Sections': 'Survey Sectionsの一覧',
'List Survey Series': '一連の調査リスト',
'List Survey Templates': '調査テンプレートの一覧',
'List TMS Layers': 'TMS レイヤの一覧',
'List Tasks': 'タスク一覧',
'List Teams': 'チーム一覧',
'List Themes': 'テーマ一覧',
'List Tickets': 'チケット一覧',
'List Tracks': '追跡情報の一覧',
'List Trainings': 'List Trainings',
'List Units': '単位一覧',
'List Users': 'ユーザ一覧',
'List Vehicle Details': 'List Vehicle Details',
'List Vehicles': 'List Vehicles',
'List Volunteers': 'ボランティアの表示',
'List WMS Layers': 'WMSレイヤ一覧',
'List Warehouse Items': '倉庫に備蓄中の物資一覧',
'List Warehouses': '倉庫の一覧',
'List all': '全項目を表示',
'List available Scenarios': 'List available Scenarios',
'List of CSV files': 'List of CSV files',
'List of CSV files uploaded': 'List of CSV files uploaded',
'List of Items': '物資一覧',
'List of Missing Persons': '行方不明者リスト',
'List of Peers': 'データ同期先一覧',
'List of Reports': 'レポート一覧',
'List of Requests': '支援要請の一覧',
'List of Roles': '権限リスト',
'List of Spreadsheets': 'スプレッドシート一覧',
'List of Spreadsheets uploaded': 'アップロード済スプレッドシート一覧',
'List of Volunteers for this skill set': 'このスキルを所持するボランティアの一覧',
'List of addresses': '住所一覧',
'List unidentified': '身元不明者の一覧',
'List/Add': '一覧/追加',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '救援団体は自身の支援活動の内容と場所を登録し、公開することで、他の組織との活動を調整することが可能となります。',
'Live Help': 'ライブヘルプ',
'Livelihood': '生計',
'Load Cleaned Data into Database': '整形したデータをデータベースへロード',
'Load Details': '詳細情報の読み込み',
'Load Raw File into Grid': 'Rawファイルをグリッドにロードしてください',
'Load the details to help decide which is the best one to keep out of the 2.': '2つのうちどちらを残すほうがよいか判断するため、詳細情報を確認します。',
'Loading': '読み込み中',
'Loading Locations': 'ロケーションデータロード中',
'Loading Locations...': '位置を読込みしています ...',
'Local Name': 'ローカル名',
'Local Names': 'ローカル名',
'Location': 'ロケーション',
'Location 1': 'ロケーション 1',
'Location 2': 'ロケーション 2',
'Location De-duplicated': 'ロケーションの重複解消',
'Location Details': 'ロケーションの詳細',
'Location Hierarchy Level 0 Name': 'ロケーション階層レベル0の名前',
'Location Hierarchy Level 1 Name': 'ロケーション階層レベル1の名前',
'Location Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Location Hierarchy Level 3 Name': 'ロケーション階層レベル3の名前',
'Location Hierarchy Level 4 Name': 'ロケーション階層レベル4の名前',
'Location Hierarchy Level 5 Name': 'ロケーション階層レベル5の名前',
'Location added': 'ロケーションを追加しました',
'Location cannot be converted into a group.': 'ロケーションはグループに変換できません',
'Location deleted': 'ロケーションを削除しました',
'Location details': 'ロケーションの詳細',
'Location group cannot be a parent.': 'ロケーショングループは親にできません',
'Location group cannot have a parent.': 'ロケーショングループに親情報がありません。',
'Location groups can be used in the Regions menu.': 'Location groups can be used in the Regions menu.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.',
'Location updated': 'ロケーションを更新しました',
'Location: ': 'ロケーション: ',
'Locations': 'ロケーション',
'Locations De-duplicator': 'ロケーションの重複解消',
'Locations of this level need to have a parent of level': 'このレベルのロケーションには、親属性となるレベルが必要です',
'Locations should be different!': '異なる位置を設定してください!',
'Lockdown': 'ロックダウン(封鎖)',
'Log': 'ログ',
'Log Entry Details': 'ログエントリの詳細',
'Log entry added': 'ログエントリを追加しました',
'Log entry deleted': 'ログエントリを削除しました',
'Log entry updated': 'ログエントリを更新しました',
'Logged in': 'ログインしました',
'Logged out': 'ログアウトしました',
'Login': 'ログイン',
'Logistics': '物流',
'Logistics Management': '物流管理',
'Logistics Management System': '物流管理システム',
'Logo': 'ロゴ',
'Logo file %s missing!': 'ロゴファイル%sが見つかりません。',
'Logout': 'ログアウト',
'Long Text': '詳細テキスト',
'Longitude': '経度',
'Longitude is West - East (sideways).': '経度は東西です(横方向)',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度は東西(横)です。経度は子午線(グリニッジ標準時)でゼロ、東(ヨーロッパ、アジア)でプラスです。西(大西洋、アメリカ)でマイナスです。',
'Longitude is West-East (sideways).': 'Longitude is West-East (sideways).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度はグリニッジ子午線(グリニッジ標準時)上が0度です。東側に向かってヨーロッパやアジアの各地で正の値となります。西に向かって大西洋やアメリカの各地で負の値となります。',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude of Map Center': 'Longitude of Map Center',
'Longitude of far eastern end of the region of interest.': 'Longitude of far eastern end of the region of interest.',
'Longitude of far western end of the region of interest.': 'Longitude of far western end of the region of interest.',
'Longitude should be between': '経度の値の有効な範囲は',
'Looking up Parents': '親を検索',
'Looting': '略奪',
'Lost': '行方不明',
'Lost Password': 'パスワードの紛失',
'Low': '低',
'Magnetic Storm': '磁気嵐',
'Main cash source': '主な現金収入源',
'Main income sources before disaster': '災害発生前の主な収入源',
'Major Damage': 'Major Damage',
'Major expenses': '主な費用',
'Major outward damage': '大きな損傷あり',
'Make Commitment': 'コミットの作成',
'Make New Commitment': 'Make New Commitment',
'Make Pledge': '寄付の作成',
'Make Request': '支援を要請する',
'Make a Request': '支援要請を登録',
'Make a Request for Aid': '援助要請を登録',
'Make preparations per the <instruction>': '<instruction>に従って準備作業を行う',
'Male': '男性',
'Malnutrition present prior to disaster': '災害前から栄養失調が発生していた',
'Manage': '管理',
'Manage Category': 'カテゴリ管理',
'Manage Events': 'Manage Events',
'Manage Item catalog': '物資カタログの管理',
'Manage Kits': 'Kitsの管理',
'Manage Relief Item Catalogue': '救援アイテムカタログの管理',
'Manage Sub-Category': 'サブカテゴリの管理',
'Manage Users & Roles': 'ユーザと役割の管理',
'Manage Vehicles': 'Manage Vehicles',
'Manage Warehouses/Sites': '倉庫/Sitesの管理',
'Manage Your Facilities': 'Manage Your Facilities',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '支援物資、資産、人員、その他のリソースに対する要求を管理します。支援物資が要求された時に在庫と照合します。',
'Manage requests of hospitals for assistance.': '病院からの支援要請の管理',
'Manage volunteers by capturing their skills, availability and allocation': 'ボランティアのスキル、稼働状況、割り当て状況を管理します',
'Manager': 'マネージャ',
'Managing Office': 'オフィスの管理',
'Managing, Storing and Distributing Relief Items': '救援物資の保管、流通、配布状況を管理します',
'Managing, Storing and Distributing Relief Items.': '救援物資の管理、保存、配布状況を管理します。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必須項目。GeoServerでのこの項目はレイヤー名となります。WFSの get Capabilitiesでは、コロン( : )の後に付与される FeatureTypeとして表示されます。',
'Mandatory. The URL to access the service.': '省略できません。サービスにアクセスするためのURLです。',
'Manual': 'マニュアル',
'Manual Synchronization': 'データ手動同期',
'Many': '多数',
'Map': '地図',
'Map Center Latitude': 'Map Center Latitude',
'Map Center Longitude': 'Map Center Longitude',
'Map Configuration': '地図の設定',
'Map Configuration Details': 'Map Configuration Details',
'Map Configuration added': '地図の設定を追加しました',
'Map Configuration deleted': '地図設定を削除しました',
'Map Configuration removed': 'Map Configuration removed',
'Map Configuration updated': '地図設定を更新しました',
'Map Configurations': '地図の設定',
'Map Height': '地図の縦高',
'Map Service Catalogue': '地図サービスカタログ',
'Map Settings': '地図の設定',
'Map Viewing Client': '地図閲覧クライアント',
'Map Width': '地図の横幅',
'Map Zoom': 'Map Zoom',
'Map of Hospitals': '病院の地図',
'MapMaker Hybrid Layer': 'MapMaker Hybrid Layer',
'MapMaker Layer': 'MapMaker Layer',
'Mapping': 'マッピング',
'Maps': 'Maps',
'Marine Security': '海上保安',
'Marital Status': '婚姻状況',
'Marker': 'マーカー',
'Marker Details': 'マーカーの詳細',
'Marker added': 'マーカーを追加しました',
'Marker deleted': 'マーカーを削除しました',
'Marker updated': 'マーカーを更新しました',
'Markers': 'マーカー',
'Master': 'Master',
'Master Message Log': 'マスターメッセージログ',
'Master Message Log to process incoming reports & requests': '受け取ったレポートと要求を処理するマスターメッセージログ',
'Match Percentage': '一致率',
'Match Requests': '支援要請マッチ',
'Match percentage indicates the % match between these two records': 'マッチの割合は、2つのレコードの間の一致度をあらわします',
'Match?': 'Match?',
'Matching Catalog Items': '適合する救援物資カタログ',
'Matching Items': 'Matching Items',
'Matching Records': '一致するレコード',
'Matrix of Choices (Multiple Answers)': '選択肢 (複数可)',
'Matrix of Choices (Only one answer)': '選択肢 (複数選択不可)',
'Matrix of Text Fields': 'テキストフィールドのマトリックス',
'Max Persons per Dwelling': '住居ごとの最大収容人数',
'Maximum Location Latitude': 'Maximum Location Latitude',
'Maximum Location Longitude': 'Maximum Location Longitude',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'ドロップダウンリストで単位を選択してから、備蓄地点の最大重量を指定します。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'storage binに収容することができるアイテムの最大重量を指定します。ドロップダウンリストから、単位を選択してください。',
'Measure Area: Click the points around the polygon & end with a double-click': '観測領域: 多角形の角をクリックし、ダブルクリックで終了',
'Measure Length: Click the points along the path & end with a double-click': '距離を計測: 経路上の中継点をクリックして、終点でダブルクリックしてください',
'Medical and public health': '医療、公衆衛生',
'Medicine': '薬品',
'Medium': '中',
'Megabytes per Month': '1月毎のメガバイト数',
'Members': 'メンバ',
'Membership': 'メンバシップ',
'Membership Details': 'メンバシップの詳細',
'Membership added': 'メンバシップを追加しました',
'Membership deleted': 'メンバシップを削除しました',
'Membership updated': 'メンバシップを更新しました',
'Memberships': 'メンバシップ',
'Message': 'メッセージ',
'Message Details': 'メッセージの詳細',
'Message Sent': 'メッセージが送信されました',
'Message Variable': 'メッセージ変数',
'Message added': 'メッセージを追加しました',
'Message deleted': 'メッセージを削除しました',
'Message field is required!': 'メッセージは必須です',
'Message sent to outbox': 'メッセージを送信箱に送りました',
'Message updated': 'メッセージを更新しました',
'Message variable': 'メッセージ変数',
'Messages': 'メッセージ',
'Messaging': 'メッセージング',
'Messaging settings updated': 'メッセージング設定を更新しました',
'Metadata': 'メタデータ',
'Metadata Details': 'メタデータの詳細',
'Metadata added': 'メタデータを追加しました',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': '必要に応じて、アップロードした全ての画像に適用されるメタデータをここで入力できます。',
'Metadata deleted': 'メタデータを削除しました',
'Metadata updated': 'メタデータを更新しました',
'Meteorite': '隕石落下',
'Meteorological (inc. flood)': '気象 (洪水を含む)',
'Method used': '使用されるメソッド',
'Micronutrient malnutrition prior to disaster': '災害前から栄養失調傾向あり',
'Middle Name': 'ミドルネーム',
'Migrants or ethnic minorities': '移民、あるいは少数民族の数',
'Mileage': 'Mileage',
'Military': '軍隊',
'Minimum Bounding Box': '最小:領域を指定した枠組み',
'Minimum Location Latitude': 'Minimum Location Latitude',
'Minimum Location Longitude': 'Minimum Location Longitude',
'Minimum shift time is 6 hours': '最小シフト時間は6時間です。',
'Minor Damage': 'Minor Damage',
'Minor/None': '少数 / なし',
'Minorities participating in coping activities': '少数民族が災害対応に従事',
'Minute': '分',
'Minutes must be a number between 0 and 60': '分には0-60の間の数字を記入してください',
'Minutes must be a number greater than 0 and less than 60': '分数は0から60の間で入力してください',
'Minutes per Month': '1月毎の分数',
'Minutes should be a number greater than 0 and less than 60': '分は0から60の間で入力してください',
'Miscellaneous': 'その他',
'Missing': '行方不明',
'Missing Person': '行方不明者',
'Missing Person Details': '行方不明者の詳細',
'Missing Person Registry': 'Missing Person Registry',
'Missing Person Reports': '行方不明者レポート',
'Missing Persons': '行方不明者',
'Missing Persons Registry': '行方不明者の登録',
'Missing Persons Report': '行方不明者のレポート',
'Missing Report': '行方不明レポート',
'Missing Senior Citizen': '高齢者の行方不明',
'Missing Vulnerable Person': '被介護者の行方不明',
'Mission Details': 'Mission Details',
'Mission Record': 'Mission Record',
'Mission added': 'Mission added',
'Mission deleted': 'Mission deleted',
'Mission updated': 'Mission updated',
'Missions': 'Missions',
'Mobile': 'モバイル',
'Mobile Assess.': 'モバイルアセスメント',
'Mobile Basic': 'モバイルの基礎',
'Mobile Basic Assessment': 'モバイルの基本アセスメント',
'Mobile Phone': '携帯番号',
'Mode': 'モード',
'Model/Type': 'Model/Type',
'Modem Settings': 'モバイル機器の設定',
'Modem settings updated': 'モバイル機器の設定を更新しました',
'Moderate': '中程度',
'Moderator': 'モデレータ',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '地物の変更: 変形する地物を選択し、点の一つをドラッグすることで地物の形を修正可能です。',
'Modify Information on groups and individuals': 'グループと個人の情報更新',
'Modifying data in spreadsheet before importing it to the database': 'データベース登録前に、スプレッドシート内のデータ項目を修正',
'Module': 'Module',
'Module Administration': 'モジュール管理',
'Module disabled!': 'モジュールが無効です',
'Module provides access to information on current Flood Levels.': 'このモジュールにより、洪水の現在の水位情報にアクセス可能です',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'モジュールでは、専門団体によって作成された調査文書を管理します。データには、WFP(国連世界食糧計画)アセスメントも含まれます。',
'Monday': '月曜日',
'Monthly Cost': '月額費用',
'Monthly Salary': '給与(月額)',
'Months': '月',
'Morgue': 'Morgue',
'Morgue Details': 'Morgue Details',
'Morgue Status': '死体安置所のステータス',
'Morgue Units Available': '死体安置所の収容可能数',
'Morgues': 'Morgues',
'Mosque': 'モスク',
'Motorcycle': 'オートバイ',
'Moustache': '口ひげ',
'Move Feature: Drag feature to desired location': 'Featureの移動: Feature を希望するロケーションにドラッグしてください',
'Movements (Filter In/Out/Lost)': '活動 (フィルター イン/アウト/ロスト)',
'MultiPolygon': 'マルチポリゴン',
'Multiple': '複数',
'Multiple Choice (Multiple Answers)': '複数選択(複数回答)',
'Multiple Choice (Only One Answer)': '複数選択(1つだけ回答)',
'Multiple Matches': '複数の結果が適合しました',
'Multiple Text Fields': '複数の入力項目',
'Multiplicator': '乗数',
'Muslim': 'イスラム教徒',
'Must a location have a parent location?': 'ある場所にはその親の場所が無ければならないですか?',
'My Current function': '現在登録している機能',
'My Details': 'My Details',
'My Tasks': '自分のタスク',
'My Volunteering': 'My Volunteering',
'N/A': '該当なし',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
'NO': 'NO',
'NZSEE Level 1': 'NZSEE レベル1',
'NZSEE Level 2': 'NZSEE レベル 2',
'Name': '名前',
'Name and/or ID': '名前および/またはID',
'Name and/or ID Label': '名前および/またはIDラベル',
'Name of Storage Bin Type.': '物資保管タイプの名前です。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'ヘッダーの背景に使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '左上の画像で静的位置を表すファイル名(サブパス名はオプション)',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'フッターに使われるビューにあるファイル名 (オプションとしてサブパス)。',
'Name of the person in local language and script (optional).': '現地言語での名前と表記(オプション)',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'このレポートに関連する組織や部署の名前。部署をもたない病院の場合は空欄にしてください。',
'Name or Job Title': '名前あるいは役職名',
'Name, Org and/or ID': '名前、組織、IDなど',
'Name/Model/Type': '名前/ モデル/タイプ',
'Name: ': '名前: ',
'Names can be added in multiple languages': '名前は、複数の言語で記述することができます。',
'National': 'National',
'National ID Card': 'ナショナルIDカード',
'National NGO': '国内NPO',
'National Staff': '現地スタッフ',
'Nationality': '国籍',
'Nationality of the person.': 'この人物の国籍です。',
'Nautical Accident': '船舶事故',
'Nautical Hijacking': '船舶ハイジャック',
'Need Type': '需要タイプ',
'Need Type Details': '需要タイプの詳細',
'Need Type added': '需要タイプを追加しました',
'Need Type deleted': '需要タイプを削除しました',
'Need Type updated': '需要タイプを更新しました',
'Need Types': '需要タイプ',
"Need a 'url' argument!": "'url'引数が必要です。",
'Need added': 'ニーズを追加しました',
'Need deleted': 'ニーズを削除しました',
'Need to be logged-in to be able to submit assessments': '評価を確定させるには、ログインが必要です',
'Need to configure Twitter Authentication': 'Twitterの認証を設定する必要があります',
'Need to select 2 Locations': 'ロケーションを2つ指定してください',
'Need to specify a Budget!': '予算を指定する必要があります。',
'Need to specify a Kit!': 'Kitを指定する必要があります。',
'Need to specify a Resource!': 'リソースを指定する必要があります。',
'Need to specify a bundle!': 'bundleを指定する必要があります。',
'Need to specify a group!': 'グループを指定する必要があります。',
'Need to specify a location to search for.': '検索対象となるロケーションを指定する必要があります。',
'Need to specify a role!': '役割を指定する必要があります。',
'Need to specify a service!': 'サービスを指定してください!',
'Need to specify a table!': 'テーブルを指定する必要があります。',
'Need to specify a user!': 'ユーザを指定する必要があります。',
'Need updated': 'ニーズを更新しました',
'Needs': '要求',
'Needs Details': '需要の詳細',
'Needs Maintenance': 'Needs Maintenance',
'Needs to reduce vulnerability to violence': '暴力行為の対策として必要な物資 / サービス',
'Negative Flow Isolation': '逆流の分離',
'Neighborhood': 'Neighborhood',
'Neighbourhood': '近隣',
'Neighbouring building hazard': '隣接ビルが危険な状態',
'Neonatal ICU': '新生児ICU',
'Neonatology': '新生児科',
'Network': 'ネットワーク',
'Neurology': '神経科',
'New': '新規',
'New Assessment reported from': '新規アセスメントの報告元',
'New Certificate': 'New Certificate',
'New Checklist': '新規チェックリスト',
'New Entry': 'New Entry',
'New Event': 'New Event',
'New Home': 'New Home',
'New Item Category': 'New Item Category',
'New Job Role': 'New Job Role',
'New Location': 'New Location',
'New Location Group': 'New Location Group',
'New Patient': 'New Patient',
'New Peer': '新しいデータ同期先',
'New Record': '新規レコード',
'New Relative': 'New Relative',
'New Report': '新規レポート',
'New Request': '新規の支援要請',
'New Scenario': 'New Scenario',
'New Skill': 'New Skill',
'New Solution Choice': '新しい解決案を選択',
'New Staff Member': 'New Staff Member',
'New Support Request': '新しい支援要請',
'New Synchronization Peer': '新しい同期先',
'New Team': 'New Team',
'New Ticket': 'New Ticket',
'New Training Course': 'New Training Course',
'New Volunteer': 'New Volunteer',
'New cases in the past 24h': '過去24時間の新規ケース数',
'News': 'ニュース',
'Next': '次へ',
'Next View': '次を表示',
'No': 'いいえ',
'No Activities Found': '支援活動が見つかりませんでした',
'No Activities currently registered in this event': 'No Activities currently registered in this event',
'No Addresses currently registered': '住所は、まだ登録がありません。',
'No Aid Requests have been made yet': '援助要請がまだ作成されていません',
'No Alternative Items currently registered': '代替物資は現在登録されていません',
'No Assessment Summaries currently registered': 'アセスメントの要約が登録されていません',
'No Assessments currently registered': '登録済みのアセスメントがありません',
'No Asset Assignments currently registered': '現在のところ資産割り当ては登録されていません',
'No Assets currently registered': '登録されている資産は現在ありません。',
'No Assets currently registered in this event': 'No Assets currently registered in this event',
'No Assets currently registered in this scenario': 'No Assets currently registered in this scenario',
'No Baseline Types currently registered': '登録済みのBaseline Typesはありません',
'No Baselines currently registered': '登録されている基準値はありません',
'No Brands currently registered': '登録されている銘柄がありません',
'No Budgets currently registered': '予算は、まだ登録がありません。',
'No Bundles currently registered': 'Bundleは、まだ登録がありません。',
'No Camp Services currently registered': 'No Camp Services currently registered',
'No Camp Types currently registered': 'No Camp Types currently registered',
'No Camps currently registered': 'No Camps currently registered',
'No Catalog Items currently registered': '登録済みのカタログアイテムがありません',
'No Catalogs currently registered': 'No Catalogs currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-Category<>Catalog間の関係は、まだ登録がありません。',
'No Checklist available': '利用可能なチェックリストがありません',
'No Cluster Subsectors currently registered': 'クラスタのサブセクタはまだ登録がありません',
'No Clusters currently registered': '登録済みのクラスタはありません',
'No Commitment Items currently registered': '現在のところコミット済み物資は登録されていません',
'No Commitments': 'コミットメントがありません',
'No Configs currently defined': '設定は、まだ定義されていません',
'No Credentials currently set': '現在のところ証明書が設定されていません',
'No Details currently registered': '詳細は、まだ登録されていません',
'No Distribution Items currently registered': '配給物資の登録がありません',
'No Distributions currently registered': '配給所の登録がありません',
'No Documents currently attached to this request': 'No Documents currently attached to this request',
'No Documents found': '文書が見つかりませんでした。',
'No Donors currently registered': '資金提供組織はまだ登録されていません',
'No Events currently registered': 'No Events currently registered',
'No Facilities currently registered in this event': 'No Facilities currently registered in this event',
'No Facilities currently registered in this scenario': 'No Facilities currently registered in this scenario',
'No Feature Classes currently defined': '定義されているFeature Classがありません',
'No Feature Layers currently defined': 'Feature Layersはまだ定義されていません',
'No Flood Reports currently registered': '登録済みの洪水情報はありません',
'No GPS data currently registered': 'No GPS data currently registered',
'No GPX Layers currently defined': 'GPXレイヤはまだ定義されていません',
'No Groups currently defined': 'グループはまだ定義されていません',
'No Groups currently registered': 'グループはまだ登録されていません',
'No Homes currently registered': 'No Homes currently registered',
'No Hospitals currently registered': '病院はまだ登録されていません',
'No Human Resources currently registered in this event': 'No Human Resources currently registered in this event',
'No Human Resources currently registered in this scenario': 'No Human Resources currently registered in this scenario',
'No Identification Report Available': '利用可能なIDレポートはありません',
'No Identities currently registered': '登録されているIDはありません',
'No Image': '画像なし',
'No Images currently registered': '画像の登録はありません',
'No Impact Types currently registered': '被害の種類は未登録です',
'No Impacts currently registered': 'これまでに登録されたImpactはありません',
'No Import Files currently uploaded': 'No Import Files currently uploaded',
'No Incident Reports currently registered': '登録されているインシデントレポートはありません',
'No Incidents currently registered': '登録済みのインシデントはありません。',
'No Incoming Shipments': '到着予定の輸送物資はありません',
'No Inventories currently have suitable alternative items in stock': 'No Inventories currently have suitable alternative items in stock',
'No Inventories currently have this item in stock': 'No Inventories currently have this item in stock',
'No Inventory Items currently registered': '備蓄物資の登録がありません',
'No Inventory Stores currently registered': '現在登録されている物資集積地点はありません',
'No Item Catalog Category currently registered': '救援物資カタログのカテゴリはまだ登録がありません',
'No Item Catalog currently registered': 'アイテムカタログはまだ登録されていません',
'No Item Categories currently registered': '救援物資カテゴリの登録がありません',
'No Item Packs currently registered': '救援物資のパックは、まだ登録がありません',
'No Item Sub-Category currently registered': '救援物資のサブカテゴリはまだ登録されていません',
'No Item currently registered': 'アイテムはまだ登録されていません',
'No Items currently registered': '物資はまだ登録されていません',
'No Items currently registered in this Inventory': 'No Items currently registered in this Inventory',
'No Items currently requested': '要求されている物資はありません',
'No Keys currently defined': 'Keyはまだ定義されていません',
'No Kits currently registered': 'Kitはまだ登録されていません',
'No Level 1 Assessments currently registered': '現在のところ、レベル1アセスメントは登録されていません',
'No Level 2 Assessments currently registered': '現在のところ、レベル2アセスメントは登録されていません',
'No Locations currently available': '現在利用可能なロケーションはありません',
'No Locations currently registered': 'ロケーションはまだ登録されていません',
'No Map Configurations currently defined': '地図の設定が定義されていません',
'No Map Configurations currently registered in this event': 'No Map Configurations currently registered in this event',
'No Map Configurations currently registered in this scenario': 'No Map Configurations currently registered in this scenario',
'No Markers currently available': '現在利用可能なマーカーはありません',
'No Match': '合致する結果がありません',
'No Matching Catalog Items': '適合する救援物資はありませんでした',
'No Matching Items': 'No Matching Items',
'No Matching Records': '適合する検索結果がありませんでした',
'No Members currently registered': 'メンバはまだ登録されていません',
'No Memberships currently defined': 'メンバシップはまだ登録されていません',
'No Memberships currently registered': 'メンバシップはまだ登録されていません',
'No Messages currently in Outbox': '送信箱にメッセージがありません',
'No Metadata currently defined': 'メタデータはまだ定義されていません',
'No Need Types currently registered': '現在登録されている需要タイプはありません',
'No Needs currently registered': '現在要求は登録されていません',
'No Offices currently registered': 'オフィスはまだ登録されていません',
'No Offices found!': 'オフィスが見つかりませんでした',
'No Organizations currently registered': '団体はまだ登録されていません',
'No Packs for Item': 'この物資に対する救援物資パックはありません',
'No Patients currently registered': 'No Patients currently registered',
'No Peers currently registered': '登録済みのデータ同期先はありません',
'No People currently committed': 'No People currently committed',
'No People currently registered in this camp': 'No People currently registered in this camp',
'No People currently registered in this shelter': 'この避難所に登録されている人物情報はありません',
'No Persons currently registered': '人物情報はまだ登録されていません',
'No Persons currently reported missing': '現在、行方不明者の登録はありません',
'No Persons found': '該当する人物はいませんでした',
'No Photos found': '写真の登録がありません',
'No Picture': '写真がありません',
'No Population Statistics currently registered': 'No Population Statistics currently registered',
'No Presence Log Entries currently registered': '所在地履歴の登録がありません',
'No Problems currently defined': '定義済みの問題がありません',
'No Projections currently defined': '地図投影法は、まだ定義されていません。',
'No Projects currently registered': '定義済みのプロジェクトはありません',
'No Rapid Assessments currently registered': '被災地の現況アセスメントはまだ登録されていません',
'No Ratings for Skill Type': 'No Ratings for Skill Type',
'No Received Items currently registered': '受領された救援物資の登録はありません',
'No Received Shipments': '受け取った輸送はありません',
'No Records currently available': '利用可能なレコードはありません',
'No Records matching the query': '条件に当てはまるレコードが存在しません',
'No Relatives currently registered': 'No Relatives currently registered',
'No Request Items currently registered': '物資要請の登録がありません',
'No Requests': '支援要請がありません',
'No Requests have been made yet': '支援要請は、まだ行われていません',
'No Requests match this criteria': 'この条件に一致する支援要請はありません',
'No Responses currently registered': '現在登録されている返答はありません',
'No Rivers currently registered': '河川情報の登録がありません',
'No Roles currently defined': '役割はまだ定義されていません',
'No Rooms currently registered': 'No Rooms currently registered',
'No Scenarios currently registered': 'No Scenarios currently registered',
'No Sections currently registered': 'このセクションの登録情報がありません',
'No Sectors currently registered': '登録済みの活動分野がありません',
'No Sent Items currently registered': '送付した物資の登録がありません',
'No Sent Shipments': '送付が行われた輸送がありません',
'No Settings currently defined': '設定は、まだ定義されていません',
'No Shelter Services currently registered': '登録されている避難所サービスがありません',
'No Shelter Types currently registered': '登録済みの避難所タイプがありません',
'No Shelters currently registered': '避難所はまだ登録されていません',
'No Shipment Transit Logs currently registered': '物資輸送履歴の登録がありません',
'No Shipment/Way Bills currently registered': '輸送貨物/貨物運送状はまだ登録されていません',
'No Shipment<>Item Relation currently registered': '輸送とアイテムの関連付けはまだ登録されていません',
'No Sites currently registered': '登録されているサイトはありません',
'No Skill Types currently set': '設定済みのスキルタイプはありません',
'No Skills currently requested': 'No Skills currently requested',
'No Solutions currently defined': '解決案はまだ定義されていません',
'No Staff Types currently registered': 'スタッフタイプはまだ登録されていません',
'No Staff currently registered': 'スタッフはまだ登録されていません',
'No Storage Bin Type currently registered': '登録済みのStorage Binタイプがありません',
'No Storage Bins currently registered': 'Storage Binはまだ登録されていません',
'No Storage Locations currently registered': '登録されている備蓄地点がありません',
'No Subscription available': '寄付の申し込みがありません',
'No Subsectors currently registered': 'No Subsectors currently registered',
'No Support Requests currently registered': '現在のところ、支援要請は登録されていません',
'No Survey Answers currently registered': 'これまでに登録されたフィードバックの回答はありません',
'No Survey Questions currently registered': '登録済みのSurvey Questionsはありません',
'No Survey Sections currently registered': '登録済みのSurvey Sectionはありません',
'No Survey Series currently registered': '現在、調査報告は登録されていません',
'No Survey Template currently registered': '登録されている調査テンプレートがありません',
'No TMS Layers currently defined': 'TMS レイヤーがまだ定義されていません',
'No Tasks currently registered in this event': 'No Tasks currently registered in this event',
'No Tasks currently registered in this scenario': 'No Tasks currently registered in this scenario',
'No Tasks with Location Data': 'ロケーション情報を持っているタスクがありません',
'No Teams currently registered': 'No Teams currently registered',
'No Themes currently defined': 'テーマはまだ定義されていません',
'No Tickets currently registered': 'チケットはまだ登録されていません',
'No Tracks currently available': '利用可能な追跡情報はありません',
'No Units currently registered': '単位はまだ登録されていません',
'No Users currently registered': '登録済みのユーザがありません',
'No Vehicle Details currently defined': 'No Vehicle Details currently defined',
'No Vehicles currently registered': 'No Vehicles currently registered',
'No Volunteers currently registered': 'ボランティアの登録がありません',
'No Warehouse Items currently registered': '現在登録済みの倉庫物資はありません',
'No Warehouses currently registered': '倉庫が登録されていません',
'No Warehouses match this criteria': '条件に合致する倉庫がありません',
'No access at all': '完全に孤立中',
'No access to this record!': 'このレコードにはアクセスできません',
'No action recommended': 'アクション無しを推奨',
'No calculations made': '見積が作成されていません',
'No conflicts logged': 'コンフリクトのログはありません。',
'No contact information available': '利用可能な連絡先情報はありません',
'No contact method found': 'No contact method found',
'No contacts currently registered': '連絡先が登録されていません',
'No data in this table - cannot create PDF!': 'テーブルにデータがありません。PDF を作成できません。',
'No databases in this application': 'このアプリケーションにデータベースはありません',
'No dead body reports available': '遺体情報のレポートはありません',
'No entries found': 'エントリが見つかりません',
'No entries matching the query': 'クエリに一致するエントリはありませんでした。',
'No entry available': 'No entry available',
'No forms to the corresponding resource have been downloaded yet.': 'No forms to the corresponding resource have been downloaded yet.',
'No import jobs': 'インポートされたJobがありません',
'No linked records': 'リンクされているレコードはありません',
'No location known for this person': 'この人物の消息が不明です',
'No locations found for members of this team': 'このチームのメンバーの場所が見つかりませんでした',
'No locations registered at this level': 'この階層に登録されているロケーションはありません',
'No log entries matching the query': '検索に合致するログエントリがありません',
'No match': '合致する結果がありません',
'No matching items for this request': 'この支援要請に適合する物資はありません',
'No matching records found': '一致するレコードがありませんでした',
'No matching records found.': '一致するレコードがありませんでした。',
'No messages in the system': 'システム上にメッセージが存在しません',
'No notes available': '追加情報はありません',
'No peers currently registered': '現在登録されているデータ同期先はありません',
'No pending registrations found': '処理保留中の登録申請はありません',
'No pending registrations matching the query': '検索に合致する処理保留登録申請がありません。',
'No person record found for current user.': '現在のユーザの人物情報レコードが見つかりませんでした。',
'No positions currently registered': '登録されているpositionがありません',
'No problem group defined yet': '定義済みの問題グループがありません。',
'No records matching the query': '条件に当てはまるレコードが存在しません',
'No records to delete': '削除するレコードがありません',
'No recovery reports available': '利用可能な遺体回収レポートはありません',
'No report available.': '利用可能なレポートはありません。',
'No reports available.': '利用可能なレポートがありません。',
'No reports currently available': '利用可能なレポートはありません',
'No requests found': '支援要請は見つかりませんでした',
'No resources currently registered': 'リソースはまだ登録されていません',
'No resources currently reported': 'レポート済みのリソースはありません',
'No service profile available': '利用可能なサービスプロファイルはありません',
'No skills currently set': 'スキルが登録されていません',
'No staff or volunteers currently registered': 'No staff or volunteers currently registered',
'No status information available': '状況に関する情報はありません',
'No synchronization': '同期なし',
'No tasks currently assigned': 'No tasks currently assigned',
'No tasks currently registered': 'タスクはまだ登録されていません',
'No template found!': 'テンプレートが見つかりません。',
'No units currently registered': '単位はまだ登録されていません',
'No volunteer availability registered': 'No volunteer availability registered',
'No volunteer information registered': 'ボランティア情報はまだ登録されていません',
'Non-structural Hazards': '非構造部材の危険',
'None': 'なし',
'None (no such record)': 'なし(記録がありません)',
'Noodles': '麺',
'Normal': '通常どおり',
'Normal food sources disrupted': '普段の食料供給源が混乱している',
'Not Applicable': '該当なし',
'Not Authorised!': '認証されていません',
'Not Possible': '対応不可',
'Not Set': '設定されていません',
'Not authorised!': '認証されていません',
'Not installed or incorrectly configured.': 'インストールされていないか、適切な設定がされていません',
'Note': '追加情報',
'Note Details': '追加情報の詳細',
'Note Status': '追加情報のステータス',
'Note Type': '追加情報の種類',
'Note added': '追加情報を追加しました',
'Note deleted': '追加情報を削除しました',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意:このリストは、活動中のボランティアのみ表示しています。システムに登録しているすべての人をみるには、ホーム・スクリーンから検索してください。',
'Note updated': '追加情報を更新しました',
'Notes': '追加情報',
'Notice to Airmen': 'NOTAM (航空従事者用)',
'Number': '番号',
'Number of Columns': '列数',
'Number of Patients': '患者数',
'Number of People Required': 'Number of People Required',
'Number of Rows': '行数',
'Number of Vehicles': '車両数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'この施設において、今後24時間以内に利用可能になると予測されている、このタイプの追加ベッド数。',
'Number of alternative places for studying': '授業用に確保できる場所の数',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'このタイプの利用可能/空きベッド数(報告時点)',
'Number of bodies found': 'Number of bodies found',
'Number of deaths during the past 24 hours.': '過去24時間以内の死亡者数',
'Number of discharged patients during the past 24 hours.': '退院患者数(過去24時間以内)',
'Number of doctors': '医者の人数',
'Number of doctors actively working': '現在活動中の医師の数',
'Number of houses damaged, but usable': '破損しているが利用可能な家屋の数',
'Number of houses destroyed/uninhabitable': '全壊/居住不可になった家屋数',
'Number of in-patients at the time of reporting.': 'レポート時の患者数です。',
'Number of latrines': 'トイレ総数',
'Number of midwives actively working': '現在活動中の助産師の数',
'Number of newly admitted patients during the past 24 hours.': '入院患者数(過去24時間以内)',
'Number of non-medical staff': '医療従事以外のスタッフ数',
'Number of nurses': '看護師の人数',
'Number of nurses actively working': '現在活動中の看護師の数',
'Number of private schools': '私立学校の数',
'Number of public schools': '公立学校の数',
'Number of religious schools': '宗教学校の数',
'Number of residential units': '居住施設の数',
'Number of residential units not habitable': '住めなくなった住居の数',
'Number of schools damaged but usable': '破損しているが利用可能な校舎の数',
'Number of schools destroyed/uninhabitable': '全壊 / 利用不可能な校舎の数',
'Number of schools open before disaster': '災害前に開校していた学校数',
'Number of schools open now': '現在開校している学校の数',
'Number of teachers affected by disaster': '被災した教師の数',
'Number of teachers before disaster': '災害発生前の教師の数',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '病院に設置されている、現在利用可能なベッドの数。日次レポートにより、自動的に更新されます。',
'Number of vacant/available units to which victims can be transported immediately.': '現在利用可能なユニット数。犠牲者を即座に安置できる数。',
'Number or Label on the identification tag this person is wearing (if any).': 'この人物の衣服につけられているタグの番号、あるいはラベル名(ある場合のみ).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'この場所をあとで検索するための番号かコード 例: フラグ番号、グリッドの位置、サイトの参照番号など',
'Number/Percentage of affected population that is Female & Aged 0-5': '女性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 13-17': '女性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 18-25': '女性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 26-60': '女性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 6-12': '女性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 61+': '女性(61歳以上)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 0-5': '男性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 13-17': '男性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 18-25': '男性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 26-60': '男性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 6-12': '男性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 61+': '男性(61歳以上)の被災者数 / 割合',
'Numbers Only': '数値のみ',
'Nursery Beds': '新生児室ベッド',
'Nutrition': '食料・栄養',
'Nutrition problems': '栄養問題',
'OK': 'OK',
'OR Reason': '手術室の詳細',
'OR Status': '手術室の状態',
'OR Status Reason': '手術室の状態理由',
'Observer': 'オブザーバ',
'Obsolete': '廃止済み',
'Obstetrics/Gynecology': '産婦人科',
'Office': 'オフィス',
'Office Address': 'オフィスの住所',
'Office Details': 'オフィスの詳細',
'Office Phone': 'Office Phone',
'Office added': 'オフィスを追加しました',
'Office deleted': 'オフィスを削除しました',
'Office updated': 'オフィスを更新しました',
'Offices': 'オフィス',
'Offices & Warehouses': 'Offices & Warehouses',
'Offline Sync': 'データのオフライン同期',
'Offline Sync (from USB/File Backup)': 'データのオフライン同期(USB/バックアップファイル利用)',
'Old': '古い',
'Older people as primary caregivers of children': '子供の介護を、高齢者が担当',
'Older people in care homes': '介護施設で生活する高齢者がいる',
'Older people participating in coping activities': '高齢者が災害対応に従事',
'Older people with chronical illnesses': '慢性疾患をもつ高齢者がいる',
'Older person (>60 yrs)': '高齢者(60歳以上)',
'On by default?': 'デフォルトでON?',
'On by default? (only applicable to Overlays)': 'デフォルトでオン(オーバーレイにのみ有効)',
'One Time Cost': '1回毎の費用',
'One time cost': '一回毎の費用',
'One-time': '1回毎',
'One-time costs': '一回毎の費用',
'Oops! Something went wrong...': '申し訳ありません、何か問題が発生しています。',
'Oops! something went wrong on our side.': '申し訳ありません、システム側に問題が発生しています。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1は不透明、0は完全に透明)',
'Open': '開く',
'Open Assessment': '未解決のアセスメント',
'Open Map': '地図を開く',
'Open area': '空き地',
'Open recent': '最近使用したものを開く',
'OpenStreetMap Editor': 'OpenStreetMap エディタ',
'Operating Rooms': '手術室',
'Optional': '任意',
'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Optional Subject to put into Email - can be used as a Security Password by the service provider',
'Optional link to an Incident which this Assessment was triggered by.': 'このアセスメントの端緒となった事故へのオプション・リンク',
'Optional selection of a MapServer map.': 'Optional selection of a MapServer map.',
'Optional selection of a background color.': 'Optional selection of a background color.',
'Optional selection of an alternate style.': 'Optional selection of an alternate style.',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'オプション。GeoServerでは、ワークスペース名前空間のURIです。WFS getCapabilitiesでは、FeatureType名のコロンの前の部分です。',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. The name of an element whose contents should be put into Popups.',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': 'オプション',
'Organisation': '団体',
'Organization': '団体',
'Organization Details': '団体の詳細',
'Organization Registry': '団体情報の登録',
'Organization added': '団体を追加しました',
'Organization deleted': '団体を削除しました',
'Organization updated': '団体を更新しました',
'Organizations': '団体',
'Origin': '出身地',
'Origin of the separated children': '離別した子供たちの出身地',
'Other': 'その他',
'Other (describe)': 'その他 (要記述)',
'Other (specify)': 'その他(具体的に)',
'Other Evidence': 'その他の証跡',
'Other Faucet/Piped Water': 'その他 蛇口/パイプによる水源',
'Other Isolation': 'その他の孤立',
'Other Name': 'その他の名前',
'Other activities of boys 13-17yrs': 'その他、13-17歳男子の活動状況',
'Other activities of boys 13-17yrs before disaster': 'その他、災害発生前の13-17歳男子の活動状況',
'Other activities of boys <12yrs': 'その他、12歳以下男子の活動状況',
'Other activities of boys <12yrs before disaster': 'その他、災害発生前の12歳以下男子の活動状況',
'Other activities of girls 13-17yrs': 'その他、13-17歳女子の活動状況',
'Other activities of girls 13-17yrs before disaster': 'その他、災害発生前の13-17歳女子の活動状況',
'Other activities of girls<12yrs': 'その他、12歳以下女子の活動状況',
'Other activities of girls<12yrs before disaster': 'その他、災害発生前の12歳以下女子の活動状況',
'Other alternative infant nutrition in use': 'その他、使用されている乳児用代替食',
'Other alternative places for study': 'その他、授業開設に利用可能な施設',
'Other assistance needed': 'その他に必要な援助活動',
'Other assistance, Rank': 'その他の援助、ランク',
'Other current health problems, adults': 'その他の健康問題(成人)',
'Other current health problems, children': 'その他の健康問題(小児)',
'Other events': '他のイベント',
'Other factors affecting school attendance': 'その他、生徒の就学に影響する要因',
'Other major expenses': 'その他の主な支出',
'Other non-food items': '食料以外の救援物資',
'Other recommendations': '他の推薦',
'Other residential': '住宅その他',
'Other school assistance received': 'その他の学校用品を受領した',
'Other school assistance, details': '受領した学校用品の内訳',
'Other school assistance, source': 'その他の学校用品の送付元',
'Other settings can only be set by editing a file on the server': 'Other settings can only be set by editing a file on the server',
'Other side dishes in stock': '在庫のあるその他食材',
'Other types of water storage containers': 'それ以外の水貯蔵容器タイプ',
'Other ways to obtain food': 'それ以外の食料調達方法',
'Outbound Mail settings are configured in models/000_config.py.': '送信メール設定は、models/000_config.py で定義されています。',
'Outbox': '送信箱',
'Outgoing SMS Handler': 'SMS 送信ハンドラ',
'Outgoing SMS handler': 'SMS送信ハンドラ',
'Overall Hazards': 'すべての危険',
'Overhead falling hazard': '頭上落下物の危険',
'Overland Flow Flood': '地表流による洪水',
'Overlays': 'オーバーレイ',
'Owned Records': '自身のレコード',
'Owned Resources': '保持しているリソース',
'PAHO UID': 'PAHO UID',
'PDAM': '水道会社(PDAM)',
'PDF File': 'PDF File',
'PIN': '暗証番号',
'PIN number ': 'PIN 番号',
'PL Women': 'PL 女性',
'Pack': 'パック',
'Packs': 'パック',
'Page': 'Page',
'Pan Map: keep the left mouse button pressed and drag the map': 'マップをパン: マウスの左ボタンを押したまま、地図をドラッグしてください',
'Parameters': 'パラメータ',
'Parapets, ornamentation': '欄干、オーナメント',
'Parent': '親',
'Parent Office': '親組織のオフィス',
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
'Parent needs to be of the correct level': '適切なレベルの親属性を指定してください',
'Parent needs to be set': '親情報が設定される必要があります',
'Parent needs to be set for locations of level': 'ロケーションのレベルには親属性が必要です',
'Parents/Caregivers missing children': '親/介護者とはぐれた子供たち',
'Parking Area': 'Parking Area',
'Partial': '一部 / 不足',
'Participant': '参加者',
'Pashto': 'パシュトー語',
'Pass': 'Pass',
'Passport': 'パスポート',
'Password': 'パスワード',
"Password fields don't match": 'パスワードが一致しません。',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. HTTPベーシック認証のみサポートしています。',
'Path': 'パス',
'Pathology': '病理学',
'Patient': 'Patient',
'Patient Details': 'Patient Details',
'Patient Tracking': 'Patient Tracking',
'Patient added': 'Patient added',
'Patient deleted': 'Patient deleted',
'Patient updated': 'Patient updated',
'Patients': '患者数',
'Pediatric ICU': '小児ICU',
'Pediatric Psychiatric': '小児精神科',
'Pediatrics': '小児科',
'Peer': 'データ同期先',
'Peer Details': 'データ同期先の詳細',
'Peer Registration': 'データ同期先登録',
'Peer Registration Details': 'データ同期先登録の詳細',
'Peer Registration Request': 'データ同期先の登録要求',
'Peer Type': '同期先タイプ',
'Peer UID': '同期先UID',
'Peer added': 'データ同期先を追加しました',
'Peer deleted': 'データ同期先を削除しました',
'Peer not allowed to push': '同期先がデータのプッシュを許可していません',
'Peer registration request added': 'データ同期先の登録要求を追加しました',
'Peer registration request deleted': 'データ同期先の登録要求を削除しました',
'Peer registration request updated': 'データ同期先の登録要求を更新しました',
'Peer updated': '同期先を更新しました',
'Peers': '同期先',
'Pending': '保留中',
'Pending Requests': '保留中の支援要請',
'People': '人物情報',
'People Needing Food': '食料不足',
'People Needing Shelter': '避難所が必要',
'People Needing Water': '水が必要',
'People Trapped': '救難者',
'People with chronical illnesses': '慢性疾患をもつ成人がいる',
'Performance Rating': 'Performance Rating',
'Person': '人物情報',
'Person 1': '人物 1',
'Person 1, Person 2 are the potentially duplicate records': '人物情報1と人物情報2は重複したレコードの可能性があります。',
'Person 2': '人物 2',
'Person Data': '人物データ',
'Person De-duplicator': '人物情報の重複削除',
'Person Details': '人物情報の詳細',
'Person Finder': '消息情報',
'Person Registry': '人物情報の登録',
'Person added': '人物情報を追加しました',
'Person added to Commitment': 'Person added to Commitment',
'Person deleted': '人物情報を削除しました',
'Person details updated': '人物情報を更新しました',
'Person interviewed': 'インタビュー担当者',
'Person missing': '行方不明中',
'Person must be specified!': '人物を指定する必要があります。',
'Person removed from Commitment': 'Person removed from Commitment',
'Person reporting': 'レポート報告者',
'Person who has actually seen the person/group.': 'この人物/グループを実際に目撃した人物です。',
'Person who is reporting about the presence.': 'この所在報告を行った人物です。',
'Person who observed the presence (if different from reporter).': '人物の所在を確認したひとの情報(報告者と異なる場合のみ記入)。',
'Person/Group': '人物/グループ',
'Personal': '個人',
'Personal Data': '個人情報',
'Personal Effects': '所持品',
'Personal Effects Details': '個人の影響の詳細',
'Personal Map': 'Personal Map',
'Personal Profile': 'Personal Profile',
'Personal impact of disaster': 'この人物の被災状況',
'Persons': '人物情報',
'Persons in institutions': '施設居住中の住人',
'Persons with disability (mental)': '精神的な障がい者の数',
'Persons with disability (physical)': '肉体的な障がい者の数',
'Phone': '電話番号',
'Phone 1': '電話番号',
'Phone 2': '電話番号(予備)',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
'Phone/Business': '電話番号/仕事',
'Phone/Emergency': '電話番号/緊急連絡先',
'Phone/Exchange': '電話/とりつぎ',
'Phone/Exchange (Switchboard)': 'Phone/Exchange (Switchboard)',
'Photo': '写真',
'Photo Details': '写真の詳細',
'Photo Taken?': '写真撮影済み?',
'Photo added': '写真を追加しました',
'Photo deleted': '写真を削除しました',
'Photo updated': '写真を更新しました',
'Photograph': '写真',
'Photos': '写真',
'Physical Description': '身体外見の説明',
'Physical Safety': '身体的安全',
'Picture': '写真',
'Picture upload and finger print upload facility': '指紋や写真のアップロード機能',
'Place': 'Place',
'Place for solid waste disposal': '廃棄物の処理を行う場所を記載してください',
'Place of Recovery': '遺体回収場所',
'Place on Map': '地図上の場所',
'Places for defecation': 'トイレ',
'Places the children have been sent to': '子供たちの避難先',
'Planner': '立案者',
'Playing': '家庭内/外で遊ぶ',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
'Please correct all errors.': 'すべてのエラーを修正してください。',
'Please enter a First Name': '名を入力してください',
'Please enter a first name': 'Please enter a first name',
'Please enter a number only': 'Please enter a number only',
'Please enter a site OR a location': 'Please enter a site OR a location',
'Please enter a valid email address': '有効な電子メールアドレスを入力してください。',
'Please enter the first few letters of the Person/Group for the autocomplete.': '自動入力するには人物あるいはグループの最初の数文字を入力してください',
'Please enter the recipient': '受取担当者を入力してください',
'Please fill this!': 'ここに入力してください',
'Please give an estimated figure about how many bodies have been found.': 'Please give an estimated figure about how many bodies have been found.',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '言及先のURLを明示し、期待する結果と実際に発生した結果を記述してください。不具合チケットが発行された場合は、そのチケットIDも記載してください。',
'Please report here where you are:': 'いまあなたが居る場所を入力してください。',
'Please select': '選んでください',
'Please select another level': '別のレベルを選択してください',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '携帯電話番号でサインアップし、Sahanaからのテキストメッセージを受け取れるようにします。国際電話コードまで含めた形式で入力してください',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '病気の治療に当たって問題となる事象の詳細を記載します。状況を改善するための提案も、もしあれば記載してください。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '追加情報はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Please use this field to record any additional information, including any Special Needs.': '特別な要求など、どんな追加情報でも構いませんので、この部分に記録してください',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'UshahidiのインスタンスIDなど、追加情報がある場合はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Pledge': '寄付',
'Pledge Aid': '寄付する',
'Pledge Aid to match these Requests': 'これらの要求に一致する支援に寄付する',
'Pledge Status': '寄付のステータス',
'Pledge Support': '寄付サポート',
'Pledged': '寄付済み',
'Pledges': '寄付',
'Point': 'ポイント',
'Poisoning': '中毒',
'Poisonous Gas': '有毒ガス',
'Police': '警察',
'Pollution and other environmental': '汚染、あるいはその他の環境要因',
'Polygon': 'ポリゴン',
'Polygon reference of the rating unit': 'その評価単位への参照ポリゴン',
'Poor': 'Poor',
'Population': '利用者数',
'Population Statistic Details': 'Population Statistic Details',
'Population Statistic added': 'Population Statistic added',
'Population Statistic deleted': 'Population Statistic deleted',
'Population Statistic updated': 'Population Statistic updated',
'Population Statistics': 'Population Statistics',
'Population and number of households': '人口と世帯数',
'Popup Fields': 'Popup Fields',
'Popup Label': 'Popup Label',
'Porridge': 'おかゆ',
'Port': 'ポート',
'Port Closure': '港湾閉鎖',
'Portuguese': 'Portuguese',
'Portuguese (Brazil)': 'Portuguese (Brazil)',
'Position': 'Position',
'Position Catalog': 'Position Catalog',
'Position Details': 'ポジションの詳細',
'Position added': 'Position を追加しました',
'Position deleted': 'ポジションを削除しました',
'Position type': 'ポジションのタイプ',
'Position updated': 'ポジションを更新しました',
'Positions': 'ポジション',
'Postcode': '郵便番号',
'Poultry': '家禽(ニワトリ)',
'Poultry restocking, Rank': '家禽の補充、ランク',
'Pounds': 'ポンド',
'Power Failure': '停電',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Pre-cast connections': 'プレキャスト連結',
'Preferred Name': '呼び名',
'Pregnant women': '妊婦の数',
'Preliminary': '予備',
'Presence': '所在',
'Presence Condition': '所在情報',
'Presence Log': '所在履歴',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
'Previous': '前へ',
'Previous View': '前を表示',
'Primary Name': '基本名',
'Primary Occupancy': '主要用途',
'Priority': '優先度',
'Priority Level': '優先度レベル',
'Priority from 1 to 9. 1 is most preferred.': 'Priority from 1 to 9. 1 is most preferred.',
'Private': '企業',
'Problem': '問題',
'Problem Administration': '問題管理',
'Problem Details': '問題の詳細',
'Problem Group': '問題グループ',
'Problem Title': '問題の名称',
'Problem added': '問題を追加しました',
'Problem connecting to twitter.com - please refresh': 'twitter.comに接続できません。更新ボタンを押してください',
'Problem deleted': '問題を削除しました',
'Problem updated': '問題を更新しました',
'Problems': '問題',
'Procedure': '手続き',
'Process Received Shipment': 'Process Received Shipment',
'Process Shipment to Send': 'Process Shipment to Send',
'Procurements': '物資の調達',
'Product Description': '製品の説明',
'Product Name': '製品名',
'Profile': 'プロファイル',
'Project': 'プロジェクト',
'Project Activities': 'プロジェクト活動状況',
'Project Details': 'プロジェクトの詳細',
'Project Management': 'プロジェクト管理',
'Project Status': 'プロジェクトのステータス',
'Project Tracking': 'プロジェクト追跡',
'Project added': 'プロジェクトを追加しました',
'Project deleted': 'プロジェクトを削除しました',
'Project has no Lat/Lon': 'プロジェクトの緯度/経度情報はありません',
'Project updated': 'プロジェクトを更新しました',
'Projection': '地図投影法',
'Projection Details': '地図投影法の詳細',
'Projection Type': 'Projection Type',
'Projection added': '地図投影法を追加しました',
'Projection deleted': '地図投影法を削除しました',
'Projection updated': '地図投影法を更新しました',
'Projections': '地図投影法',
'Projects': 'プロジェクト',
'Property reference in the council system': '評議システムで使用されるプロパティリファレンス',
'Protected resource': '保護されたリソース',
'Protection': '被災者保護',
'Provide Metadata for your media files': 'メディアファイルにメタデータを提供',
'Provide a password': 'パスワードを入力',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '建物全体か損傷箇所のスケッチを提供し、損傷箇所を明示してください。',
'Province': '都道府県',
'Proxy-server': 'プロキシサーバ',
'Psychiatrics/Adult': '精神病/成人',
'Psychiatrics/Pediatric': '精神病/小児',
'Public': '公開',
'Public Event': '公開イベント',
'Public and private transportation': '公共および民営の交通機関',
'Public assembly': '公会堂',
'Pull tickets from external feed': '外部フィードからのticketの取得',
'Punjabi': 'パンジャブ',
'Purchase Date': 'Purchase Date',
'Push tickets to external system': '外部システムにチケットの発信',
'Put a choice in the box': '選択肢をボックスに入力してください',
'Pyroclastic Flow': '火砕流',
'Pyroclastic Surge': '火砕サージ',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'PythonでPython Serial moduleが利用できません。モデムの有効化に必要です。',
'Quantity': '数量',
'Quantity Committed': '引き受けた量',
'Quantity Fulfilled': '充足された数量',
"Quantity in %s's Inventory": '%s 倉庫にある量',
'Quantity in Transit': '運送中の数量',
'Quarantine': '隔離施設',
'Queries': 'クエリ',
'Query': 'クエリ',
'Query Feature': '問合せ機能',
'Queryable?': '検索可能?',
'RC frame with masonry infill': 'RCフレーム(組積造充填壁)',
'RECORD A': 'レコード A',
'RECORD B': 'レコード B',
'RESPONSE': '対応',
'Race': '人種',
'Radio': 'Radio',
'Radio Details': 'Radio Details',
'Radiological Hazard': '放射能災害',
'Radiology': '放射線科',
'Railway Accident': '鉄道事故',
'Railway Hijacking': '鉄道ハイジャック',
'Rain Fall': '降雨',
'Rapid Assessment': '被災地の現況アセスメント',
'Rapid Assessment Details': '被災地の現況アセスメントの詳細',
'Rapid Assessment added': '被災地の現況アセスメントを追加しました',
'Rapid Assessment deleted': '被災地の現況アセスメントを削除しました',
'Rapid Assessment updated': '被災地の現況アセスメントを更新しました',
'Rapid Assessments': '被災地の現況アセスメント',
'Rapid Assessments & Flexible Impact Assessments': '被災地の現況アセスメントと、災害影響範囲アセスメント',
'Rapid Close Lead': '急いで閉め、先導してください。',
'Rapid Data Entry': 'データ入力簡易版',
'Rating Scale': '評価尺度',
'Raw Database access': 'データベースへの直接アクセス',
'Read-Only': '読み込み専用',
'Read-only': '登録内容の編集を禁止',
'Real World Arbitrary Units': '実在の任意単位',
'Receive': '物資受領',
'Receive Items': '物資を受領',
'Receive New Shipment': 'Receive New Shipment',
'Receive Shipment': '輸送を受け取る',
'Receive this shipment?': 'この物資送付を受領しますか?',
'Received': '受領済み',
'Received By': '物資受領責任者',
'Received By Person': 'Received By Person',
'Received Item Details': '配送済み物資の詳細',
'Received Item deleted': '受領した物資を削除しました',
'Received Item updated': '受領された物資を更新しました',
'Received Shipment Details': '受け取った輸送の詳細',
'Received Shipment canceled': '受け取った輸送をキャンセルしました',
'Received Shipment canceled and items removed from Inventory': '受領した輸送をキャンセルしました。物資は備蓄から削除されます',
'Received Shipment updated': '受領済みの配送物の情報が更新されました',
'Received Shipments': '受諾した輸送物資',
'Receiving and Sending Items': '送付 / 受領した救援物資',
'Recipient': '受け取り担当者',
'Recipients': '受信者',
'Recommendations for Repair and Reconstruction or Demolition': '再築や取り壊し、修繕を推奨',
'Record': 'レコード',
'Record %(id)s created': 'レコード %(id)s が作成されました',
'Record Created': '作成されたレコード',
'Record Details': 'レコードの詳細',
'Record ID': 'レコードID',
'Record Saved': 'レコードが保存されました',
'Record added': 'レコードを追加しました',
'Record any restriction on use or entry': '利用や入力に当たっての制限事項を記載',
'Record deleted': 'レコードを削除しました',
'Record last updated': '最近更新されたレコード',
'Record not found': 'Record not found',
'Record not found!': 'レコードが見つかりませんでした',
'Record updated': 'レコードを更新しました',
'Recording and Assigning Assets': '物資の割り当てと記録',
'Records': 'レコード',
'Recovery': '遺体回収',
'Recovery Request': '遺体回収の要請',
'Recovery Request added': '遺体の回収要請を追加しました',
'Recovery Request deleted': '遺体回収要請を削除しました',
'Recovery Request updated': '遺体回収要請を更新しました',
'Recovery Requests': '遺体回収要請',
'Recovery report added': '遺体回収レポートを追加しました',
'Recovery report deleted': '遺体回収レポートを削除しました',
'Recovery report updated': '遺体回収レポートを更新しました',
'Recruitment': '人材募集',
'Recurring': '定期的',
'Recurring Cost': '経常費用',
'Recurring cost': '経常費用',
'Recurring costs': '経常費用',
'Red': '赤',
'Red Cross / Red Crescent': 'Red Cross / Red Crescent',
'Reference Document': '関連文書',
'Refresh Rate (seconds)': 'Refresh Rate (seconds)',
'Region Location': '地域のロケーション',
'Regional': '地域',
'Register': '登録',
'Register Person': '人物情報を登録',
'Register Person into this Camp': 'Register Person into this Camp',
'Register Person into this Shelter': 'この避難所に人物情報を登録',
'Register them as a volunteer': 'ボランティアとして登録',
'Registered People': '登録した人物情報',
'Registered users can': '登録済みのユーザは',
'Registering ad-hoc volunteers willing to contribute': '貢献を希望する臨時ボランティアを登録',
'Registration': '登録',
'Registration Details': '登録情報詳細',
'Registration Disabled!': '現在アカウント登録は受け付けていません。',
'Registration added': '登録を追加しました',
'Registration entry deleted': '登録を削除しました',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登録はまだ承認されていません (承認者:(%s)) -- 確認メールが届くまでもうしばらくお待ちください。',
'Registration key': '登録key',
'Registration successful': '登録に成功しました',
'Registration updated': '登録を更新しました',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '地域内で活動する全ての支援団体を追跡し、情報を保持します。これにより、各団体が活動している地域の情報だけでなく、それぞれの地域でどのような活動が行われているかも掌握することができます。',
'Rehabilitation/Long Term Care': 'リハビリ/長期介護',
'Reinforced masonry': 'コンクリートブロック壁',
'Rejected': '拒否されました',
'Relative Details': 'Relative Details',
'Relative added': 'Relative added',
'Relative deleted': 'Relative deleted',
'Relative updated': 'Relative updated',
'Relatives': 'Relatives',
'Reliable access to sanitation/hygiene items': 'サニタリ / 衛生用品の安定供給がある',
'Relief': '救援',
'Relief Item': '救援物資',
'Relief Item Catalog': '救援物資カタログ',
'Relief Items': '救援物資',
'Relief Team': '救援チーム',
'Religion': '宗教',
'Religious': '宗教',
'Religious Leader': '宗教指導者',
'Relocate as instructed in the <instruction>': '<instruction>の内容に従って再配置',
'Remove': '削除',
'Remove Activity from this event': 'Remove Activity from this event',
'Remove Asset from this event': 'Remove Asset from this event',
'Remove Asset from this scenario': 'Remove Asset from this scenario',
'Remove Document from this request': 'Remove Document from this request',
'Remove Facility from this event': 'Remove Facility from this event',
'Remove Facility from this scenario': 'Remove Facility from this scenario',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Featureの削除: 削除したいfeatureを選択し、削除キーを押下してください',
'Remove Human Resource from this event': 'Remove Human Resource from this event',
'Remove Human Resource from this scenario': 'Remove Human Resource from this scenario',
'Remove Item from Inventory': 'Remove Item from Inventory',
'Remove Map Configuration from this event': 'Remove Map Configuration from this event',
'Remove Map Configuration from this scenario': 'Remove Map Configuration from this scenario',
'Remove Person from Commitment': 'Remove Person from Commitment',
'Remove Skill': 'Remove Skill',
'Remove Skill from Request': 'Remove Skill from Request',
'Remove Task from this event': 'Remove Task from this event',
'Remove Task from this scenario': 'Remove Task from this scenario',
'Remove this asset from this event': 'Remove this asset from this event',
'Remove this asset from this scenario': 'Remove this asset from this scenario',
'Remove this facility from this event': 'Remove this facility from this event',
'Remove this facility from this scenario': 'Remove this facility from this scenario',
'Remove this human resource from this event': 'Remove this human resource from this event',
'Remove this human resource from this scenario': 'Remove this human resource from this scenario',
'Remove this task from this event': 'Remove this task from this event',
'Remove this task from this scenario': 'Remove this task from this scenario',
'Repair': 'Repair',
'Repaired': 'Repaired',
'Repeat your password': 'パスワードをもう一度入力してください',
'Replace': '置換',
'Replace if Master': 'マスターなら置換',
'Replace if Newer': '新しいものがあれば置き換える',
'Report': 'レポート',
'Report Another Assessment...': '別のアセスメントをレポートする',
'Report Details': 'レポートの詳細',
'Report Resource': 'レポートリソース',
'Report Type': 'レポートタイプ',
'Report Types Include': 'レポートタイプを含む',
'Report a Problem with the Software': 'ソフトウェアの不具合を報告',
'Report added': 'レポートを追加しました',
'Report deleted': 'レポートを削除しました',
'Report my location': '自分の現在地を報告',
'Report that person missing': '行方不明者の情報を報告',
'Report the contributing factors for the current EMS status.': '現在の緊急受け入れ状態に影響している事由を記載',
'Report the contributing factors for the current OR status.': '現在の手術室の状態に影響している事由を記載',
'Report the person as found': '人物の所在情報を報告',
'Report them as found': '発見として報告',
'Report them missing': '行方不明として報告',
'Report updated': 'レポートを更新しました',
'ReportLab module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでReportLabモジュールが利用できません。PDF出力に必要です。',
'ReportLab not installed': 'ReportLabがインストールされていません',
'Reporter': 'レポーター',
'Reporter Name': 'レポーターの氏名',
'Reporting on the projects in the region': 'この地域で展開しているプロジェクトのレポート',
'Reports': 'レポート',
'Request': '支援要請',
'Request Added': '支援要請を追加しました',
'Request Canceled': '支援要請をキャンセルしました',
'Request Details': '支援要請の詳細',
'Request From': 'Request From',
'Request Item': '物資を要請',
'Request Item Details': '救援物資要請の詳細',
'Request Item added': '救援物資の要請を追加しました',
'Request Item deleted': '救援物資の要請を削除しました',
'Request Item from Available Inventory': 'Request Item from Available Inventory',
'Request Item updated': '救援物資の要請を更新しました',
'Request Items': '物資の要請',
'Request New People': 'Request New People',
'Request Status': '支援要請の状況',
'Request Type': '支援要請のタイプ',
'Request Updated': '支援要請を更新しました',
'Request added': '支援要請を追加しました',
'Request deleted': '支援要請を削除しました',
'Request for Role Upgrade': '上位権限の取得要求',
'Request updated': '支援要請を更新しました',
'Request, Response & Session': '要求、応答、およびセッション',
'Requested': '要求済み',
'Requested By': '支援要求元',
'Requested By Facility': 'Requested By Facility',
'Requested By Site': '支援要請を行ったサイト',
'Requested By Warehouse': '倉庫からの要請',
'Requested From': 'Requested From',
'Requested Items': '支援要請が行われた物資',
'Requested Skill': 'Requested Skill',
'Requested Skill Details': 'Requested Skill Details',
'Requested Skill updated': 'Requested Skill updated',
'Requested Skills': 'Requested Skills',
'Requested by': '要求元',
'Requested on': '要請日',
'Requester': '要請の実施者',
'Requestor': '要請者',
'Requests': '支援要請',
'Requests From': '支援要請元',
'Requests Management': 'Requests Management',
'Requests for Item': '物資に関する要請',
'Required Skill': 'Required Skill',
'Requires Login!': 'ログインしてください。',
'Requires login': 'ログインが必要です',
'Rescue and recovery': '救出、あるいは遺体回収作業',
'Reset': 'リセット',
'Reset Password': 'パスワードのリセット',
'Reset form': 'フォームをクリア',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Featureのリサイズ: リサイズしたいfeatureを選択し、適切なサイズになるようドラッグしてください',
'Resolve': '解決',
'Resolve Conflict': '競合の解決',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '"解決"リンクでは、新しい画面を開き、重複している情報を解決してデータベースを更新します',
'Resource': 'リソース',
'Resource Details': 'リソースの詳細',
'Resource added': 'リソースを追加しました',
'Resource deleted': 'リソースを削除しました',
'Resource updated': 'リソースを更新しました',
'Resources': 'リソース',
'Respiratory Infections': '呼吸器感染症',
'Response': '対応',
'Response Details': '応答の詳細',
'Response added': '返答を追加しました',
'Response deleted': 'Responseを削除しました',
'Response updated': '返答を更新しました',
'Responses': '対応',
'Restricted Access': 'アクセス制限中',
'Restricted Use': '制限された目的での使用',
'Restrictions': '制限',
'Results': '結果',
'Retail Crime': '小売犯罪',
'Retrieve Password': 'パスワードの取得',
'Return': 'Return',
'Return to Request': 'Return to Request',
'Returned': 'Returned',
'Returned From': 'Returned From',
'Review Incoming Shipment to Receive': 'Review Incoming Shipment to Receive',
'Rice': '米穀',
'Riot': '暴動',
'River': '河川',
'River Details': '河川の詳細',
'River added': '河川を追加しました',
'River deleted': '河川を削除しました',
'River updated': '河川を更新しました',
'Rivers': '河川',
'Road Accident': '道路事故',
'Road Closed': '道路(通行止め)',
'Road Conditions': '路面の状況',
'Road Delay': '道路遅延',
'Road Hijacking': '道路ハイジャック',
'Road Usage Condition': '道路の路面状況',
'Roads Layer': 'Roads Layer',
'Role': '権限',
'Role Details': '権限の詳細',
'Role Name': '権限の名称',
'Role Required': '権限が必要',
'Role Updated': '権限を更新しました',
'Role added': '権限を追加しました',
'Role deleted': '権限を削除しました',
'Role updated': '権限を更新しました',
'Role-based': '権限に基づいた',
'Roles': '権限',
'Roles Permitted': '許可された権限',
'Roof tile': '屋根瓦',
'Roofs, floors (vertical load)': '屋根、床板(鉛直荷重)',
'Room': 'Room',
'Room Details': 'Room Details',
'Room added': 'Room added',
'Room deleted': 'Room deleted',
'Room updated': 'Room updated',
'Rooms': 'Rooms',
'Roster': '名簿',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '地物の回転: 回転させたい地物を選択し、目的の位置に回転させるために関連付けられた点をドラッグします。',
'Row Choices (One Per Line)': '行の選択 (One Per Line)',
'Rows in table': 'テーブルの行',
'Rows selected': '行が選択されました',
'Run Functional Tests': '動作テストの実行',
'Run Interval': '実行間隔',
'Running Cost': 'ランニングコスト',
'Russian': 'Russian',
'SITUATION': '状況',
'SMS Modems (Inbound & Outbound)': 'SMS Modems (Inbound & Outbound)',
'SMS Outbound': 'SMS Outbound',
'SMS Settings': 'SMS Settings',
'SMS settings updated': 'SMS settings updated',
'SMTP to SMS settings updated': 'SMTP to SMS settings updated',
'Safe environment for vulnerable groups': '被災者にとって安全な環境である',
'Safety Assessment Form': '安全性アセスメントフォーム',
'Safety of children and women affected by disaster': '被災した女性と未成年が保護されている',
'Safety of children and women affected by disaster?': 'Safety of children and women affected by disaster?',
'Sahana Administrator': 'Sahana管理者',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana ブルー',
'Sahana Community Chat': 'Sahanaコミュニティチャット',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> 他のシステム',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> その他 (Sahana Agasti, Ushahidi 等.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Disaster Management Platform': 'Sahana Eden 被災地支援情報共有プラットフォーム',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Humanitarian Management Platform',
'Sahana Eden Website': 'Sahana Eden公式ページ',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organizations working in disaster management.': 'Sahana Edenは、災害復旧に関わる様々な支援団体が、お互いに協力しあうために存在します。',
'Sahana FOSS Disaster Management System': 'Sahana オープンソース 被災地情報共有システム',
'Sahana Green': 'Sahana グリーン',
'Sahana Login Approval Pending': 'Sahana ログインは承認待ちです',
'Sahana Steel': 'Sahana Steel',
'Sahana access granted': 'Sahanaへのアクセス権を付与',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: 新しい支援要請が行われました。ログインして、支援要請を実現できるか確認してください。',
'Salted Fish': '塩漬けの魚',
'Salvage material usable from destroyed houses': '全壊した家屋から回収した物品(使用可能)',
'Salvage material usable from destroyed schools': '全壊した校舎から回収した物品(使用可能)',
'Sanitation problems': '衛生設備に問題',
'Satellite': '衛星',
'Satellite Layer': 'Satellite Layer',
'Satellite Office': '現地活動拠点',
'Saturday': '土曜日',
'Save': '保存',
'Save any Changes in the one you wish to keep': '残す方の候補地へ行った変更を保存します。',
'Save: Default Lat, Lon & Zoom for the Viewport': 'デフォルト表示範囲の緯度,経度,ズームレベルを保存',
'Saved.': '保存しました',
'Saving...': '保存しています...',
'Scale of Results': '結果の規模',
'Scanned Copy': 'Scanned Copy',
'Scanned Forms Upload': 'Scanned Forms Upload',
'Scenario': 'Scenario',
'Scenario Details': 'Scenario Details',
'Scenario added': 'Scenario added',
'Scenario deleted': 'Scenario deleted',
'Scenario updated': 'Scenario updated',
'Scenarios': 'Scenarios',
'Schedule': 'スケジュール',
'Schema': 'Schema',
'School': '学校',
'School Closure': '学校閉鎖',
'School Lockdown': '学校の厳重封鎖',
'School Reports': '学校のレポート',
'School Teacher': '学校教師',
'School activities': '学校の活動',
'School assistance': '学校の援助',
'School assistance received/expected': '学校用支援品を受領済み/受領予定',
'School attendance': '就学状況',
'School destroyed': '校舎全壊',
'School heavily damaged': '校舎の深刻な損壊',
'School tents received': '仮校舎用テントを受領',
'School tents, source': '仮校舎用テント、送付元',
'School used for other purpose': '校舎を他目的で利用中',
'School/studying': '学校/勉強',
'Schools': '学校',
'Search': '検索',
'Search & List Bin Types': 'Bin Typeを検索して一覧表示',
'Search & List Bins': 'Binsを検索して一覧表示',
'Search & List Catalog': 'カタログを検索して一覧表示',
'Search & List Category': 'カテゴリを検索して一覧表示',
'Search & List Items': '救援物資を検索して一覧表示',
'Search & List Locations': 'ロケーションを検索して一覧表示',
'Search & List Site': 'Siteを検索して一覧表示',
'Search & List Sub-Category': 'サブカテゴリを検索して一覧表示',
'Search & List Unit': '単位を検索して一覧表示',
'Search Activities': '支援活動の検索',
'Search Activity Report': '支援活動レポートの検索',
'Search Addresses': '住所を検索',
'Search Aid Requests': '援助要請を検索',
'Search Alternative Items': 'その他のアイテムを検索',
'Search Assessment Summaries': 'アセスメントの要約を検索',
'Search Assessments': 'アセスメントを検索',
'Search Asset Assignments': '資産割り当ての検索',
'Search Asset Log': 'Search Asset Log',
'Search Assets': '資産の検索',
'Search Baseline Type': 'Baseline Typeを検索',
'Search Baselines': '基準値の検索',
'Search Brands': '銘柄を検索',
'Search Budgets': '予算を検索',
'Search Bundles': 'Bundleを検索',
'Search Camp Services': 'Search Camp Services',
'Search Camp Types': 'Search Camp Types',
'Search Camps': 'Search Camps',
'Search Catalog Items': '救援物資カタログを検索',
'Search Catalogs': 'Search Catalogs',
'Search Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog関係の検索',
'Search Certificates': 'Search Certificates',
'Search Certifications': 'Search Certifications',
'Search Checklists': 'チェックリストを検索',
'Search Cluster Subsectors': 'クラスタのサブセクタを検索',
'Search Clusters': 'クラスタを検索',
'Search Commitment Items': 'コミットされた救援物資の検索',
'Search Commitments': 'コミットの検索',
'Search Committed People': 'Search Committed People',
'Search Competency Ratings': 'Search Competency Ratings',
'Search Configs': '設定を検索',
'Search Contact Information': '連絡先情報を検索',
'Search Contacts': '連絡先を検索',
'Search Course Certificates': 'Search Course Certificates',
'Search Courses': 'Search Courses',
'Search Credentials': '証明書の検索',
'Search Distribution Items': '配給物資を検索',
'Search Distributions': '配給所を検索',
'Search Documents': 'ドキュメントを検索',
'Search Donors': '資金提供組織の検索',
'Search Entries': 'Search Entries',
'Search Events': 'Search Events',
'Search Existing Locations': '既存のロケーションを検索する',
'Search Facilities': 'Search Facilities',
'Search Feature Class': 'Feature Classの検索',
'Search Feature Layers': 'Feature Layersの検索',
'Search Flood Reports': '洪水レポートの検索',
'Search GPS data': 'Search GPS data',
'Search Geonames': 'Geonamesの検索',
'Search Groups': 'グループの検索',
'Search Homes': 'Search Homes',
'Search Hospitals': '病院情報の検索',
'Search Human Resources': 'Search Human Resources',
'Search Identity': 'ID情報の検索',
'Search Images': '画像の検索',
'Search Impact Type': '被害の種類を検索',
'Search Impacts': '影響の検索',
'Search Import Files': 'Search Import Files',
'Search Incident Reports': 'インシデントレポートを検索',
'Search Incidents': 'インシデントの検索',
'Search Inventory Items': '備蓄物資を検索',
'Search Inventory Stores': '物資集積地点の検索',
'Search Inventory items': 'Search Inventory items',
'Search Item Catalog Category(s)': 'アイテムカタログカテゴリの検索',
'Search Item Catalog(s)': '救援物資カタログの検索',
'Search Item Categories': '救援物資カテゴリを検索',
'Search Item Packs': '物資のパックを検索',
'Search Item Sub-Category(s)': 'アイテムサブカテゴリの検索',
'Search Items': 'アイテムの検索',
'Search Job Roles': 'Search Job Roles',
'Search Keys': 'Keyの検索',
'Search Kits': 'Kitsの検索',
'Search Layers': 'レイヤの検索',
'Search Level': 'Search Level',
'Search Level 1 Assessments': 'レベル1アセスメントの検索',
'Search Level 2 Assessments': 'レベル2のアセスメントを検索',
'Search Locations': 'ロケーションの検索',
'Search Log Entry': 'ログエントリの検索',
'Search Map Configurations': '地図設定の検索',
'Search Markers': 'マーカーの検索',
'Search Member': 'メンバーの検索',
'Search Membership': 'メンバシップの検索',
'Search Memberships': 'メンバシップの検索',
'Search Metadata': 'メタデータの検索',
'Search Missions': 'Search Missions',
'Search Need Type': '需要タイプの検索',
'Search Needs': '必要な物資を検索',
'Search Notes': '追加情報を検索',
'Search Offices': 'オフィスの検索',
'Search Organizations': '団体の検索',
'Search Patients': 'Search Patients',
'Search Peer': '同期先を検索',
'Search Peers': 'データ同期先を検索',
'Search Personal Effects': 'Personal Effectsの検索',
'Search Persons': '人物情報の検索',
'Search Photos': '写真の検索',
'Search Population Statistics': 'Search Population Statistics',
'Search Positions': 'Positionsの検索',
'Search Problems': '問題の検索',
'Search Projections': '地図投影法の検索',
'Search Projects': 'プロジェクトの検索',
'Search Rapid Assessments': '被災地の現況アセスメントを検索',
'Search Received Items': '受領済み救援物資の検索',
'Search Received Shipments': '受領済みの輸送の検索',
'Search Records': 'レコードの検索',
'Search Recovery Reports': '遺体回収レポートを検索',
'Search Registations': '登録情報の検索',
'Search Registration Request': '登録要請を検索',
'Search Relatives': 'Search Relatives',
'Search Report': 'レポートの検索',
'Search Reports': 'レポートの検索',
'Search Request': '支援要請の検索',
'Search Request Items': '物資の要請を検索',
'Search Requested Items': '支援要請されている物資を検索',
'Search Requested Skills': 'Search Requested Skills',
'Search Requests': '支援要請の検索',
'Search Resources': 'リソースの検索',
'Search Responses': '応答の検索',
'Search Rivers': '河川を検索',
'Search Roles': '役割の検索',
'Search Rooms': 'Search Rooms',
'Search Scenarios': 'Search Scenarios',
'Search Sections': 'セクションの検索',
'Search Sectors': '活動分野を検索',
'Search Sent Items': '送付した物資を検索',
'Search Sent Shipments': '送付済みの輸送の検索',
'Search Service Profiles': 'サービスプロファイルの検索',
'Search Settings': '設定の検索',
'Search Shelter Services': '避難所での提供サービスを検索',
'Search Shelter Types': '避難所タイプの検索',
'Search Shelters': '避難所の検索',
'Search Shipment Transit Logs': '輸送履歴の検索',
'Search Shipment/Way Bills': '輸送/移動費の検索',
'Search Shipment<>Item Relation': '輸送と救援物資の関係性の検索',
'Search Site(s)': 'Siteの検索',
'Search Skill Equivalences': 'Search Skill Equivalences',
'Search Skill Provisions': 'Search Skill Provisions',
'Search Skill Types': 'スキルタイプの検索',
'Search Skills': 'スキルを検索',
'Search Solutions': '解決案の検索',
'Search Staff': 'スタッフの検索',
'Search Staff Types': 'スタッフタイプの検索',
'Search Staff or Volunteer': 'Search Staff or Volunteer',
'Search Status': '状態の検索',
'Search Storage Bin Type(s)': 'Storage Bin Typeの検索',
'Search Storage Bin(s)': 'Storage Bin(s)の検索',
'Search Storage Location(s)': '備蓄地点の検索',
'Search Subscriptions': '寄付申し込みを検索',
'Search Subsectors': 'Search Subsectors',
'Search Support Requests': '支援要求の検索',
'Search Tasks': 'タスクの検索',
'Search Teams': 'チームの検索',
'Search Themes': 'テーマの検索',
'Search Tickets': 'チケットの検索',
'Search Tracks': '追跡情報の検索',
'Search Trainings': 'Search Trainings',
'Search Twitter Tags': 'Twitterのタグを検索',
'Search Units': '単位の検索',
'Search Users': 'ユーザの検索',
'Search Vehicle Details': 'Search Vehicle Details',
'Search Vehicles': 'Search Vehicles',
'Search Volunteer Availability': 'Search Volunteer Availability',
'Search Volunteer Registrations': 'ボランティア登録の検索',
'Search Volunteers': 'ボランティアの検索',
'Search Warehouse Items': '倉庫の物資を検索',
'Search Warehouses': 'Warehousesの検索',
'Search and Edit Group': 'グループを検索して編集',
'Search and Edit Individual': '人物情報を検索して個別に編集',
'Search by ID Tag': 'IDタグで検索',
'Search for Items': '物資の検索',
'Search for Staff or Volunteers': 'Search for Staff or Volunteers',
'Search for a Hospital': '病院を探す',
'Search for a Location': 'ロケーションを検索',
'Search for a Location by name, including local names.': 'Search for a Location by name, including local names.',
'Search for a Person': '人物を探す',
'Search for a Project': 'プロジェクトを探す',
'Search for a Request': '支援要請の検索',
'Search for a shipment by looking for text in any field.': 'Search for a shipment by looking for text in any field.',
'Search for a shipment received between these dates': 'ある期間内に受け取られた輸送を検索する',
'Search for a vehicle by text.': 'Search for a vehicle by text.',
'Search for an Organization by name or acronym': 'Search for an Organization by name or acronym',
'Search for an Organization by name or acronym.': 'Search for an Organization by name or acronym.',
'Search for an asset by text.': 'Search for an asset by text.',
'Search for an item by category.': 'カテゴリで物資を検索',
'Search for an item by Year of Manufacture.': 'Search for an item by Year of Manufacture.',
'Search for an item by brand.': 'Search for an item by brand.',
'Search for an item by catalog.': 'Search for an item by catalog.',
'Search for an item by its code, name, model and/or comment.': 'Search for an item by its code, name, model and/or comment.',
'Search for an item by text.': 'テキストで項目を検索',
'Search for asset by location.': 'Search for asset by location.',
'Search for office by location.': 'Search for office by location.',
'Search for office by organization.': 'Search for office by organization.',
'Search for office by text.': 'Search for office by text.',
'Search for vehicle by location.': 'Search for vehicle by location.',
'Search for warehouse by location.': 'Search for warehouse by location.',
'Search for warehouse by organization.': 'Search for warehouse by organization.',
'Search for warehouse by text.': 'Search for warehouse by text.',
'Search here for a person record in order to:': '人物情報を検索することで、以下の事柄を行うことができます。',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
'Search messages': 'メッセージの検索',
'Searching for different groups and individuals': '他のグループと個人を探す',
'Secondary Server (Optional)': 'セカンダリサーバ(オプション)',
'Seconds must be a number between 0 and 60': '秒には0-60の間の数字を記入してください',
'Seconds must be a number greater than 0 and less than 60': '秒は0から60の間で入力してください',
'Section': 'Section',
'Section Details': 'Sectionの詳細',
'Section deleted': 'Sectionを削除しました',
'Section updated': 'セクションを更新しました',
'Sections': 'セクション',
'Sections that are part of this template': 'Sections that are part of this template',
'Sections that can be selected': 'Sections that can be selected',
'Sector': '活動分野',
'Sector Details': '活動分野の詳細',
'Sector added': '活動分野を追加しました',
'Sector deleted': '活動分野を削除しました',
'Sector updated': '活動分野を更新しました',
'Sector(s)': 'Sector(s)',
'Sectors': '活動分野',
'Security Policy': 'セキュリティポリシー',
'Security Status': 'セキュリティステータス',
'Security problems': 'セキュリティーの問題',
'See All Entries': 'See All Entries',
'See all': 'See all',
'See unassigned recovery requests': 'まだ割り当てられていない遺体回収要請を見る',
'Seen': '発見情報あり',
'Select': '選択',
'Select 2 potential locations from the dropdowns.': '候補地を2つ、ドロップダウンから選択します。',
'Select Items from the Request': '支援要請を基にアイテムを選択する',
'Select Items from this Inventory': '備蓄中の物資から選択',
'Select Language': '言語選択',
'Select Organization': '団体の選択',
'Select Photos': '写真の選択',
'Select Skills from the Request': 'Select Skills from the Request',
"Select a Room from the list or click 'Add Room'": "Select a Room from the list or click 'Add Room'",
'Select a location': 'ロケーションを選択',
"Select a manager for status 'assigned'": "Select a manager for status 'assigned'",
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
'Select a question from the list': 'リストから質問を選択してください',
'Select a range for the number of total beds': 'ベッド総数の範囲を選択',
'Select all that apply': '該当する項目を全て選択',
'Select an Organization to see a list of offices': '団体を選択すると、所属するオフィスが表示されます',
'Select an existing Location': '既に登録してあるロケーションを選択してください',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'アセスメントと支援活動のギャップを解析するクラスタの層を選択:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'オーバーレイを指定し、適切なアセスメントと支援活動を表示させてニーズを明確にします。',
'Select the person assigned to this role for this project.': 'このプロジェクトでこの役割を担当する人物を選択してください。',
'Select the person associated with this scenario.': 'このシナリオに関連する人物を選択してください。',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
'Select to see a list of subdivisions.': '項目を選択すると、より細かい分類を選択できます。',
'Select to show this configuration in the Regions menu.': '範囲メニューで表示する構成を選択して下さい',
'Select to show this configuration in the menu.': 'Select to show this configuration in the menu.',
'Selected Jobs': 'Selected Jobs',
'Selects what type of gateway to use for outbound SMS': 'Selects what type of gateway to use for outbound SMS',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS送信時に、モデム、Tropoまたはゲートウェイのどちらを使用するかを選択',
'Selects whether to use the gateway or the Modem for sending out SMS': 'SMS送信時、モデムとゲートウェイのどちらを使用するか選択',
'Self Registration': '本人による登録',
'Self-registration': '本人による登録',
'Send': '物資送付',
'Send Alerts using Email &/or SMS': '電子メールまたはSMSを使用してアラートを送信',
'Send Commitment as Shipment': 'Send Commitment as Shipment',
'Send Items': '物資を送付',
'Send Mail': 'メール送信',
'Send Message': 'メッセージを送る',
'Send New Shipment': 'Send New Shipment',
'Send Notification': '通知を送信',
'Send Shipment': '輸送を開始する',
'Send a message to this person': 'Send a message to this person',
'Send from %s': '%s から送付',
'Send message': 'メッセージ送信',
'Send new message': '新規メッセージ送信',
'Sends & Receives Alerts via Email & SMS': '電子メール/SMS 経由でアラート送信/受信',
'Senior (50+)': '高齢者 (50+)',
'Sensitivity': '感度',
'Sent': '送信',
'Sent By': 'Sent By',
'Sent By Person': 'Sent By Person',
'Sent Item Details': '送付した物資の詳細',
'Sent Item deleted': '輸送済み物資を削除しました',
'Sent Item updated': '送付した救援物資を更新しました',
'Sent Shipment Details': '送付物資の詳細',
'Sent Shipment canceled': '輸送開始をキャンセルしました',
'Sent Shipment canceled and items returned to Inventory': '送付処理した輸送がキャンセルされ、物資は倉庫に戻りました',
'Sent Shipment updated': '送信した物資が更新されました',
'Sent Shipments': '送付済みの輸送',
'Separate latrines for women and men': 'トイレは男女別である',
'Separated children, caregiving arrangements': '親と離れた子供だちのための保育手配',
'Seraiki': 'セライキ',
'Serial Number': 'シリアルナンバー',
'Series': 'シリーズ',
'Server': 'サーバ',
'Service': 'サービス',
'Service Catalogue': 'サービスカタログ',
'Service Due': 'Service Due',
'Service or Facility': 'サービス、または施設',
'Service profile added': 'サービスプロファイルを追加しました',
'Service profile deleted': 'サービスプロファイルを削除しました',
'Service profile updated': 'サービスプロファイルを更新しました',
'Services': 'サービス',
'Services Available': '利用可能なサービス',
'Set Base Site': 'Set Base Site',
'Set By': 'Set By',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.',
'Setting Details': '設定の詳細',
'Setting added': '設定を追加しました',
'Setting deleted': '設定を削除しました',
'Setting updated': '設定を更新しました',
'Settings': '設定',
'Settings updated': '設定を更新しました',
'Settings were reset because authenticating with Twitter failed': 'Twitterの認証に失敗したため、設定をリセットしました',
'Settings which can be configured through the web interface are available here.': 'Settings which can be configured through the web interface are available here.',
'Severe': '深刻',
'Severity': '深刻度',
'Severity:': '深刻度:',
'Share a common Marker (unless over-ridden at the Feature level)': 'マーカーの共有 (機能レイヤで上書きされない限り)',
'Shelter': '避難所',
'Shelter & Essential NFIs': '避難所/生活用品',
'Shelter Details': '避難所の詳細',
'Shelter Name': '避難所名称',
'Shelter Registry': '避難所登録',
'Shelter Service': '避難所サービス',
'Shelter Service Details': '避難所サービスの詳細',
'Shelter Service added': '避難所サービスを追加しました',
'Shelter Service deleted': '避難所サービスを削除しました',
'Shelter Service updated': '避難所サービスを更新しました',
'Shelter Services': '避難所サービス',
'Shelter Type': '避難所タイプ',
'Shelter Type Details': '避難所タイプの詳細',
'Shelter Type added': '避難所タイプを追加しました',
'Shelter Type deleted': '避難所タイプを削除しました',
'Shelter Type updated': '避難所タイプを更新しました',
'Shelter Types': '避難所タイプ',
'Shelter Types and Services': '避難所のタイプとサービス',
'Shelter added': '避難所を追加しました',
'Shelter deleted': '避難所を削除しました',
'Shelter updated': '避難所を更新しました',
'Shelter/NFI Assistance': '避難所 / 生活用品支援',
'Shelter/NFI assistance received/expected': '避難所 / 生活必需品の支援を受領済み、あるいは受領予定',
'Shelters': '避難所',
'Shipment Created': '輸送が作成されました',
'Shipment Details': '輸送の詳細',
'Shipment Items': '救援物資の輸送',
'Shipment Items received by Inventory': '物資備蓄地点から送付された救援物資',
'Shipment Items sent from Inventory': '備蓄物資から輸送を行いました',
'Shipment Transit Log Details': '輸送履歴の詳細',
'Shipment Transit Log added': '輸送履歴を追加しました',
'Shipment Transit Log deleted': '輸送履歴を削除しました',
'Shipment Transit Log updated': '輸送履歴を更新しました',
'Shipment Transit Logs': '輸送履歴',
'Shipment to Send': 'Shipment to Send',
'Shipment/Way Bill added': '輸送/移動費を追加しました',
'Shipment/Way Bills': '輸送/移動費',
'Shipment/Way Bills Details': '輸送/移動費の詳細',
'Shipment/Way Bills deleted': '輸送/移動費を削除しました',
'Shipment/Way Bills updated': '輸送/移動費を更新しました',
'Shipment<>Item Relation added': '輸送<>物資間の関係を追加しました',
'Shipment<>Item Relation deleted': '輸送<>アイテム間の関係を削除しました',
'Shipment<>Item Relation updated': '輸送<>物資間の関係を更新しました',
'Shipment<>Item Relations': '輸送<>物資間の関係',
'Shipment<>Item Relations Details': '輸送<>物資間の関係詳細',
'Shipments': '輸送',
'Shipments To': '輸送先',
'Shooting': '銃撃',
'Short Assessment': '簡易評価',
'Short Description': '概要',
'Show Checklist': 'チェックリストを表示',
'Show Details': '詳細を表示',
'Show Map': '地図の表示',
'Show Region in Menu?': '地域をメニューで表示しますか?',
'Show in Menu?': 'Show in Menu?',
'Show on Map': 'Show on Map',
'Show on map': '地図上に表示',
'Sign-up as a volunteer': 'ボランティアとして登録する',
'Sign-up for Account': 'アカウント登録',
'Sign-up succesful - you should hear from us soon!': '登録できました。すぐに連絡が送られます。',
'Sindhi': 'シンド語',
'Single PDF File': 'Single PDF File',
'Site': 'サイト',
'Site Address': 'サイトの住所',
'Site Administration': 'このサイト自体の管理',
'Site Description': 'サイトの説明',
'Site Details': 'Siteの詳細',
'Site ID': 'サイトID',
'Site Location Description': 'サイト ロケーションの説明',
'Site Location Name': 'サイトロケーション名',
'Site Manager': 'Site 管理者',
'Site Name': 'Site の名前',
'Site added': 'サイトを追加しました',
'Site deleted': 'サイトを削除しました',
'Site updated': 'サイトを更新しました',
'Site/Warehouse': 'サイト/倉庫',
'Sites': 'サイト',
'Situation': 'Situation',
'Situation Awareness & Geospatial Analysis': '広域情報の取得や、地理情報の分析を行ないます',
'Sketch': 'スケッチ',
'Skill': 'スキル',
'Skill Catalog': 'Skill Catalog',
'Skill Details': 'スキルの詳細',
'Skill Equivalence': 'Skill Equivalence',
'Skill Equivalence Details': 'Skill Equivalence Details',
'Skill Equivalence added': 'Skill Equivalence added',
'Skill Equivalence deleted': 'Skill Equivalence deleted',
'Skill Equivalence updated': 'Skill Equivalence updated',
'Skill Equivalences': 'Skill Equivalences',
'Skill Provision': 'Skill Provision',
'Skill Provision Catalog': 'Skill Provision Catalog',
'Skill Provision Details': 'Skill Provision Details',
'Skill Provision added': 'Skill Provision added',
'Skill Provision deleted': 'Skill Provision deleted',
'Skill Provision updated': 'Skill Provision updated',
'Skill Provisions': 'Skill Provisions',
'Skill Status': 'スキル状況',
'Skill Type': 'Skill Type',
'Skill Type Catalog': 'Skill Type Catalog',
'Skill Type Details': 'スキルタイプの詳細',
'Skill Type added': 'スキルタイプを追加しました',
'Skill Type deleted': 'スキルタイプを削除しました',
'Skill Type updated': 'スキルタイプを更新しました',
'Skill Types': 'スキルタイプ',
'Skill added': 'スキルを追加しました',
'Skill added to Request': 'Skill added to Request',
'Skill deleted': 'スキルを削除しました',
'Skill removed': 'Skill removed',
'Skill removed from Request': 'Skill removed from Request',
'Skill updated': 'スキルを更新しました',
'Skills': 'スキル',
'Skills Catalog': 'Skills Catalog',
'Skills Management': 'Skills Management',
'Skype ID': 'Skype ID',
'Slope failure, debris': '斜面崩壊・崩壊堆積物',
'Small Trade': '小規模取引',
'Smoke': '煙',
'Snapshot': 'スナップショット',
'Snapshot Report': 'スナップショットレポート',
'Snow Fall': '降雪',
'Snow Squall': '豪雪',
'Soil bulging, liquefaction': '土壌隆起・液状化',
'Solid waste': '固形廃棄物',
'Solution': '解決案',
'Solution Details': '解決案の詳細',
'Solution Item': '解決案項目',
'Solution added': '解決案を追加しました',
'Solution deleted': '解決案を削除しました',
'Solution updated': '解決案を更新しました',
'Solutions': '解決案',
'Some': '散見',
'Sorry - the server has a problem, please try again later.': 'すみません、サーバーに問題が発生しています。時間を置いてやり直してください。',
'Sorry that location appears to be outside the area of the Parent.': 'すみません、このロケーションは親ロケーションのエリアの外にあるようです。',
'Sorry that location appears to be outside the area supported by this deployment.': 'すいません、この位置は、このデプロイメントでサポートされている領域の外です。',
'Sorry, I could not understand your request': '残念ながら、リクエストが理解できませんでした。',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '申し訳ありませんが、 MapAdmin 権限を持つユーザだけがロケーションのグループを作れます',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '申し訳ありませんが、ロケーションの編集を行うにはMapAdmin権限を持ったユーザである必要があります。',
'Sorry, something went wrong.': 'すいません、何か問題が発生しています。',
'Sorry, that page is forbidden for some reason.': 'すいません、都合により、このページは閲覧禁止です。',
'Sorry, that service is temporary unavailable.': 'すいません、このサービスは一時的に利用不可となっています。',
'Sorry, there are no addresses to display': 'すいません、表示する住所がありません',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
'Source': '情報元',
'Source ID': '情報元ID',
'Source Time': '情報ソース入手時刻',
'Source Type': '情報ソース種別',
'Sources of income': '収入源',
'Space Debris': '宇宙廃棄物',
'Spanish': 'スペイン語',
'Special Ice': '特別な氷',
'Special Marine': '特別海上',
'Special needs': '特別な要求',
'Specialized Hospital': '専門病院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'ある人々やグループが見られるロケーションの中の特別な場所 (建物、部屋等)',
'Specific Location': '特定のロケーション',
'Specific locations need to have a parent of level': 'ロケーションを指定するには、そのロケーションの親属性指定が必要です',
'Specify a descriptive title for the image.': '画像の説明として一言タイトルをつけてください。',
'Specify the bed type of this unit.': 'この施設にある寝具の種別を指定してください',
'Specify the minimum sustainability in weeks or days.': '最短で何週間、あるいは何日以内に枯渇の可能性があるかを記載してください',
'Specify the number of available sets': '利用可能なセットの個数を入力してください',
'Specify the number of available units (adult doses)': '(成人が使用するとして)使用可能な個数を入力してください',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '使用可能な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Specify the number of sets needed per 24h': '24時間ごとに必要なセットの数を指定する',
'Specify the number of units (adult doses) needed per 24h': '(成人が使用するとして)24時間ごとに必要な個数を入力してください',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24時間ごとに必要な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Speed': 'Speed',
'Spherical Mercator?': '球面メルカトル?',
'Spreadsheet Importer': 'スプレッドシートの取り込み',
'Spreadsheet uploaded': 'スプレッドシートがアップロードされました',
'Spring': '湧き水',
'Squall': 'スコール',
'Staff': 'スタッフ',
'Staff & Volunteers': 'Staff & Volunteers',
'Staff 2': 'スタッフ 2',
'Staff Details': 'スタッフの詳細',
'Staff ID': 'Staff ID',
'Staff Member Details': 'Staff Member Details',
'Staff Members': 'Staff Members',
'Staff Record': 'Staff Record',
'Staff Type Details': 'スタッフタイプの詳細',
'Staff Type added': 'スタッフタイプを追加しました',
'Staff Type deleted': 'スタッフタイプを削除しました',
'Staff Type updated': 'スタッフタイプを更新しました',
'Staff Types': 'スタッフ分類',
'Staff added': 'スタッフを追加しました',
'Staff and Volunteers': 'Staff and Volunteers',
'Staff deleted': 'スタッフを削除しました',
'Staff member added': 'Staff member added',
'Staff present and caring for residents': '上記施設にスタッフが配置され、ケアを行っている',
'Staff updated': 'スタッフを更新しました',
'Staff2': 'Staff2',
'Staffing': 'スタッフ配備',
'Stairs': '階段',
'Start Date': 'Start Date',
'Start date': '開始日',
'Start date and end date should have valid date values': '開始日と終了日は正しい値である必要があります',
'Start of Period': '開始期間',
'State': 'State',
'Stationery': '文房具',
'Status': 'ステータス',
'Status Report': 'ステータスレポート',
'Status Updated': 'Status Updated',
'Status added': '状況が追加されました',
'Status deleted': 'ステータスを削除しました',
'Status of clinical operation of the facility.': '施設で行われている診療の状況を記載してください。',
'Status of general operation of the facility.': '施設の運用状況情報を記載してください。',
'Status of morgue capacity.': '死体安置所の収容状況です。',
'Status of operations of the emergency department of this hospital.': 'この病院の救急部門の運用状況です。',
'Status of security procedures/access restrictions in the hospital.': '病院のアクセス制限/セキュリティ手順の状態。',
'Status of the operating rooms of this hospital.': 'この病院の手術室の状態。',
'Status updated': '状況を更新しました',
'Steel frame': '鉄骨',
'Stolen': 'Stolen',
'Storage Bin': '物資貯蔵容器',
'Storage Bin Details': '物資保管場所の詳細',
'Storage Bin Number': 'Storage Bin番号',
'Storage Bin Type': 'Storage Binタイプ',
'Storage Bin Type Details': '物資保管タイプの詳細',
'Storage Bin Type added': '物資保管タイプを追加しました',
'Storage Bin Type deleted': 'Storage Binタイプを削除しました',
'Storage Bin Type updated': 'Storage Binタイプを更新しました',
'Storage Bin Types': '収納箱のタイプ',
'Storage Bin added': 'Storage Binを追加しました',
'Storage Bin deleted': 'Storage Bin を削除しました',
'Storage Bin updated': 'Storage Bin を更新しました',
'Storage Bins': '物資保管場所',
'Storage Location': '備蓄地点',
'Storage Location Details': '備蓄地点の詳細',
'Storage Location ID': '備蓄地点ID',
'Storage Location Name': '備蓄地点名称',
'Storage Location added': '備蓄地点を追加しました',
'Storage Location deleted': '備蓄地点を削除しました',
'Storage Location updated': '備蓄地点を更新しました',
'Storage Locations': '備蓄地点',
'Store spreadsheets in the Eden database': 'Edenのデータベースにスプレッドシートを格納',
'Storeys at and above ground level': '地上およびそれより上の階数',
'Storm Force Wind': '暴風',
'Storm Surge': '高潮',
'Stowaway': '密航者',
'Street': 'ストリート',
'Street (continued)': '住所 (続き)',
'Street Address': '住所',
'Streetview Enabled?': 'Streetview Enabled?',
'Strong Wind': '強風',
'Structural': '構造的な',
'Structural Hazards': '構造破壊',
'Style': 'Style',
'Style Field': 'Style Field',
'Style Values': 'Style Values',
'Sub Category': 'サブカテゴリ',
'Sub-type': 'サブタイプ',
'Subject': '件名',
'Submission successful - please wait': '送信に成功しました。しばらくお待ちください',
'Submission successful - please wait...': '送信に成功しました。しばらくお待ちください',
'Submit': '送信',
'Submit New': '新規登録',
'Submit New (full form)': '(完全なフォームで)新しく投稿する',
'Submit New (triage)': '新しい (トリアージ) を追加',
'Submit a request for recovery': '遺体回収要請を作成する',
'Submit new Level 1 assessment (full form)': 'レベル1のアセスメントを投稿する(完全なフォーム)',
'Submit new Level 1 assessment (triage)': '新しいレベル1アセスメント(トリアージ)を追加',
'Submit new Level 2 assessment': '新しいレベル2アセスメントの登録',
'Subscription Details': '寄付申し込みの詳細',
'Subscription added': '寄付申し込みを追加しました',
'Subscription deleted': '寄付申し込みを削除しました',
'Subscription updated': '寄付申し込みを更新しました',
'Subscriptions': '寄付申し込み',
'Subsector': 'Subsector',
'Subsector Details': 'Subsector Details',
'Subsector added': 'Subsector added',
'Subsector deleted': 'Subsector deleted',
'Subsector updated': 'Subsector updated',
'Subsectors': 'Subsectors',
'Subsistence Cost': '生活費',
'Suburb': '郊外',
'Sufficient care/assistance for chronically ill': '慢性疾患罹患者への十分なケア / 介護がある',
'Suggest not changing this field unless you know what you are doing.': 'よくわからない場合は、この項目を変更しないでください。',
'Summary': '要約',
'Summary by Administration Level': '管理レベルの概要',
'Sunday': '日曜',
'Supervisor': '監督者',
'Supplies': '支給品',
'Supply Chain Management': 'Supply Chain Management',
'Supply Item Categories': 'Supply Item Categories',
'Support Request': '支援要請',
'Support Requests': '支援の要請',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '危機管理の専門グループの助言を取り入れることで、救援活動の優先順位を作成しやすくします。',
'Sure you want to delete this object?': 'このオブジェクトを削除してもよろしいですか?',
'Surgery': '外科',
'Survey Answer': '調査回答',
'Survey Answer Details': '調査回答詳細',
'Survey Answer added': '調査の回答を追加しました',
'Survey Answer deleted': '調査の回答を削除しました',
'Survey Answer updated': '調査回答を更新しました',
'Survey Module': '調査モジュール',
'Survey Name': 'Survey 名',
'Survey Question': '調査の質問',
'Survey Question Details': '調査項目の詳細',
'Survey Question Display Name': 'フィードバックの質問の表示名',
'Survey Question added': '調査の質問を追加しました',
'Survey Question deleted': '調査の質問を削除しました',
'Survey Question updated': 'Survey Questionを更新しました',
'Survey Section': '調査項目',
'Survey Section Details': 'フィードバック項目の詳細',
'Survey Section Display Name': '調査項目の表示名',
'Survey Section added': '調査項目を追加しました',
'Survey Section deleted': 'フィードバック項目を削除しました',
'Survey Section updated': 'サーベイセクションを更新しました',
'Survey Series': '一連の調査',
'Survey Series Details': 'Survey Seriesの詳細',
'Survey Series Name': 'フィードバックシリーズ名',
'Survey Series added': '一連の調査を追加しました',
'Survey Series deleted': '一連の調査を削除しました',
'Survey Series updated': '連続調査を更新しました',
'Survey Template': '調査テンプレート',
'Survey Template Details': '調査テンプレートの詳細',
'Survey Template added': 'Surveyテンプレートを追加しました',
'Survey Template deleted': '調査テンプレートを削除しました',
'Survey Template updated': '調査のテンプレートを更新しました',
'Survey Templates': '調査のテンプレート',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開発時にこのスイッチをONにすることで、CSS/Javascriptファイルの診断を行なえます。',
'Symbology': 'シンボロジー',
'Sync Conflicts': 'データ同期中に競合が発生しました',
'Sync History': 'データ同期履歴',
'Sync Now': '今すぐ同期',
'Sync Partners': 'データ同期パートナー',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'データ同期先とは、情報の同期を行うインスタンスやピアのことを指します。(Sahana EdenやSahanaAgasti、Ushahidiなどと同期可能です) 同期先の登録や検索、登録情報の変更を行う際は、リンクをクリックしてページを表示してください。',
'Sync Pools': 'プールの同期',
'Sync Schedule': 'データ同期スケジュール',
'Sync Settings': 'データ同期設定',
'Sync process already started on ': 'データ同期プロセスは既に開始しています',
'Synchronisation': '同期',
'Synchronisation History': 'データ同期履歴',
'Synchronization': 'データ同期',
'Synchronization Conflicts': '同期のコンフリクト',
'Synchronization Details': 'データ同期の詳細',
'Synchronization History': 'データ同期履歴',
'Synchronization Peers': 'データ同期先',
'Synchronization Settings': 'データ同期設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'データ同期を使用すると、他の端末とデータを共有し、自身のデータを最新の状態に更新することができます。このページには、SahanaEdenにおいてデータ同期を行う方法が記載されています。',
'Synchronization not configured.': 'データ同期が設定されていません',
'Synchronization settings updated': 'データ同期設定を更新しました',
'Syncronisation History': 'データ同期履歴',
'Syncronisation Schedules': 'データ同期スケジュール',
'System allows the General Public to Report Incidents & have these Tracked.': 'システムを使うことで、一般市民によるインシデントの報告、および報告されたインシデントの追跡を行うことができます。',
'System allows the tracking & discovery of Items stored in Locations.': 'システムにより、物資がどこで保持されているかを追跡、明確化することができます。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'このシステムは、支援団体、個々の支援者、政府職員、そして避難所に移動した人々の間で、援助の需要と供給の調整を図るための、オンラインの中央データベースです。このシステムを用いて、利用可能な資源を、需要を満たすように、有効かつ効率的に割り当てることができます。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'この仕組みでは、災害地域の全てのボランティア情報を提供します。ボランティアの活動場所に加え、そこで提供する支援内容も提供します。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
'TMS Layers': 'TMSレイヤ',
'Table name': 'テーブル名',
'Tags': 'タグ',
'Take shelter in place or per <instruction>': 'その場で避難するか、<instruction>に従って避難してください',
'Task': 'Task',
'Task Details': 'タスクの詳細',
'Task List': 'タスク一覧',
'Task Status': 'タスクの状況',
'Task added': 'タスクを追加しました',
'Task deleted': 'タスクを削除しました',
'Task removed': 'Task removed',
'Task status': 'タスク状況',
'Task updated': 'タスクを更新しました',
'Tasks': 'タスク',
'Team': 'チーム',
'Team Description': 'チーム概要',
'Team Details': 'チームの詳細',
'Team Head': 'チーム代表者',
'Team ID': 'Team ID',
'Team Id': 'チームID',
'Team Leader': 'チームリーダー',
'Team Member added': 'チームメンバーを追加しました',
'Team Members': 'チームメンバー',
'Team Name': 'チーム名',
'Team Type': 'チームタイプ',
'Team added': 'チームを追加しました',
'Team deleted': 'チームを削除しました',
'Team updated': 'チームを更新しました',
'Teams': 'チーム',
'Technical testing only, all recipients disregard': '技術テストのみです。すべての受信者は無視してください',
'Telecommunications': '通信・情報',
'Telephone': '電話',
'Telephone Details': 'Telephone Details',
'Telephony': '電話',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.',
'Temp folder %s not writable - unable to apply theme!': '一時フォルダ%sが書き込み不可になっています。テーマを適用できません。',
'Template Name': 'Template Name',
'Template file %s not readable - unable to apply theme!': 'テンプレートファイル %s が読み込み不可になっています。テーマを適用できません。',
'Templates': 'テンプレート',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '国内における第五段階管理部門を示す用語(例: 郵便番号の下位部分)。このレベルは通常使われません。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '国内で第4の行政区域を示す用語 (例えば村、地区)',
'Term for the primary within-country administrative division (e.g. State or Province).': '国内で最大の行政区域を示す用語 (例えば州や都道府県)',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Term for the secondary within-country administrative division (e.g. District or County).',
'Term for the secondary within-country administrative division (e.g. District).': '国内で二番目の管理部門の用語 (例: 区)',
'Term for the third-level within-country administrative division (e.g. City or Town).': '国内で三番目の管理部門を示す用語 (例: 市や町)',
'Term for the top-level administrative division (i.e. Country).': 'Term for the top-level administrative division (i.e. Country).',
'Term for the top-level administrative division (typically Country).': '最上位の統制区域を示す用語 (一般的には国)',
'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.',
'Terms of Service:': 'Terms of Service:',
'Territorial Authority': '地方機関',
'Terrorism': 'テロリズム',
'Tertiary Server (Optional)': '三番目のサーバ(オプション)',
'Test Results': 'テスト結果',
'Text': 'テキスト',
'Text Color for Text blocks': 'テキストブロックのテキスト色',
'Text before each Text Field (One per line)': 'テキストフィールドの前のテキスト (一行に一つ)',
'Text in Message': 'メッセージのテキスト',
'Text in Message: ': 'メッセージのテキスト: ',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.',
'Thanks for your assistance': 'ご協力ありがとうございます',
'The': ' ',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query"は"db.table1.field1==\'value\'"のような条件です。SQL JOINの"db.table1.field1 == db.table2.field2"結果のようなものです。',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
'The Area which this Site is located within.': 'このサイトが含まれる地域',
'The Assessments module allows field workers to send in assessments.': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。',
'The Assessments module allows field workers to send in assessments. 2 different options are provided here currently:': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。現在は、2種類のオプションが提供されています。',
'The Author of this Document (optional)': 'この文書の作成者氏名(オプション)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'ビルアセスメントモジュールではビルの安全性評価を行います (例:地震の後など)',
'The Camp this Request is from': 'The Camp this Request is from',
'The Camp this person is checking into.': 'The Camp this person is checking into.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物/グループの現在地は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The District for this Report.': 'このレポートが関連する地区。',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '承認依頼が送信されるメールアドレス(通常は個人のメールアドレスではなく、グループのメールアドレス)。この欄が空白の場合、ドメインが一致すれば依頼は自動的に承認されます',
'The Group whose members can edit data in this record.': 'このグループのメンバーは、レコード上のデータを修正することができます。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '一般ユーザは、インシデント・レポートシステムからインシデントを報告し、その結果を表示させることができます。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Siteのロケーション、(レポート用で)おおまかな場合と、(地図表示用で)正確な場合とがあります。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物がやって来たロケーションで、報告のためのだいたいの場所、あるいは地図で表示するための正確な緯度経度です。使用可能なロケーションを検索するには最初の数文字を入力してください',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物が向かう場所は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The Media Library provides a catalog of digital media.': 'メディア・ライブラリーは、デジタル・メディアの一覧を提供します。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'メッセージング・モジュールは、SAHANAシステムのコミュニケーション中心となります。災害の前、災害中または災害の後に様々なグループや個人にSMSとeメールで警報やメッセージを送ります。',
'The Office this record is associated with.': 'このレコードに関連するオフィス',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organization Registry keeps track of all the relief organizations working in the area.',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '団体情報を登録することで、被災地域で活動するすべての団体の活動を追跡します。また、それぞれの地域において、彼らがどこで活動しているかという情報だけでなく、彼らが各地で提供しているプロジェクトの範囲についての情報も提供します。',
'The Organization this record is associated with.': 'このレコードに関連する団体',
'The Organization which is funding this Activity.': 'この支援活動に資金を提供する団体',
'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.',
'The Person currently filling this Role.': '現在この役割に属している人物',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'プロジェクト追跡モジュールでは、支援活動(アクティビティ)を作成し、必要な物資 / サービスのギャップを満たすことを目的とします。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '被災地の現況アセスメントには、専門団体によって行われたレポートの結果が格納されます。',
'The Request this record is associated with.': 'このレコードに関連する支援要請',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '支援要請管理システムは、全ての支援団体、救援者、政府職員、および避難所に暮らす避難者たち自身が、要求に応じて援助の供給を調整できる中央のオンラインデータベースです。支援要請管理システムは効果的かつ効率的に要求を満たすことができる利用可能な資源の割り当てを可能にします。',
'The Role this person plays within this Office/Project.': 'オフィス/プロジェクトにおける役割',
'The Role this person plays within this hospital.': '病院内における役割',
'The Role to which this Role reports.': 'この権限の報告先となる権限',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所登録は、避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'The Shelter this Request is from': 'The Shelter this Request is from',
'The Shelter this Request is from (optional).': '要請を行った避難所(オプション)',
'The Shelter this person is checking into.': 'この人物がチェックインした避難所',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '地図を用いてレイヤを利用できる WMS サービスの GetCapabilities の URL。',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
'The URL of your web gateway without the post parameters': 'ポストパラメータを指定しないWebゲートウェイのURL',
'The URL to access the service.': 'サービスにアクセスするためのURL',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府がこの施設に割り当てた汎用一意識別子(UUID)。',
'The area is ': '面積は ',
'The asset must be assigned to a site OR location.': 'The asset must be assigned to a site OR location.',
'The attribute which is used for the title of popups.': 'The attribute which is used for the title of popups.',
'The attribute within the KML which is used for the title of popups.': 'このKML属性はポップアップのタイトルに使われます。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KMLで定義されている属性はポップアップの本文に使用されます。(各属性ごとに半角スペースで分割して記載してください)',
'The body height (crown to heel) in cm.': '頭頂からかかとまでの身長(単位はcm)',
'The category of the Item.': 'この救援物資のカテゴリです',
'The contact person for this organization.': '団体の代表窓口',
'The country the person usually lives in.': 'この人物が普段の生活を営む国',
'The default Facility for which this person is acting.': 'The default Facility for which this person is acting.',
'The default Facility for which you are acting.': 'The default Facility for which you are acting.',
'The default Organization for whom this person is acting.': 'The default Organization for whom this person is acting.',
'The default Organization for whom you are acting.': 'The default Organization for whom you are acting.',
'The default policy for data import from this peer.': 'このデータ同期先からデータをインポートする際のデフォルト設定。',
'The descriptive name of the peer.': 'データ同期先のわかりやすい名称',
'The duplicate record will be deleted': '重複したレコードは削除されます',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '入力した単位をこのユニットにリンクします。例えば、mをメートルとする場合、(存在するなら) kilometer を選択して、乗数に値 0.001 を入力します。',
'The first or only name of the person (mandatory).': '人物の苗字(必須)。 外国籍の方等については避難所等での管理上の主たる表記/順に従ってください。',
'The following modules are available': '利用可能なモジュールは以下のとおりです。',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.',
'The hospital this record is associated with.': 'このレコードに関連のある病院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'この物資は、特定のプロジェクト、対象者、村落、あるいは交付コードなどで指定された寄付先に送付されるよう指定されています。',
'The language to use for notifications.': '通知に使用する言語',
'The language you wish the site to be displayed in.': 'このサイトを表示するための言語',
'The last known location of the missing person before disappearance.': '行方不明者が最後に目撃された場所',
'The length is ': '長さは',
'The level at which Searches are filtered.': 'The level at which Searches are filtered.',
'The list of Brands are maintained by the Administrators.': '銘柄一覧の整備は、管理者によって可能です',
'The list of Catalogs are maintained by the Administrators.': 'The list of Catalogs are maintained by the Administrators.',
'The list of Item categories are maintained by the Administrators.': '供給物資カテゴリの一覧は、管理者によってメンテナンスされています。',
'The map will be displayed initially with this latitude at the center.': 'The map will be displayed initially with this latitude at the center.',
'The map will be displayed initially with this longitude at the center.': 'The map will be displayed initially with this longitude at the center.',
'The minimum number of features to form a cluster.': 'The minimum number of features to form a cluster.',
'The name to be used when calling for or directly addressing the person (optional).': '電話をかける際など、直接連絡をとりたい場合に使われる名前(オプション)',
'The next screen will allow you to detail the number of people here & their needs.': '次の画面では、人数および必要な物資/サービスの詳細を確認できます。',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '次のスクリーンで、項目の詳細なリストと量を入力できる場合があります。',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '元の物資一つと同じだけの、代替品の測定単位での数量',
'The number of pixels apart that features need to be before they are clustered.': 'The number of pixels apart that features need to be before they are clustered.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '表示している地図の周辺タイルをダウンロードする数。0は最初のページの読み込みがより早い事を意味し、数字を大きくすると視点をパンした際に表示がより早くなります。',
'The person at the location who is reporting this incident (optional)': '現地からこのインシデントを報告した人物(オプション)',
'The person reporting about the missing person.': '行方不明者情報の提供者。',
'The person reporting the missing person.': '行方不明者を報告した人',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
'The post variable containing the phone number': '電話番号を含む post 変数',
'The post variable on the URL used for sending messages': 'メッセージ送信に使用するURLのPOST変数',
'The post variables other than the ones containing the message and the phone number': 'メッセージや電話番号以外を含むpost変数',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'モデムが接続されているシリアルポート - Linuxでは /dev/ttyUSB0 等、Windowsでは com1, com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーからの応答がありませんでした。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーから不正な応答が返ってきました。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'シンプルポリシーでは、匿名ユーザーによるデータの閲覧、および、登録ユーザーによる編集が許可されます。完全版ポリシーでは、個々のテーブルやレコードに対して管理権限を設定することができます。詳細はmodels/zzz.pyを参照してください。',
'The site where this position is based.': 'The site where this position is based.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '対象のイベントはもはや脅威や懸念をもたらしません。今後の対応は<instruction>に記載されています。',
'The time at which the Event started.': 'The time at which the Event started.',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'あなたのタイムゾーンとUTCとの差を、東では+HHMMで、西では-HHMMで指定してください',
'The title of the WMS Browser panel in the Tools panel.': '[ツール]パネルのWMS Browserパネルのタイトル',
'The token associated with this application on': 'このアプリケーションが関連づけられているトークン',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '一意のデータ同期先識別子です。データ同期先がSahana Edenシステムではない場合は、空白にしておくことで自動的に割り当てが行われます。',
'The unique identifier which identifies this instance to other instances.': 'このインスタンスを他のインスタンスと区別するための固有識別子',
'The way in which an item is normally distributed': '物資が配給される際の通常経路',
'The weight in kg.': '重量(単位:kg)',
'Theme': 'テーマ',
'Theme Details': 'テーマの詳細',
'Theme added': 'テーマを追加しました',
'Theme deleted': 'テーマを削除しました',
'Theme updated': 'テーマを更新しました',
'Themes': 'テーマ',
'There are errors': 'エラーが発生しました',
'There are insufficient items in the Inventory to send this shipment': 'There are insufficient items in the Inventory to send this shipment',
'There are multiple records at this location': 'このロケーションに複数のレコードが存在します',
'There are not sufficient items in the Inventory to send this shipment': 'この輸送を開始するために十分な量の物資が備蓄されていません',
'There is no address for this person yet. Add new address.': 'この人物の住所がまだありません。新しい住所を入力してください',
'There was a problem, sorry, please try again later.': '問題が発生しています。すみませんが、時間を置いてからやり直してください。',
'These are settings for Inbound Mail.': '電子メール受信箱の設定です',
'These are the Incident Categories visible to normal End-Users': '一般のエンドユーザーが閲覧できるインシデントカテゴリです',
'These are the default settings for all users. To change settings just for you, click ': 'これらは、全てのユーザーのデフォルト設定です。個人用の設定を変更するには、以下をクリックしてください。',
'These need to be added in Decimal Degrees.': 'これらは、十進角で追加する必要があります。',
'They': 'それら',
'This appears to be a duplicate of ': 'これは、以下のものと重複しているようです。',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': '住所か、あるいは簡単な記述(ガソリンスタンドの隣、など)を記載しています。',
'This email address is already in use': 'このメールアドレスは使用されています',
'This file already exists on the server as': 'このファイルは別の名前でサーバに既に存在しています : ',
'This form allows the administrator to remove a duplicate location.': '管理者はこのフォームを使うことで、重複したロケーションデータを削除できます。',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.',
'This is the way to transfer data between machines as it maintains referential integrity.': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。...重複したデータは最初に手動で削除する必要があります。',
'This level is not open for editing.': 'This level is not open for editing.',
'This might be due to a temporary overloading or maintenance of the server.': 'サーバーが一時的に過負荷状態になっているか、あるいはメンテナンスを行っています。',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.',
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '過去に行ったデータ同期履歴を表示します。以下のリンクをクリックしてください。',
'This screen allows you to upload a collection of photos to the server.': 'この画面では、複数の画像をサーバーにアップロードすることができます。',
'This setting can only be controlled by the Administrator.': 'This setting can only be controlled by the Administrator.',
'This shipment has already been received.': 'この輸送は既に受領されています。',
'This shipment has already been sent.': 'この輸送は既に送付されています。',
'This shipment has not been received - it has NOT been canceled because can still be edited.': 'この輸送は受領されていません。 - まだ編集可能であり、キャンセルされてはいません',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': '輸送はまだ開始されていませんが、キャンセルされてはいません。編集可能です。',
'This shipment will be confirmed as received.': 'この輸送された物資は、受信済み扱いになります',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'この値は、各地点の外側にわずかな距離を追加します。これがないと、最も外側の地点が境界ボックス上に位置してしまい、表示されない可能性があります。',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'この値はこの地域を表示する時に使う最小の幅と高さを示します。この値がない場合、ある単一の地点を表示するときにその周辺の範囲は表示されません。地図が表示された後では、好きな大きさに拡大・縮小できます。',
'Thunderstorm': '雷雨',
'Thursday': '木曜日',
'Ticket': 'チケット',
'Ticket Details': 'チケットの詳細',
'Ticket ID': 'チケットID',
'Ticket added': 'チケットを追加しました',
'Ticket deleted': 'チケットを削除しました',
'Ticket updated': 'チケットを更新しました',
'Ticketing Module': 'チケット発行モジュール',
'Tickets': 'チケット',
'Tiled': 'Tiled',
'Tilt-up concrete': 'ティルトアップ式コンクリート',
'Timber frame': '木造',
'Time needed to collect water': '水の確保に必要な時間',
'Time of Request': '要求発生時刻',
'Timeline': 'タイムライン',
'Timeline Report': 'タイムラインレポート',
'Timestamp': 'タイムスタンプ',
'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'Title': 'タイトル',
'Title to show for the Web Map Service panel in the Tools panel.': 'Title to show for the Web Map Service panel in the Tools panel.',
'To': ' ',
'To Location': '送付先ロケーション',
'To Organization': '送付先団体',
'To Person': '送付先人物情報',
'To Site': '送付先サイト',
'To begin the sync process, click the button on the right => ': '右のボタンを押すと、データ同期が開始されます。',
'To begin the sync process, click this button => ': 'このボタンを押すと、データ同期が開始されます。=>',
'To create a personal map configuration, click ': '個人用の地図設定を作成するにはクリックしてください',
'To delete': '削除する側',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMapを編集する際は、models/000_config.pyで定義されている設定を編集してください',
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'To search by job title, enter any portion of the title. You may use % as wildcard.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての支援要請を表示します。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'アセスメントを検索するには、アセスメントのチケット番号の一部を入力してください。ワイルドカードとして % が使えます。すべてのアセスメントをリストするには、なにも入力せず検索ボタンをおしてください。',
'To submit a new job, use the': 'jobを新規送信するには、以下を使用してください。',
'To variable': '変数に',
'Tools': 'ツール',
'Tornado': '竜巻',
'Total': '合計数',
'Total # of Beneficiaries Reached ': '支援が到達した受益者の合計数 ',
'Total # of Target Beneficiaries': '受益対象者の合計人数',
'Total # of households of site visited': '訪問した世帯数',
'Total Beds': '合計ベッド数',
'Total Beneficiaries': '受益者の総数',
'Total Cost per Megabyte': 'メガバイト毎の合計費用',
'Total Cost per Minute': '一分毎の合計費用',
'Total Households': '総世帯数',
'Total Monthly': '月ごとの合計',
'Total Monthly Cost': '月額総計',
'Total Monthly Cost: ': '月毎の費用の合計: ',
'Total One-time Costs': '1回毎の費用総計',
'Total Persons': '合計者数',
'Total Recurring Costs': '経常費用総計',
'Total Unit Cost': '単価合計',
'Total Unit Cost: ': '単価合計: ',
'Total Units': '総数',
'Total gross floor area (square meters)': '延面積(平方メートル)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'この病院のベッド数総計。日時レポートにより、自動的に更新されます。',
'Total number of houses in the area': 'この地域の家屋総数',
'Total number of schools in affected area': '被災地内の学校総数',
'Total population of site visited': '訪問地域の総人口数',
'Totals for Budget:': '予算の合計:',
'Totals for Bundle:': 'Bundleの合計:',
'Totals for Kit:': 'Kitの合計:',
'Tourist Group': '旅行者グループ',
'Town': '町',
'Traces internally displaced people (IDPs) and their needs': '国内の避難している人(IDP)と彼らの必要としている物資/サービスの追跡',
'Tracing': '履歴の追跡',
'Track': '追跡情報',
'Track Details': '追跡情報の詳細',
'Track deleted': '追跡情報を削除しました',
'Track updated': '追跡情報を更新しました',
'Track uploaded': '追跡情報をアップデートしました',
'Track with this Person?': 'Track with this Person?',
'Tracking of Patients': 'Tracking of Patients',
'Tracking of Projects, Activities and Tasks': 'プロジェクトや支援活動、タスクの追跡',
'Tracking of basic information on the location, facilities and size of the Shelters': '避難所の基本情報(場所、施設、規模等)を追跡',
'Tracks': '追跡情報',
'Tracks requests for aid and matches them against donors who have pledged aid': '支援要請を管理し、救援物資の提供者とマッチングします。',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '避難所のロケーション、配置、収容能力と被災者の状態を追跡します。',
'Traffic Report': 'トラフィックレポート',
'Training': 'Training',
'Training Course Catalog': 'Training Course Catalog',
'Training Details': 'Training Details',
'Training added': 'Training added',
'Training deleted': 'Training deleted',
'Training updated': 'Training updated',
'Trainings': 'Trainings',
'Transfer': '輸送',
'Transit': '輸送中',
'Transit Status': '輸送状態',
'Transit. Status': '輸送状態',
'Transition Effect': '推移への影響',
'Transparent?': '透明ですか?',
'Transportation assistance, Rank': '移動 / 輸送支援、ランク',
'Trauma Center': '外傷センター',
'Travel Cost': '移動費',
'Tree': '樹木',
'Tropical Storm': '熱帯低気圧',
'Tropo Messaging Token': 'Tropo メッセージのトークン',
'Tropo Settings': 'Tropo 設定',
'Tropo Voice Token': 'Tropo 音声トークン',
'Tropo settings updated': 'Tropo 設定を更新しました',
'Truck': 'トラック',
'Try checking the URL for errors, maybe it was mistyped.': '入力したURLに間違いがないか確認してください。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'ページの再読み込みを行うか、あるいはアドレスバーに直接URLを入力してみてください。',
'Try refreshing the page or hitting the back button on your browser.': 'ページを再読込するか、ブラウザの[戻る]ボタンを押してください。',
'Tsunami': '津波',
'Tuesday': '火曜日',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID あるいは #ハッシュタグ',
'Twitter Settings': 'Twitter設定',
'Type': 'タイプ',
'Type of Construction': '建物の種類',
'Type of cause': '原因のタイプ',
'Type of latrines': 'トイレの種類',
'Type of place for defecation': '排泄用地の種類',
'Type of water source before the disaster': '災害発生前の水の確保方法',
"Type the first few characters of one of the Person's names.": '検索したい人物の名前の先頭数文字を入力してください',
'Types of health services available': '利用可能な健康サービスの種別',
'Types of water storage containers available': '利用可能な水貯蔵容器の種別',
'UID': 'ユニークID',
'UN': '国連',
'URL': 'URL',
'UTC Offset': 'UTC(世界標準時刻)との差',
'Un-Repairable': 'Un-Repairable',
'Unable to parse CSV file!': 'CSVファイルをパースできません。',
'Understaffed': '人員不足',
'Unidentified': '身元不明',
'Unit': '単位',
'Unit Bed Capacity': 'ベッド収容数',
'Unit Cost': '単価',
'Unit Details': '単位の詳細',
'Unit Name': '単位名',
'Unit Set': '単位の設定',
'Unit Short Code for e.g. m for meter.': '単位の略称、例えばメートルはmと表記。',
'Unit added': '単位を追加しました',
'Unit deleted': '単位を削除しました',
'Unit of Measure': '測定単位',
'Unit updated': '単位を更新しました',
'Units': '単位',
'Units of Measure': '測定単位',
'Unknown': '不明',
'Unknown Peer': '登録に無いデータ同期先',
'Unknown type of facility': '施設の種類が不明',
'Unreinforced masonry': '補強されていない石造建築物',
'Unresolved Conflicts': '未解決のデータ競合',
'Unsafe': '危険な',
'Unselect to disable the modem': 'モデムを無効化するにはチェックを外す',
'Unselect to disable this API service': 'Unselect to disable this API service',
'Unselect to disable this SMTP service': 'Unselect to disable this SMTP service',
'Unsent': '未送信',
'Unsupported data format!': 'サポートされていないデータフォーマットです。',
'Unsupported method': 'サポートされていないメソッドです',
'Unsupported method!': 'サポートされていないメソッドです。',
'Update': '更新',
'Update Activity Report': '支援活動レポートの更新',
'Update Cholera Treatment Capability Information': 'コレラ対策能力情報を更新',
'Update Import Job': 'Import Jobの更新',
'Update Request': '支援要請を更新',
'Update Service Profile': 'サービスプロファイルの更新',
'Update Status': 'Update Status',
'Update Task Status': 'タスク状況の更新',
'Update Unit': '単位の更新',
'Update if Master': 'マスターサイトなら更新する',
'Update if Newer': '新しいものがあれば更新する',
'Update your current ordered list': '現在の順序付きリストの更新',
'Updated By': 'Updated By',
'Upload': 'アップロード',
'Upload Comma Separated Value File': 'Upload Comma Separated Value File',
'Upload Format': 'Upload Format',
'Upload OCR Form': 'Upload OCR Form',
'Upload Photos': '写真のアップロード',
'Upload Spreadsheet': 'スプレッドシートのアップロード',
'Upload Track': '追跡情報のアップロード',
'Upload a CSV file': 'Upload a CSV file',
'Upload a CSV file formatted according to the Template.': 'Upload a CSV file formatted according to the Template.',
'Upload a Spreadsheet': 'スプレッドシートをアップロード',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '画像ファイルをアップロード(bmp,gif,jpeg,png) 最大300x300ピクセル',
'Upload an image file here.': '画像ファイルをここにアップロードしてください',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '画像ファイルのアップロードはここから行ってください。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
'Upload an image, such as a photo': '写真などのイメージをアップロードしてください',
'Uploaded': 'Uploaded',
'Urban Fire': '都市火災',
'Urban area': '市街地',
'Urdu': 'ウルドゥー語',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '複雑なクエリを構築するには、ANDは (...)&(...) を、ORは (...)|(...) を、NOTは ~(...) を使用してください。',
'Use Geocoder for address lookups?': 'Use Geocoder for address lookups?',
'Use default': 'デフォルト値を使用',
'Use these links to download data that is currently in the database.': 'これらのリンクを使用して、現在データベースにあるデータをダウンロードします。',
'Use this space to add a description about the Bin Type.': 'Bin Typeに関する説明は、このスペースに記載してください。',
'Use this space to add a description about the site location.': 'このスペースを使って、サイトの位置の説明を追加してください。',
'Use this space to add a description about the warehouse/site.': '倉庫/Siteに関する説明は、このスペースに記載してください。',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Site/倉庫に関する追加情報を記載するには、このスペースを使用してください。',
'Use this to set the starting location for the Location Selector.': 'Use this to set the starting location for the Location Selector.',
'Used by IRS & Assess': 'Used by IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Used in onHover Tooltip & Cluster Popups to differentiate between types.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to import data from spreadsheets into the database': 'スプレッドシートからデータベースにデータをインポートするために使われます',
'Used within Inventory Management, Request Management and Asset Management': 'Used within Inventory Management, Request Management and Asset Management',
'User': 'ユーザー',
'User %(first_name)s %(last_name)s Approved': '%(first_name)s %(last_name)s のユーザー登録が承認されました',
'User %(id)s Logged-in': 'ユーザー %(id)s がログインしています',
'User %(id)s Logged-out': 'ユーザー %(id)s がログアウトしました',
'User %(id)s Profile updated': 'ユーザ %(id)s のプロファイルを更新しました',
'User %(id)s Registered': 'ユーザー%(id)sを登録しました',
'User Account has been Disabled': 'ユーザアカウントが無効になっています',
'User Details': 'ユーザーの詳細',
'User ID': 'ユーザーID',
'User Management': 'ユーザー管理',
'User Profile': 'ユーザープロファイル',
'User Requests': 'ユーザー要求',
'User Updated': 'ユーザーを更新しました',
'User added': 'ユーザーを追加しました',
'User already has this role': 'このユーザーは既にこの権限を持っています',
'User deleted': 'ユーザーを削除しました',
'User updated': 'ユーザーを更新しました',
'Username': 'ユーザー名',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'データ同期先との認証に使うユーザ名。HTTPベーシック認証のみサポートしています。',
'Users': 'ユーザー',
'Users removed': 'ユーザーを削除しました',
'Uses the REST Query Format defined in': 'Uses the REST Query Format defined in',
'Ushahidi': 'Ushahidi',
'Usual food sources in the area': 'この地域の普段の食料調達方法',
'Utilities': 'ユーティリティ',
'Utility, telecommunication, other non-transport infrastructure': 'ユーティリティ、通信、その他のインフラ設備(交通以外)',
'Vacancies': '欠員',
'Value': '値',
'Various Reporting functionalities': '多種多様な報告を行う機能',
'Vehicle': '車両',
'Vehicle Crime': '車両犯罪',
'Vehicle Details': 'Vehicle Details',
'Vehicle Details added': 'Vehicle Details added',
'Vehicle Details deleted': 'Vehicle Details deleted',
'Vehicle Details updated': 'Vehicle Details updated',
'Vehicle Management': 'Vehicle Management',
'Vehicle Types': '車両の種別',
'Vehicle added': 'Vehicle added',
'Vehicle deleted': 'Vehicle deleted',
'Vehicle updated': 'Vehicle updated',
'Vehicles': 'Vehicles',
'Vehicles are assets with some extra details.': 'Vehicles are assets with some extra details.',
'Vendor': 'ベンダー',
'Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters': 'メールアドレス確認用のメールを送信しました。メールに記載された確認用URLにアクセスしてください。もしメールが届かない場合迷惑メールフォルダに入ってしまっている可能性がありますのでご確認ください。',
'Verification Status': '認証ステータス',
'Verified': '認証済み',
'Verified?': '認証済み?',
'Verify Password': 'パスワード再確認',
'Verify password': 'パスワードの確認',
'Version': 'バージョン',
'Very Good': 'Very Good',
'Very High': '非常に高い',
'View Alerts received using either Email or SMS': '電子メールまたはSMSで受信したアラートの閲覧',
'View All': 'View All',
'View All Tickets': 'View All Tickets',
'View Error Tickets': 'View Error Tickets',
'View Fullscreen Map': '地図をフルスクリーン表示',
'View Image': '画像の閲覧',
'View Items': 'View Items',
'View On Map': '地図上で閲覧',
'View Outbox': '送信箱の表示',
'View Picture': '写真の表示',
'View Requests for Aid': '援助要請を閲覧',
'View Results of completed and/or partially completed assessments': 'View Results of completed and/or partially completed assessments',
'View Settings': '設定の確認',
'View Tickets': 'チケットの閲覧',
"View and/or update details of the person's record": '人物情報を検索し、詳細の閲覧や更新を行ないます',
'View and/or update their details': '詳細の閲覧および更新',
'View or update the status of a hospital.': '病院のステータスの閲覧と更新',
'View pending requests and pledge support.': '処理中の要求と寄付サポートの閲覧',
'View the hospitals on a map.': '病院の場所を地図上で表示します。',
'View/Edit the Database directly': 'View/Edit the Database directly',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'データベースの直接閲覧/編集(注意:フレームワークの規則に反します)',
'Village': '村落',
'Village Leader': '村長',
'Visible?': '表示しますか?',
'Visual Recognition': '画像認識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山活動',
'Volume (m3)': 'Volume (m3)',
'Volume - Fluids': '容積 - 液状物',
'Volume - Solids': '容積 - 固形物',
'Volume Capacity': '容量',
'Volume/Dimensions': '容量/外形寸法',
'Volunteer Availability': 'Volunteer Availability',
'Volunteer Data': 'ボランティアデータ',
'Volunteer Details': 'ボランティアの詳細',
'Volunteer Information': 'Volunteer Information',
'Volunteer Management': 'ボランティアの管理',
'Volunteer Project': 'ボランティアプロジェクト',
'Volunteer Record': 'Volunteer Record',
'Volunteer Registration': 'ボランティア登録',
'Volunteer Registrations': 'ボランティア登録',
'Volunteer Request': 'ボランティア要請',
'Volunteer added': 'ボランティアを追加しました',
'Volunteer availability added': 'Volunteer availability added',
'Volunteer availability deleted': 'Volunteer availability deleted',
'Volunteer availability updated': 'Volunteer availability updated',
'Volunteer deleted': 'ボランティアを削除しました',
'Volunteer details updated': 'ボランティアの詳細を更新しました',
'Volunteer registration added': 'ボランティア登録を追加しました',
'Volunteer registration deleted': 'ボランティア登録を削除しました',
'Volunteer registration updated': 'ボランティア登録を更新しました',
'Volunteers': 'ボランティア',
'Volunteers were notified!': 'ボランティアに通知されました',
'Vote': '投票',
'Votes': '投票',
'WASH': '水と衛生 (WASH)',
'WMS Browser Name': 'WMSブラウザ名',
'WMS Browser URL': 'WMSブラウザのURL',
'Walking Only': '徒歩のみ',
'Walking time to the health service': '医療サービス提供所までの徒歩時間',
'Wall or other structural damage': '壁やその他の構造の損傷',
'Warehouse': '倉庫',
'Warehouse Details': '倉庫の詳細',
'Warehouse Item Details': '倉庫物資の詳細',
'Warehouse Item added': '倉庫物資を追加しました',
'Warehouse Item deleted': '倉庫内物資を削除しました',
'Warehouse Item updated': '倉庫物資を更新しました',
'Warehouse Items': '倉庫に備蓄中の物資',
'Warehouse Management': '倉庫管理',
'Warehouse added': '倉庫を追加しました',
'Warehouse deleted': '倉庫を削除しました',
'Warehouse updated': '倉庫を更新しました',
'Warehouse/Sites Registry': '倉庫/Siteの登録',
'Warehouses': '倉庫',
'WatSan': '給水と衛生',
'Water': '水',
'Water Level still high?': '水位はまだ高いままですか?',
'Water Sanitation Hygiene': '水質衛生',
'Water collection': '給水',
'Water gallon': 'ガロン容器',
'Water storage containers available for HH': '世帯用の水貯蔵容器が利用可能である',
'Water storage containers in households': '世帯の水貯蔵容器',
'Water storage containers sufficient per HH': '世帯毎に1つ以上の水貯蔵容器が利用可能である',
'Water supply': '水の供給',
'Waterspout': '水上竜巻',
'Way Bill(s)': '貨物運送状',
'We have tried': '私達は試行しました',
'Web API settings updated': 'Web API settings updated',
'Web Map Service Browser Name': 'Web Map Service Browser Name',
'Web Map Service Browser URL': 'Web Map Service Browser URL',
'Website': 'ウェブサイト',
'Wednesday': '水曜日',
'Weekly': '週次',
'Weight': '体重',
'Weight (kg)': '体重 (kg)',
'Welcome to the Sahana Eden Disaster Management Platform': 'Sahana Eden -災害情報管理プラットフォームへようこそ',
'Welcome to the Sahana Eden Disaster Management System': 'Sahana Eden -災害情報管理システムへようこそ',
'Welcome to the Sahana Portal at': 'Sahanaポータルにようこそ',
'Welcome to the Sahana Portal at ': 'Sahana ポータルへようこそ: ',
'Well-Known Text': 'Well-Known Text (WKT)',
'Were basic medical supplies available for health services prior to the disaster?': '災害前に、基本的な医療サービスが機能していたかどうかを記載してください',
'Were breast milk substitutes used prior to the disaster?': '災害前に母乳代用品が使用されていたかどうかを記載してください',
'Were there cases of malnutrition in this area prior to the disaster?': 'この地域で、災害前に栄養失調が発生していたかどうかを記載してください',
'Were there health services functioning for the community prior to the disaster?': '災害前、共同体でヘルスサービスが機能していたかどうかを記載してください',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '災害発生前から栄養失調の報告があった、あるいはその証跡があったかどうかを記載します',
'What are the factors affecting school attendance?': '生徒の就学状況に影響する要因を記載してください',
"What are the people's normal ways to obtain food in this area?": 'この地域で食料を調達するための手段を記載してください',
'What are your main sources of cash to restart your business?': 'ビジネス再開に必要な現金の、主な調達源を記載してください',
'What are your main sources of income now?': '現在の主な収入源を記載してください',
'What do you spend most of your income on now?': '現在の主な支出要因を記載してください',
'What food stocks exist? (main dishes)': '備蓄食料の種類(主皿)',
'What food stocks exist? (side dishes)': '備蓄食料の種類(副皿)',
'What is the estimated total number of people in all of these institutions?': '上記施設内の居住者を総計すると、おおよそどの程度になるかを記載してください',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '洗濯、料理、入浴など、日常生活で必要となる清潔な水の、主な入手源を記載してください',
'What is your major source of drinking water?': '飲料水の主な入手源を記載してください',
'What order to be contacted in.': 'What order to be contacted in.',
"What should be done to reduce women and children's vulnerability to violence?": '未成年や女性を暴力から守るために、どのような活動や設備が必要かを記載してください',
'What type of latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレのタイプは?',
'What type of salvage material can be used from destroyed houses?': '全壊した家屋から回収した部材が流用可能な用途を記載します',
'What type of salvage material can be used from destroyed schools?': '倒壊した校舎において、再利用できる部材は何ですか?',
'What types of health problems do children currently have?': '小児が現在抱えている健康問題のタイプを記載してください',
'What types of health problems do people currently have?': '住人たちが現在抱えている健康問題のタイプを記載してください',
'What types of health services are still functioning in the affected area?': '現在、被災地で機能しているヘルスサービスの種類を選択してください',
'What types of household water storage containers are available?': '世帯で使っている水貯蔵容器のタイプを選択してください',
'What were your main sources of income before the disaster?': '災害発生前の主な収入源を選択してください',
'Wheat': '小麦',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': '地点の集合にフォーカスを合わせた地図を表示すると、この地図はそれら地点の集合を表示できる範囲に拡大・縮小します',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、それらポイントの外に余白を付与します。指定しない場合、表示領域とポイントが重なり、表示範囲から外れてしまう可能性があります。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、地域を表示する際の横幅と縦高の最小値となります。指定しない場合、対象の一点のみ表示され、その周辺は表示されません。一度表示された後であれば、縮尺の変更が可能です。',
'When reports were entered': 'いつ報告が入力されたか',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '他とデータを同期するとき、二つ(以上)の団体がそれぞれ更新した情報を同期するときにコンフリクトが発生することがあります。同期モジュールは、コンフリクトを自動解決しようと試みますが、解決できないことがあります。そのような場合、手作業でコンフリクトを解決するか、クリックして次のページに進んでください。',
'Where are the alternative places for studying?': '学校以外で、学習が可能な施設の種類を選択してください',
'Where are the separated children originally from?': '保護者が居ない児童の住居地はどこですか?',
'Where do the majority of people defecate?': 'トイレはどこで済ませますか?',
'Where have the children been sent?': '疎開先の情報がある場合は記載してください',
'Where is solid waste disposed in the village/camp?': '村落/仮泊施設内での、固形廃棄物処理場所を記載してください',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Sahana Eden、Sahana Agasti、Ushahidi、あるいはその他のインスタンスかどうかを指定します。',
'Whiskers': 'ほおひげ',
'Who is doing what and where': '誰がどこで何をしているか',
'Who usually collects water for the family?': '日頃、家族のために水を採取しているのは誰か?',
'Width': '横幅',
'Width (m)': 'Width (m)',
'Wild Fire': '野火',
'Wind Chill': '風速冷却',
'Window frame': 'ウィンドウ枠',
'Winter Storm': '吹雪',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '災害発生後、女性や少女に対する暴力事件が発生したかどうかを記載してください。具体的な人名や場所を記載する必要はありません',
'Women of Child Bearing Age': '出産年齢の女性',
'Women participating in coping activities': '女性が災害対応に従事',
'Women who are Pregnant or in Labour': '妊娠中、あるいは分娩中の女性',
'Womens Focus Groups': '女性のフォーカスグループ(Womens Focus Groups)',
'Wooden plank': '木製板',
'Wooden poles': '木製の柱',
'Working hours end': '作業終了時刻',
'Working hours start': '作業開始時刻',
'Working or other to provide money/food': '金銭/食料調達のため就労、あるいは活動を実施',
'Would you like to display the photos on the map?': '地図上に写真を表示しますか?',
'X-Ray': 'X線',
'XMPP': 'XMPP',
'YES': 'YES',
"Yahoo Layers cannot be displayed if there isn't a valid API Key": "Yahoo Layers cannot be displayed if there isn't a valid API Key",
'Year': 'Year',
'Year built': '建築年',
'Year of Manufacture': '製造年',
'Yellow': '黄色',
'Yes': 'はい',
'You are a recovery team?': 'あなたが遺体回収チームの場合',
'You are attempting to delete your own account - are you sure you want to proceed?': '自分のアカウントを削除しようとしています。本当に削除しますか?',
'You are currently reported missing!': 'あなたが行方不明者として登録されています!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '同期に関する設定は、「設定」セクションで行うことができます。設定には、UUID(unique identification number)、同期スケジュール、ビーコンサービス等が含まれます。同期設定は以下のリンクから変更可能です。',
'You can click on the map below to select the Lat/Lon fields': '下の地図をクリックすることで、緯度経度情報を入力できます',
'You can click on the map below to select the Lat/Lon fields:': '緯度と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can select the Draw tool (': 'ドローツールを選択できます (',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any facility to make a commitment.': 'You do not have permission for any facility to make a commitment.',
'You do not have permission for any facility to make a request.': 'You do not have permission for any facility to make a request.',
'You do not have permission for any facility to receive a shipment.': 'You do not have permission for any facility to receive a shipment.',
'You do not have permission for any facility to send a shipment.': 'You do not have permission for any facility to send a shipment.',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for any site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to cancel this received shipment.': 'You do not have permission to cancel this received shipment.',
'You do not have permission to cancel this sent shipment.': 'You do not have permission to cancel this sent shipment.',
'You do not have permission to make this commitment.': 'You do not have permission to make this commitment.',
'You do not have permission to receive this shipment.': 'You do not have permission to receive this shipment.',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You do not have permission to send this shipment.': 'You do not have permission to send this shipment.',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見しましたか?',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'ユーザ固有の設定を行っている場合、ここで変更を行っても、目に見える変化がない場合があります。ユーザ固有の設定を行うには、以下をクリックしてください。 ',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": '変更が保存されていません。「キャンセル」をクリックした後、「保存」を押して保存してください。変更を破棄するには、OK をクリックしてください。',
"You haven't made any calculations": '計算が実行されていません',
"You haven't yet Verified your account - please check your email": '利用者登録はまだ有効ではありません。',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved': '利用者登録が完了しました',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP Code': 'ZIP Code',
'ZIP/Postcode': '郵便番号',
'Zero Hour': 'Zero Hour',
'Zinc roof': 'トタン屋根',
'Zoom': 'ズーム',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of assessments.': 'allows for creation and management of assessments.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'cancelled': '行動キャンセル',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'click here': 'click here',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'constraint_id': 'constraint_id',
"couldn't be parsed so NetworkLinks not followed.": 'パースできなかったため、 NetworkLinksはフォローされません。',
'criminal intent': '犯罪目的',
'crud': 'CRUD',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database': 'データベース',
'database %s select': 'データベース%sの選択',
'db': 'データベース',
'deceased': 'deceased',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'enter a number between %(min)g and %(max)g': 'enter a number between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'found': 'found',
'from Twitter': 'Twitter経由',
'from_id': 'from_id',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '在庫内',
'inactive': '休止中',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'OpenLayersで未サポートの機能である GroundOverlayやScreenOverlayを含むため、不具合がある可能性があります。',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new': '新規挿入',
'insert new %s': '%sの新規挿入',
'invalid': '無効',
'invalid request': '無効な要求',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての受信チケットを追跡し、分類した上で適切な対応先へ振り分けられるようにします。',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最大範囲',
'maxResolution': '最高分解能',
'medium': '中',
'medium<12cm': '12cm未満',
'menu item': 'メニューアイテム',
'message_id': 'メッセージID',
'meter': 'メートル',
'meter cubed': '立方メートル',
'meters': 'メートル',
'min': '最小',
'missing': 'missing',
'module allows the an inspector to fill information for buildings.': 'モジュールでは、建築物の調査情報を記録できます。',
'module allows the site administrator to configure various options.': 'モジュールを使うことで、サイト管理者が様々な項目を設定する際の手間を省くことができます。',
'module helps monitoring the status of hospitals.': 'モジュールでは、病院の状態をモニタできます。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'モジュールでは、オンラインマッピング(GIS)を使用して、現在の災害地域の状態を俯瞰することができます。',
'mongoloid': '黄色人種',
'more': 'その他の項目 ',
'n/a': 'データなし',
'natural hazard': '自然災害',
'negroid': '黒人',
'never': 'まだ',
'new': '新規登録',
'new ACL': '新規ACL',
'new record inserted': '新規レコードを挿入しました',
'next 100 rows': '次の100行',
'no': ' ',
'none': 'なし',
'normal': '通常',
'not accessible - no cached version available!': 'アクセスできません - キャッシュされたバージョンがありません!',
'not accessible - using cached version from': 'アクセス不可 - キャッシュ版を使用しています',
'not specified': '未指定',
'num Zoom Levels': 'ズーム倍率',
'obsolete': '廃止',
'on': ' ',
'once': '一度',
'open defecation': '野外',
'operational intent': '運用目的',
'optional': 'optional',
'or import from csv file': 'またはcsvファイルからインポート',
'other': 'その他',
'over one hour': '1時間以上',
'pack of 10': '10のパック',
'people': '居住者情報',
'piece': 'ピース(単位)',
'pit': '堀穴',
'pit latrine': '穴掘りトイレ',
'postponed': '実施を延期',
'preliminary template or draft, not actionable in its current form': '現状のままでは実行できない、予備テンプレートまたは下書き',
'previous 100 rows': '前の100行',
'primary incident': '優先すべきインシデント',
'problem connecting to twitter.com - please refresh': 'twitter.comへの接続に問題が発生しました。再読込を行ってください',
'provides a catalogue of digital media.': 'デジタルメディアのカタログを提供します',
'record does not exist': 'レコードが存在しません',
'record id': 'レコードID',
'records deleted': 'レコードを削除しました',
'red': '赤い',
'reported': '報告済み',
'reports successfully imported.': 'レポートは正しくインポートできました',
'representation of the Polygon/Line.': 'Polygon/Lineの表現',
'retired': '終了',
'retry': '再試行',
'river': '河川',
'sack 20kg': '袋 20kg',
'sack 50kg': '袋 50kg',
'secondary effect': '副次効果',
'see comment': 'コメント参照',
'selected': '選択された',
'separated': '別居',
'separated from family': '家族とはぐれた',
'shaved': '坊主',
'shift_end': 'shift_end',
'shift_start': 'シフト開始',
'short': '小柄',
'short<6cm': '6cm未満',
'sides': '側面',
'sign-up now': '今すぐ登録',
'simple': '単純な',
'single': '独身',
'slim': 'やせ型',
'specify': '明記してください',
'staff': 'スタッフ',
'staff members': 'staff members',
'state': '状態',
'state location': 'ステートロケーション',
'straight': '直毛',
'suffered financial losses': '経済的損失',
'table': 'テーブル',
'table_name': 'テーブル名',
'tall': '大柄',
'technical failure': '技術的な原因',
'this': 'この',
'times and it is still not working. We give in. Sorry.': '回繰り返しましたが、処理を完了できません。ご迷惑をおかけしますが、処理を中止します。',
'to access the system': 'してシステムにアクセスしてください',
'to download a OCR Form.': 'to download a OCR Form.',
'to reset your password': 'パスワードのリセット',
'to verify your email': '登録されたメールアドレスに間違いが無いことが確認されます。\nもしこのメールの内容に心当たりがない場合はこのメールを無視してください。',
'to_id': 'to_id',
'ton': 'トン',
'tonsure': '剃髪',
'total': '合計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '実行中のPythonで tweepyモジュールが利用できません。Tropo以外でのTwitter機能利用で必要です',
'unable to parse csv file': 'csvファイルをパースできません。',
'unapproved': '承認されていない',
'uncheck all': 'チェックをすべて外す',
'unidentified': '身元不明',
'uninhabitable = foundation and structure destroyed': '利用不可能 = 基礎構造や土台部分の破壊など',
'unknown': '不明',
'unspecified': 'その他',
'unverified': '未検証',
'updated': '更新しました',
'updates only': '更新のみ',
'urgent': '緊急',
'using default': '標準値を使用',
'verified': '確認済み',
'vm_action': 'vm_action',
'volunteer': 'ボランティア',
'volunteers': 'volunteers',
'wavy': '波状',
'weekly': '週次',
'white': '白',
'wider area, longer term, usually contain multiple Activities': '活動範囲が広く、長期的目標を有しており、複数の支援活動を包括します。',
'widowed': '死別',
'window': '窓',
'windows broken, cracks in walls, roof slightly damaged': '窓破損、壁にひび割れ、屋根の一部損傷',
'within human habitat': '人間の居住地域内',
'xlwt module not available within the running Python - this needs installing for XLS output!': '実行中のPythonでxlwtモジュールが利用できません。XLS出力に必要です。',
'yes': 'はい',
'管理': '管理',
}
| flavour/porto | languages/ja.py | Python | mit | 410,121 | ["VisIt"] | 7acacaef90f427ea3e57a9447a1e8cae9d7cfac7cb6e70962656d3e26c38fcf9 |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Half-sphere exposure and coordination number calculation."""
import warnings
from math import pi
from Bio.PDB.AbstractPropertyMap import AbstractPropertyMap
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import CaPPBuilder, is_aa
from Bio.PDB.Vector import rotaxis
class _AbstractHSExposure(AbstractPropertyMap):
"""
Abstract class to calculate Half-Sphere Exposure (HSE).
The HSE can be calculated based on the CA-CB vector, or the pseudo CB-CA
vector based on three consecutive CA atoms. This is done by two separate
subclasses.
"""
def __init__(self, model, radius, offset, hse_up_key, hse_down_key,
angle_key=None):
"""
@param model: model
@type model: L{Model}
@param radius: HSE radius
@type radius: float
@param offset: number of flanking residues that are ignored in the calculation
of the number of neighbors
@type offset: int
@param hse_up_key: key used to store HSEup in the entity.xtra attribute
@type hse_up_key: string
@param hse_down_key: key used to store HSEdown in the entity.xtra attribute
@type hse_down_key: string
@param angle_key: key used to store the angle between CA-CB and CA-pCB in
the entity.xtra attribute
@type angle_key: string
"""
assert(offset>=0)
# For PyMOL visualization
self.ca_cb_list=[]
ppb=CaPPBuilder()
ppl=ppb.build_peptides(model)
hse_map={}
hse_list=[]
hse_keys=[]
for pp1 in ppl:
for i in range(0, len(pp1)):
if i==0:
r1=None
else:
r1=pp1[i-1]
r2=pp1[i]
if i==len(pp1)-1:
r3=None
else:
r3=pp1[i+1]
# This method is provided by the subclasses to calculate HSE
result=self._get_cb(r1, r2, r3)
if result is None:
# Missing atoms, or i==0, or i==len(pp1)-1
continue
pcb, angle=result
hse_u=0
hse_d=0
ca2=r2['CA'].get_vector()
for pp2 in ppl:
for j in range(0, len(pp2)):
if pp1 is pp2 and abs(i-j)<=offset:
# neighboring residues in the chain are ignored
continue
ro=pp2[j]
if not is_aa(ro) or not ro.has_id('CA'):
continue
cao=ro['CA'].get_vector()
d=(cao-ca2)
if d.norm()<radius:
if d.angle(pcb)<(pi/2):
hse_u+=1
else:
hse_d+=1
res_id=r2.get_id()
chain_id=r2.get_parent().get_id()
# Fill the 3 data structures
hse_map[(chain_id, res_id)]=(hse_u, hse_d, angle)
hse_list.append((r2, (hse_u, hse_d, angle)))
hse_keys.append((chain_id, res_id))
# Add to xtra
r2.xtra[hse_up_key]=hse_u
r2.xtra[hse_down_key]=hse_d
if angle_key:
r2.xtra[angle_key]=angle
AbstractPropertyMap.__init__(self, hse_map, hse_keys, hse_list)
def _get_cb(self, r1, r2, r3):
"""This method is provided by the subclasses to calculate HSE."""
return NotImplemented
def _get_gly_cb_vector(self, residue):
"""
Return a pseudo CB vector for a Gly residue.
The pseudoCB vector is centered at the origin.
CB coord=N coord rotated over -120 degrees
along the CA-C axis.
"""
try:
n_v=residue["N"].get_vector()
c_v=residue["C"].get_vector()
ca_v=residue["CA"].get_vector()
except:
return None
# center at origin
n_v=n_v-ca_v
c_v=c_v-ca_v
# rotation around c-ca over -120 deg
rot=rotaxis(-pi*120.0/180.0, c_v)
cb_at_origin_v=n_v.left_multiply(rot)
# move back to ca position
cb_v=cb_at_origin_v+ca_v
# This is for PyMol visualization
self.ca_cb_list.append((ca_v, cb_v))
return cb_at_origin_v
class HSExposureCA(_AbstractHSExposure):
"""
Class to calculate HSE based on the approximate CA-CB vectors,
using three consecutive CA positions.
"""
def __init__(self, model, radius=12, offset=0):
"""
@param model: the model that contains the residues
@type model: L{Model}
@param radius: radius of the sphere (centred at the CA atom)
@type radius: float
@param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
@type offset: int
"""
_AbstractHSExposure.__init__(self, model, radius, offset,
'EXP_HSE_A_U', 'EXP_HSE_A_D', 'EXP_CB_PCB_ANGLE')
def _get_cb(self, r1, r2, r3):
"""
Calculate the approximate CA-CB direction for a central
CA atom based on the two flanking CA positions, and the angle
with the real CA-CB vector.
The CA-CB vector is centered at the origin.
@param r1, r2, r3: three consecutive residues
@type r1, r2, r3: L{Residue}
"""
if r1 is None or r3 is None:
return None
try:
ca1=r1['CA'].get_vector()
ca2=r2['CA'].get_vector()
ca3=r3['CA'].get_vector()
except:
return None
# center
d1=ca2-ca1
d3=ca2-ca3
d1.normalize()
d3.normalize()
# bisection
b=(d1+d3)
b.normalize()
# Add to ca_cb_list for drawing
self.ca_cb_list.append((ca2, b+ca2))
if r2.has_id('CB'):
cb=r2['CB'].get_vector()
cb_ca=cb-ca2
cb_ca.normalize()
angle=cb_ca.angle(b)
elif r2.get_resname()=='GLY':
cb_ca=self._get_gly_cb_vector(r2)
if cb_ca is None:
angle=None
else:
angle=cb_ca.angle(b)
else:
angle=None
# vector b is centered at the origin!
return b, angle
def pcb_vectors_pymol(self, filename="hs_exp.py"):
"""
Write a PyMol script that visualizes the pseudo CB-CA directions
at the CA coordinates.
@param filename: the name of the pymol script file
@type filename: string
"""
if len(self.ca_cb_list)==0:
warnings.warn("Nothing to draw.", RuntimeWarning)
return
fp=open(filename, "w")
fp.write("from pymol.cgo import *\n")
fp.write("from pymol import cmd\n")
fp.write("obj=[\n")
fp.write("BEGIN, LINES,\n")
fp.write("COLOR, %.2f, %.2f, %.2f,\n" % (1.0, 1.0, 1.0))
for (ca, cb) in self.ca_cb_list:
x,y,z=ca.get_array()
fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
x,y,z=cb.get_array()
fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
fp.write("END]\n")
fp.write("cmd.load_cgo(obj, 'HS')\n")
fp.close()
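# A minimal usage sketch (not from the original module; the PDB file name below is an
# assumption): how HSExposureCA and pcb_vectors_pymol could be combined to export the
# pseudo CB-CA directions for inspection in PyMOL.
def _example_hse_ca_usage(pdb_file='myprot.pdb'):
    """Compute HSE-alpha for the first model of pdb_file and write a PyMOL script."""
    parser = PDBParser()
    model = parser.get_structure('X', pdb_file)[0]
    hse_ca = HSExposureCA(model, radius=12, offset=0)
    # The generated script can then be run from within PyMOL ('run hs_exp.py').
    hse_ca.pcb_vectors_pymol('hs_exp.py')
    return hse_ca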
class HSExposureCB(_AbstractHSExposure):
"""
Class to calculate HSE based on the real CA-CB vectors.
"""
def __init__(self, model, radius=12, offset=0):
"""
@param model: the model that contains the residues
@type model: L{Model}
@param radius: radius of the sphere (centred at the CA atom)
@type radius: float
@param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
@type offset: int
"""
_AbstractHSExposure.__init__(self, model, radius, offset,
'EXP_HSE_B_U', 'EXP_HSE_B_D')
def _get_cb(self, r1, r2, r3):
"""
Method to calculate CB-CA vector.
@param r1, r2, r3: three consecutive residues (only r2 is used)
@type r1, r2, r3: L{Residue}
"""
if r2.get_resname()=='GLY':
return self._get_gly_cb_vector(r2), 0.0
else:
if r2.has_id('CB') and r2.has_id('CA'):
vcb=r2['CB'].get_vector()
vca=r2['CA'].get_vector()
return (vcb-vca), 0.0
return None
class ExposureCN(AbstractPropertyMap):
def __init__(self, model, radius=12.0, offset=0):
"""
A residue's exposure is defined as the number of CA atoms around
that residue's CA atom. A dictionary is returned that uses a L{Residue}
object as key, and the residue exposure as corresponding value.
@param model: the model that contains the residues
@type model: L{Model}
@param radius: radius of the sphere (centred at the CA atom)
@type radius: float
@param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
@type offset: int
"""
assert(offset>=0)
ppb=CaPPBuilder()
ppl=ppb.build_peptides(model)
fs_map={}
fs_list=[]
fs_keys=[]
for pp1 in ppl:
for i in range(0, len(pp1)):
fs=0
r1=pp1[i]
if not is_aa(r1) or not r1.has_id('CA'):
continue
ca1=r1['CA']
for pp2 in ppl:
for j in range(0, len(pp2)):
if pp1 is pp2 and abs(i-j)<=offset:
continue
r2=pp2[j]
if not is_aa(r2) or not r2.has_id('CA'):
continue
ca2=r2['CA']
d=(ca2-ca1)
if d<radius:
fs+=1
res_id=r1.get_id()
chain_id=r1.get_parent().get_id()
# Fill the 3 data structures
fs_map[(chain_id, res_id)]=fs
fs_list.append((r1, fs))
fs_keys.append((chain_id, res_id))
# Add to xtra
r1.xtra['EXP_CN']=fs
AbstractPropertyMap.__init__(self, fs_map, fs_keys, fs_list)
if __name__=="__main__":
import sys
p=PDBParser()
s=p.get_structure('X', sys.argv[1])
model=s[0]
# Neighbor sphere radius
RADIUS=13.0
OFFSET=0
hse=HSExposureCA(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
hse=HSExposureCB(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
hse=ExposureCN(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
for c in model:
for r in c:
try:
print r.xtra['EXP_CB_PCB_ANGLE']
except:
pass
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/PDB/HSExposure.py | Python | gpl-2.0 | 11,428 | ["Biopython", "PyMOL"] | d3446257218095db2c96c38b2ef380afdc29e03082dd9c9a6c4ce7cccf5918d8 |
"""
NeuroTools.analysis
==================
A collection of analysis functions that may be used by NeuroTools.signals or other packages.
.. currentmodule:: NeuroTools.analysis
Classes
-------
.. autosummary::
TuningCurve
Functions
---------
.. autosummary::
:nosignatures:
ccf
crosscorrelate
make_kernel
simple_frequency_spectrum
"""
import numpy as np
from NeuroTools import check_dependency
HAVE_MATPLOTLIB = check_dependency('matplotlib')
if HAVE_MATPLOTLIB:
import matplotlib
matplotlib.use('Agg')
else:
MATPLOTLIB_ERROR = "The matplotlib package was not detected"
HAVE_PYLAB = check_dependency('pylab')
if HAVE_PYLAB:
import pylab
else:
PYLAB_ERROR = "The pylab package was not detected"
def ccf(x, y, axis=None):
"""Fast cross correlation function based on fft.
Computes the cross-correlation function of two series.
Note that the computations are performed on anomalies (deviations from
average).
Returns the values of the cross-correlation at different lags.
Parameters
----------
x, y : 1D MaskedArrays
The two input arrays.
axis : integer, optional
Axis along which to compute (0 for rows, 1 for cols).
If `None`, the array is flattened first.
Examples
--------
>>> z = arange(5)
>>> ccf(z,z)
array([ 3.90798505e-16, -4.00000000e-01, -4.00000000e-01,
-1.00000000e-01, 4.00000000e-01, 1.00000000e+00,
4.00000000e-01, -1.00000000e-01, -4.00000000e-01,
-4.00000000e-01])
"""
assert x.ndim == y.ndim, "Inconsistent shape !"
# assert(x.shape == y.shape, "Inconsistent shape !")
if axis is None:
if x.ndim > 1:
x = x.ravel()
y = y.ravel()
npad = x.size + y.size
xanom = (x - x.mean(axis=None))
yanom = (y - y.mean(axis=None))
Fx = np.fft.fft(xanom, npad, )
Fy = np.fft.fft(yanom, npad, )
iFxy = np.fft.ifft(Fx.conj() * Fy).real
varxy = np.sqrt(np.inner(xanom, xanom) * np.inner(yanom, yanom))
else:
npad = x.shape[axis] + y.shape[axis]
if axis == 1:
if x.shape[0] != y.shape[0]:
raise ValueError("Arrays should have the same length!")
xanom = (x - x.mean(axis=1)[:, None])
yanom = (y - y.mean(axis=1)[:, None])
varxy = np.sqrt((xanom * xanom).sum(1) *
(yanom * yanom).sum(1))[:, None]
else:
if x.shape[1] != y.shape[1]:
raise ValueError("Arrays should have the same width!")
xanom = (x - x.mean(axis=0))
yanom = (y - y.mean(axis=0))
varxy = np.sqrt((xanom * xanom).sum(0) * (yanom * yanom).sum(0))
Fx = np.fft.fft(xanom, npad, axis=axis)
Fy = np.fft.fft(yanom, npad, axis=axis)
iFxy = np.fft.ifft(Fx.conj() * Fy, n=npad, axis=axis).real
# We just turn the lags into correct positions:
iFxy = np.concatenate((iFxy[len(iFxy) / 2:len(iFxy)],
iFxy[0:len(iFxy) / 2]))
return iFxy / varxy
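# A minimal sketch (synthetic data, not from the original module): cross-correlating a
# signal with a delayed copy of itself; the correlation peak should sit a few samples
# away from the centre of the returned array, matching the imposed shift.
def _example_ccf_usage(n=256, shift=5):
    """Return the ccf of a random signal and a circularly shifted copy of it."""
    x = np.random.randn(n)
    y = np.roll(x, shift)  # y lags x by `shift` samples (with wrap-around)
    return ccf(x, y)       # length 2*n; the zero-lag bin is near the middle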
from NeuroTools.plotting import get_display, set_labels
HAVE_PYLAB = check_dependency('pylab')
def crosscorrelate(sua1, sua2, lag=None, n_pred=1, predictor=None,
display=False, kwargs={}):
"""Cross-correlation between two series of discrete events (e.g. spikes).
Calculates the cross-correlation between
two vectors containing event times.
Returns ``(differeces, pred, norm)``. See below for details.
Adapted from original script written by Martin P. Nawrot for the
FIND MATLAB toolbox [1]_.
Parameters
----------
sua1, sua2 : 1D row or column `ndarray` or `SpikeTrain`
Event times. If sua2 == sua1, the result is the autocorrelogram.
lag : float
Lag for which relative event timing is considered
with a max difference of +/- lag. A default lag is computed
from the inter-event interval of the longer of the two sua
arrays.
n_pred : int
Number of surrogate compilations for the predictor. This
influences the total length of the predictor output array
predictor : {None, 'shuffle'}
Determines the type of bootstrap predictor to be used.
'shuffle' shuffles interevent intervals of the longer input array
and calculates relative differences with the shorter input array.
`n_pred` determines the number of repeated shufflings, resulting
differences are pooled from all repeated shufflings.
display : boolean
If True the corresponding plots will be displayed. If False,
int, int_ and norm will be returned.
kwargs : dict
Arguments to be passed to np.histogram.
Returns
-------
differences : np array
Accumulated differences of events in `sua1` minus the events in
`sua2`. Thus positive values relate to events of `sua2` that
lead events of `sua1`. Units are the same as the input arrays.
pred : np array
Accumulated differences based on the prediction method.
The length of `pred` is ``n_pred * length(differences)``. Units are
the same as the input arrays.
norm : float
Normalization factor used to scale the bin heights in `differences` and
`pred`. ``differences/norm`` and ``pred/norm`` correspond to the linear
correlation coefficient.
Examples
--------
>> crosscorrelate(np_array1, np_array2)
>> crosscorrelate(spike_train1, spike_train2)
>> crosscorrelate(spike_train1, spike_train2, lag = 150.0)
>> crosscorrelate(spike_train1, spike_train2, display=True,
kwargs={'bins':100})
See also
--------
ccf
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
"""
assert predictor == 'shuffle' or predictor is None, "predictor must be \
either None or 'shuffle'. Other predictors are not yet implemented."
#Check whether sua1 and sua2 are SpikeTrains or arrays
sua = []
for x in (sua1, sua2):
#if isinstance(x, SpikeTrain):
if hasattr(x, 'spike_times'):
sua.append(x.spike_times)
elif x.ndim == 1:
sua.append(x)
elif x.ndim == 2 and (x.shape[0] == 1 or x.shape[1] == 1):
sua.append(x.ravel())
else:
raise TypeError("sua1 and sua2 must be either instances of the" \
"SpikeTrain class or column/row vectors")
sua1 = sua[0]
sua2 = sua[1]
if sua1.size < sua2.size:
if lag is None:
lag = np.ceil(10*np.mean(np.diff(sua1)))
reverse = False
else:
if lag is None:
lag = np.ceil(20*np.mean(np.diff(sua2)))
sua1, sua2 = sua2, sua1
reverse = True
#construct predictor
if predictor == 'shuffle':
isi = np.diff(sua2)
sua2_ = np.array([])
for ni in xrange(1,n_pred+1):
idx = np.random.permutation(isi.size-1)
sua2_ = np.append(sua2_, np.add(np.insert(
(np.cumsum(isi[idx])), 0, 0), sua2.min() + (
np.random.exponential(isi.mean()))))
#calculate cross differences in spike times
differences = np.array([])
pred = np.array([])
for k in xrange(0, sua1.size):
differences = np.append(differences, sua1[k] - sua2[np.nonzero(
(sua2 > sua1[k] - lag) & (sua2 < sua1[k] + lag))])
if predictor == 'shuffle':
for k in xrange(0, sua1.size):
pred = np.append(pred, sua1[k] - sua2_[np.nonzero(
(sua2_ > sua1[k] - lag) & (sua2_ < sua1[k] + lag))])
if reverse is True:
differences = -differences
pred = -pred
norm = np.sqrt(sua1.size * sua2.size)
# Plot the results if display=True
if display:
subplot = get_display(display)
if not subplot or not HAVE_PYLAB:
return differences, pred, norm
else:
# Plot the cross-correlation
try:
counts, bin_edges = np.histogram(differences, **kwargs)
edge_distances = np.diff(bin_edges)
bin_centers = bin_edges[1:] - edge_distances/2
counts = counts / norm
xlabel = "Time"
ylabel = "Cross-correlation coefficient"
#NOTE: the x axis corresponds to the upper edge of each bin
subplot.plot(bin_centers, counts, label='cross-correlation', color='b')
if predictor is None:
set_labels(subplot, xlabel, ylabel)
pylab.draw()
elif predictor == 'shuffle':
# Plot the predictor
norm_ = norm * n_pred
counts_, bin_edges_ = np.histogram(pred, **kwargs)
counts_ = counts_ / norm_
subplot.plot(bin_edges_[1:], counts_, label='predictor')
subplot.legend()
pylab.draw()
except ValueError:
print "There are no correlated events within the selected lag"\
" window of %s" % lag
else:
return differences, pred, norm
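# A minimal sketch (the event times below are made-up data): binning the raw differences
# and dividing by norm gives a correlogram in units of the linear correlation coefficient.
def _example_crosscorrelate_usage():
    """Cross-correlate two synthetic spike-time arrays (times in ms)."""
    st1 = np.sort(np.random.uniform(0.0, 1000.0, 200))
    st2 = np.sort(np.random.uniform(0.0, 1000.0, 180))
    differences, pred, norm = crosscorrelate(st1, st2, lag=100.0)
    counts, edges = np.histogram(differences, bins=50)
    return counts / norm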
def _dict_max(D):
"""For a dict containing numerical values, return the key for the
highest value. If there is more than one item with the same highest
value, return one of them (arbitrary - depends on the order produced
by the iterator).
"""
max_val = max(D.values())
for k in D:
if D[k] == max_val:
return k
def make_kernel(form, sigma, time_stamp_resolution, direction=1):
"""Creates kernel functions for convolution.
Constructs a numeric linear convolution kernel of basic shape to be used
for data smoothing (linear low pass filtering) and firing rate estimation
from single trial or trial-averaged spike trains.
Exponential and alpha kernels may also be used to represent postynaptic
currents / potentials in a linear (current-based) model.
Adapted from original script written by Martin P. Nawrot for the
FIND MATLAB toolbox [1]_ [2]_.
Parameters
----------
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are aymmetric kernel forms and
assume optional parameter `direction`.
sigma : float
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution (in ms) of the kernel estimate
and makes different kernels comparable (cf. [1] for symetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
time_stamp_resolution : float
Temporal resolution of input and output in ms.
direction : {-1, 1}
Asymmetric kernels have two possible directions.
The values are -1 or 1, default is 1. The
definition here is that for direction = 1 the
kernel represents the impulse response function
of the linear filter. Default value is 1.
Returns
-------
kernel : array_like
Array of kernel. The length of this array is always an odd
number to represent symmetric kernels such that the center bin
coincides with the median of the numeric array, i.e for a
triangle, the maximum will be at the center bin with equal
number of bins to the right and to the left.
norm : float
For rate estimates. The kernel vector is normalized such that
the sum of all entries equals unity sum(kernel)=1. When
estimating rate functions from discrete spike data (0/1) the
additional parameter `norm` allows for the normalization to
rate in spikes per second.
For example:
``rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)``
m_idx : int
Index of the numerically determined median (center of gravity)
of the kernel function.
Examples
--------
To obtain single trial rate function of trial one should use::
r = norm * scipy.signal.fftconvolve(sua, kernel)
To obtain trial-averaged spike train one should use::
r_avg = norm * scipy.signal.fftconvolve(sua, np.mean(X,1))
where `X` is an array of shape `(l,n)`, `n` is the number of trials and
`l` is the length of each trial.
See also
--------
SpikeTrain.instantaneous_rate
SpikeList.averaged_instantaneous_rate
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
.. [2] Nawrot M, Aertsen A, Rotter S, "Single-trial estimation of neuronal
firing rates - from single neuron spike trains to population activity";
J. Neurosci Meth 94: 81-92; 1999.
"""
assert form.upper() in ('BOX','TRI','GAU','EPA','EXP','ALP'), "form must \
be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
assert direction in (1,-1), "direction must be either 1 or -1"
SI_sigma = sigma / 1000. #convert to SI units (ms -> s)
SI_time_stamp_resolution = time_stamp_resolution / 1000. #convert to SI units (ms -> s)
norm = 1./SI_time_stamp_resolution
if form.upper() == 'BOX':
w = 2.0 * SI_sigma * np.sqrt(3)
width = 2 * np.floor(w / 2.0 / SI_time_stamp_resolution) + 1 # always odd number of bins
height = 1. / width
kernel = np.ones((1, width)) * height # area = 1
elif form.upper() == 'TRI':
w = 2 * SI_sigma * np.sqrt(6)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
trileft = np.arange(1, halfwidth + 2)
triright = np.arange(halfwidth, 0, -1) # odd number of bins
triangle = np.append(trileft, triright)
kernel = triangle / triangle.sum() # area = 1
elif form.upper() == 'EPA':
w = 2.0 * SI_sigma * np.sqrt(5)
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
base = np.arange(-halfwidth, halfwidth + 1)
parabula = base**2
epanech = parabula.max() - parabula # inverse parabula
kernel = epanech / epanech.sum() # area = 1
elif form.upper() == 'GAU':
w = 2.0 * SI_sigma * 2.7 # > 99% of distribution weight
halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution) # always odd
base = np.arange(-halfwidth, halfwidth + 1) * SI_time_stamp_resolution
g = np.exp(-(base**2) / 2.0 / SI_sigma**2) / SI_sigma / np.sqrt(2.0 * np.pi)
kernel = g / g.sum()
elif form.upper() == 'ALP':
w = 5.0 * SI_sigma
alpha = np.arange(1, (2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) + 1) * SI_time_stamp_resolution
alpha = (2.0 / SI_sigma**2) * alpha * np.exp(-alpha * np.sqrt(2) / SI_sigma)
kernel = alpha / alpha.sum() # normalization
if direction == -1:
kernel = np.flipud(kernel)
elif form.upper() == 'EXP':
w = 5.0 * SI_sigma
expo = np.arange(1, (2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) + 1) * SI_time_stamp_resolution
expo = np.exp(-expo / SI_sigma)
kernel = expo / expo.sum()
if direction == -1:
kernel = np.flipud(kernel)
kernel = kernel.ravel()
m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
return kernel, norm, m_idx
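# A minimal sketch (the spike array is synthetic and binned at 1 ms): following the
# docstring, a Gaussian kernel with sigma = 20 ms can be convolved with a 0/1 spike
# array to obtain a rate estimate in spikes per second.
def _example_make_kernel_usage():
    """Estimate a firing rate from a random 0/1 spike array using a Gaussian kernel."""
    import scipy.signal
    kernel, norm, m_idx = make_kernel('GAU', sigma=20.0, time_stamp_resolution=1.0)
    spike_data = (np.random.rand(10000) < 0.02).astype(float)  # roughly 20 spikes/s
    rate = norm * scipy.signal.fftconvolve(spike_data, kernel, mode='same')
    return rate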
def simple_frequency_spectrum(x):
"""Simple frequency spectrum.
Very simple calculation of frequency spectrum with no detrending,
windowing, etc, just the first half (positive frequency components) of
abs(fft(x))
Parameters
----------
x : array_like
The input array, in the time-domain.
Returns
-------
spec : array_like
The frequency spectrum of `x`.
"""
spec = np.absolute(np.fft.fft(x))
spec = spec[:len(x) / 2] # take positive frequency components
spec /= len(x) # normalize
spec *= 2.0 # to get amplitudes of sine components, need to multiply by 2
spec[0] /= 2.0 # except for the dc component
return spec
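# A minimal sketch (signal parameters are illustrative): one second of a 50 Hz sine
# sampled at 1 kHz gives a spectrum with an amplitude close to 1 at index 50, since
# bin k corresponds to k Hz for this signal length.
def _example_simple_frequency_spectrum_usage():
    """Return the amplitude spectrum of a 50 Hz unit-amplitude sine wave."""
    t = np.arange(0.0, 1.0, 0.001)
    x = np.sin(2 * np.pi * 50.0 * t)
    spec = simple_frequency_spectrum(x)  # spec[50] is approximately 1.0
    return spec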
class TuningCurve(object):
"""Class to facilitate working with tuning curves."""
def __init__(self, D=None):
"""
If `D` is a dict, it is used to give initial values to the tuning curve.
"""
self._tuning_curves = {}
self._counts = {}
if D is not None:
for k,v in D.items():
self._tuning_curves[k] = [v]
self._counts[k] = 1
self.n = 1
else:
self.n = 0
def add(self, D):
for k,v in D.items():
self._tuning_curves[k].append(v)
self._counts[k] += 1
self.n += 1
def __getitem__(self, i):
D = {}
        for k,v in self._tuning_curves.items():
D[k] = v[i]
return D
def __repr__(self):
return "TuningCurve: %s" % self._tuning_curves
def stats(self):
"""Return the mean tuning curve with stderrs."""
mean = {}
stderr = {}
n = self.n
for k in self._tuning_curves.keys():
arr = np.array(self._tuning_curves[k])
mean[k] = arr.mean()
stderr[k] = arr.std()*n/(n-1)/np.sqrt(n)
return mean, stderr
def max(self):
"""Return the key of the max value and the max value."""
k = _dict_max(self._tuning_curves)
return k, self._tuning_curves[k]
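# Hedged usage sketch (added, not part of the original module); `_dict_max`
# used by max() is defined elsewhere in this file:
#
#   tc = TuningCurve({0: 1.0, 90: 3.0})   # first trial, keyed by orientation
#   tc.add({0: 1.2, 90: 2.8})             # second trial
#   mean, stderr = tc.stats()             # per-orientation mean and std error
#   best_key, responses = tc.max()        # orientation with the largest response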
|
meduz/NeuroTools
|
src/analysis.py
|
Python
|
gpl-2.0
| 17,761
|
[
"Gaussian",
"NEURON"
] |
d55aa2c538f3bc4024045ed5499d89ca3166c06546c08b24863704a6c27fb64f
|
# -*- coding: utf-8 -*-
from DeviceTemplate import Ui_Form
import time, os, sys, gc
from PyQt4 import QtCore, QtGui
#from acq4.pyqtgraph.graphicsItems import ImageItem
import acq4.Manager
from acq4.util.imageAnalysis import *
from acq4.util.debug import *
import numpy as np
import acq4.pyqtgraph as pg
#import acq4.pyqtgraph.WidgetGroup as WidgetGroup
#from acq4.pyqtgraph.ProgressDialog import ProgressDialog
from acq4.util.HelpfulException import HelpfulException
class ScannerDeviceGui(QtGui.QWidget):
def __init__(self, dev, win):
QtGui.QWidget.__init__(self)
self.dev = dev
self.win = win
self.ui = Ui_Form()
self.ui.setupUi(self)
self.stateGroup = pg.WidgetGroup({
'duration': self.ui.scanDurationSpin,
'xMin': self.ui.xMinSpin,
'xMax': self.ui.xMaxSpin,
'yMin': self.ui.yMinSpin,
'yMax': self.ui.yMaxSpin,
'splitter': self.ui.splitter,
})
spos = dev.getShutterVals()
if spos is None:
self.ui.shutterGroup.hide()
else:
self.shutterChanged()
self.ui.shutterXSpin.setValue(spos[0])
self.ui.shutterYSpin.setValue(spos[1])
## Populate Device lists
#defCam = None
#if 'defaultCamera' in self.dev.config:
#defCam = self.dev.config['defaultCamera']
defCam = self.dev.config.get('defaultCamera', None)
#defLaser = None
#if 'defaultLaser' in self.dev.config:
#defLaser = self.dev.config['defaultLaser']
defLaser = self.dev.config.get('defaultLaser', None)
#devs = self.dev.dm.listDevices()
#for d in devs:
#self.ui.cameraCombo.addItem(d)
#self.ui.laserCombo.addItem(d)
#if d == defCam:
#self.ui.cameraCombo.setCurrentIndex(self.ui.cameraCombo.count()-1)
#if d == defLaser:
#self.ui.laserCombo.setCurrentIndex(self.ui.laserCombo.count()-1)
self.ui.cameraCombo.setTypes('camera')
self.ui.laserCombo.setTypes('laser')
self.spots = []
## Populate list of calibrations
self.updateCalibrationList()
## load default config
state = self.dev.loadCalibrationDefaults()
if state is not None:
self.stateGroup.setState(state)
## create graphics scene
#self.image = ImageItem()
#self.scene = self.ui.view.scene
#self.ui.view.enableMouse()
#self.scene.addItem(self.image)
#self.ui.view.setAspectLocked(True)
#self.ui.view.invertY()
self.ui.calibrateBtn.clicked.connect(self.calibrateClicked)
self.ui.storeCamConfBtn.clicked.connect(self.storeCamConf)
self.ui.deleteBtn.clicked.connect(self.deleteClicked)
self.ui.shutterBtn.clicked.connect(self.shutterClicked)
self.dev.sigShutterChanged.connect(self.shutterChanged)
def shutterClicked(self):
self.dev.setShutterOpen(not self.lastShutterState)
def shutterChanged(self):
sh = self.dev.getShutterOpen()
self.lastShutterState = sh
if sh:
self.ui.shutterBtn.setText('Close Shutter')
else:
self.ui.shutterBtn.setText('Open Shutter')
def updateCalibrationList(self):
self.ui.calibrationList.clear()
## Populate calibration lists
index = self.dev.getCalibrationIndex()
for laser in index:
for obj in index[laser]:
cal = index[laser][obj]
spot = '%0.0f, %0.1f um' % (cal['spot'][0], cal['spot'][1]*1e6)
date = cal['date']
item = QtGui.QTreeWidgetItem([', '.join(obj), laser, str(spot), date])
item.opticState = obj
self.ui.calibrationList.addTopLevelItem(item)
def storeCamConf(self):
cam = str(self.ui.cameraCombo.currentText())
self.dev.storeCameraConfig(cam)
def calibrateClicked(self):
cam = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
#obj = self.dev.getObjective()
opticState = self.dev.getDeviceStateKey()
## Run calibration
(cal, spot) = self.runCalibration()
#gc.collect() ## a lot of memory is used in running calibration, make sure we collect all the leftovers now
#cal = MetaArray((512, 512, 2))
#spot = 100e-6
date = time.strftime('%Y.%m.%d %H:%M', time.localtime())
#fileName = cam + '_' + laser + '_' + obj + '.ma'
index = self.dev.getCalibrationIndex()
if laser not in index:
index[laser] = {}
index[laser][opticState] = {'spot': spot, 'date': date, 'params': cal}
self.dev.writeCalibrationIndex(index)
self.dev.writeCalibrationDefaults(self.stateGroup.state())
#cal.write(os.path.join(self.dev.config['calibrationDir'], fileName))
self.updateCalibrationList()
def deleteClicked(self):
cur = self.ui.calibrationList.currentItem()
optState = cur.opticState
laser = str(cur.text(1))
index = self.dev.getCalibrationIndex()
del index[laser][optState]
self.dev.writeCalibrationIndex(index)
self.updateCalibrationList()
def addSpot(self, pos, size):
"""Add a circle to the image"""
s2 = size/2.0
s = QtGui.QGraphicsEllipseItem(0, 0, 1, 1)
s.scale(size, size)
s.setPos(pos[0]-s2, pos[1]-s2)
s.setPen(QtGui.QPen(QtGui.QColor(100, 255, 100, 70)))
self.ui.view.addItem(s)
s.setZValue(100)
self.spots.append(s)
def clearSpots(self):
"""Clear all circles from the image"""
for s in self.spots:
self.ui.view.removeItem(s)
self.spots = []
def runCalibration(self):
"""The scanner calibration routine:
1) Measure background frame, then scan mirrors
while collecting frames as fast as possible (self.scan())
2) Locate spot in every frame using gaussian fit
3) Map image spot locations to coordinate system of Scanner device's parent
        4) Do parabolic fit to determine mapping between voltage and position
"""
camera = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
blurRadius = 5
## Do fast scan of entire allowed command range
(background, cameraResult, positions) = self.scan()
#self.calibrationResult = {'bg': background, 'frames': cameraResult, 'pos': positions}
## Forget first 2 frames since some cameras can't seem to get these right.
frames = cameraResult.asArray()
frames = frames[2:]
positions = positions[2:]
## Do background subtraction
        ## if a MemoryError occurs, drop every other frame and retry until the subtraction succeeds.
finished = False
gc.collect()
while not finished:
try:
frames = frames.astype(np.float32)
frames -= background.astype(np.float32)
finished=True
except MemoryError:
frames = frames[::2,:,:]
positions = positions[::2]
finished = False
#del origFrames
#gc.collect()
## Find a frame with a spot close to the center (within center 1/3)
cx = frames.shape[1] / 3
cy = frames.shape[2] / 3
centerSlice = blur(frames[:, cx:cx*2, cy:cy*2], (0, 5, 5)).max(axis=1).max(axis=1)
maxIndex = argmax(centerSlice)
maxFrame = frames[maxIndex]
#self.calibrationResult['maxFrame'] = maxFrame
#self.calibrationResult['maxIndex'] = maxIndex
## Determine spot intensity and width
mfBlur = blur(maxFrame, blurRadius)
amp = mfBlur.max() - median(mfBlur) ## guess intensity of spot
(x, y) = argwhere(mfBlur == mfBlur.max())[0] ## guess location of spot
fit = fitGaussian2D(maxFrame, [amp, x, y, maxFrame.shape[0] / 10, 0.])[0] ## gaussian fit to locate spot exactly
fit[3] = abs(fit[3]) ## sometimes the fit for width comes out negative. *shrug*
#info = origFrames.infoCopy()[-1]
#pixelSize = info['pixelSize'][0]
#region = info['region']
#binning = info['binning']
someFrame = cameraResult.frames()[0]
frameTransform = pg.SRTTransform(someFrame.globalTransform())
pixelSize = someFrame.info()['pixelSize'][0]
spotAmplitude = fit[0]
spotWidth = fit[3] * pixelSize
size = self.spotSize(mfBlur)
#center = info['centerPosition']
#self.calibrationResult['size'] = size
#self.calibrationResult['spotWidth'] = spotWidth
#self.calibrationResult['spotAmplitude'] = spotAmplitude
#self.calibrationResult['spotFit'] = fit
with pg.ProgressDialog("Calibrating scanner: Computing spot positions...", 0, 100) as dlg:
## Determine location of spot within each frame,
## ignoring frames where the spot is too dim or too close to the frame edge
spotLocations = []
globalSpotLocations = []
spotCommands = []
spotFrames = []
margin = fit[3]
for i in range(len(positions)):
frame = frames[i]
fBlur = blur(frame.astype(np.float32), blurRadius)
mx = fBlur.max()
diff = mx - fBlur.min()
ss = self.spotSize(fBlur)
if ss < size * 0.6:
#print "Ignoring spot:", ss
continue
#else:
#print "Keeping spot:", ss
(x, y) = argwhere(fBlur == mx)[0] # guess location of spot
if x < margin or x > frame.shape[0] - margin:
#print " ..skipping; too close to edge", x, y
continue
if y < margin or y > frame.shape[1] - margin:
#print " ..skipping; too close to edge", x, y
continue
frame[x,y] = -1 ## mark location of peak in image
## convert pixel location to coordinate system of scanner's parent
globalPos = frameTransform.map(pg.Point(x, y)) ## Map from frame pixel location to global coordinates
localPos = self.dev.mapGlobalToParent(globalPos) ## map from global to parent coordinate system. This is the position we calibrate to.
#print (x, y), (globalPos.x(), globalPos.y()), (localPos.x(), localPos.y())
spotLocations.append([localPos.x(), localPos.y()])
globalSpotLocations.append([globalPos.x(), globalPos.y()])
spotCommands.append(positions[i])
spotFrames.append(frame[newaxis])
dlg.setValue(100. * i / frames.shape[0])
if dlg.wasCanceled():
raise HelpfulException('Calibration canceled by user.', msgType='warning')
## sanity check on spot frame
if len(spotFrames) == 0:
self.ui.view.setImage(frames)
raise HelpfulException('Calibration never detected laser spot! Looking for spots that are %f pixels wide.'% fit[3], reasons=['shutter is disabled', 'mirrors are disabled', 'objective is not clean', 'spot is not visible or not bright enough when shutter is open'])
spotFrameMax = concatenate(spotFrames).max(axis=0)
self.ui.view.setImage(spotFrameMax, transform=frameTransform)
self.clearSpots()
for sl in globalSpotLocations:
#self.addSpot(sl, fit[3]*binning[0])
self.addSpot(sl, spotWidth)
self.ui.view.autoRange()
if len(spotFrames) < 10:
raise HelpfulException('Calibration detected only %d frames with laser spot; need minimum of 10.' % len(spotFrames), reasons=['spot is too dim for camera sensitivity', 'objective is not clean', 'mirrors are scanning too quickly', 'mirror scanning region is not within the camera\'s view'])
## Fit all data to a map function
mapParams = self.generateMap(array(spotLocations), array(spotCommands))
#print
#print "Map parameters:", mapParams
return (mapParams, (spotAmplitude, spotWidth))
def generateMap(self, loc, cmd):
"""Generates parameters for functions that map spot locations (Loc) to command values (Cmd).
We assume that command values can be approximated by parabolic functions:
Cmd.X = A + B * Loc.X + C * Loc.Y + D * Loc.X^2 + E * Loc.Y^2
Cmd.Y = F + G * Loc.X + H * Loc.Y + I * Loc.X^2 + J * Loc.Y^2
Returns [[A, B, C, D, E], [F, G, H, I, J]]
"""
# print "==========="
# print loc
# print "============"
# print cmd
#for i in range(loc.shape[0]):
#print tuple(loc[i]), tuple(cmd[i])
## do a two-stage fit, using only linear parameters first.
        ## this is to make sure the second-order parameters do not interfere with the first-order fit.
def fn1(v, loc):
return v[0] + v[1] * loc[:, 0] + v[2] * loc[:, 1]
def fn2(v, loc):
return v[0] + v[1] * loc[:, 0] + v[2] * loc[:, 1] + v[3] * loc[:, 0]**2 + v[4] * loc[:, 1]**2
def erf1(v, loc, cmd):
return fn1(v, loc) - cmd
def erf2(v, loc, cmd):
return fn2(v, loc) - cmd
### sanity checks here on loc and cmd
if loc.shape[0] < 6:
raise Exception("Calibration only detected %d spots; this is not enough." % loc.shape[0])
## fit linear parameters first
xFit = leastsq(erf1, [0, 0, 0], (loc, cmd[:,0]))[0]
yFit = leastsq(erf1, [0, 0, 0], (loc, cmd[:,1]))[0]
#print "fit stage 1:", xFit, yFit
## then fit the parabolic equations, using the linear fit as the seed
#xFit = leastsq(erf2, list(xFit)+[0, 0], (loc, cmd[:,0]))[0]
#yFit = leastsq(erf2, list(yFit)+[0, 0], (loc, cmd[:,1]))[0]
# 2nd stage disabled -- we can bring this back when we have a good method
# for optimization with constraints.
xFit = list(xFit)+[0,0]
yFit = list(yFit)+[0,0]
#print "fit stage 2:", xFit, yFit
## compute fit error
errx = abs(erf2(xFit, loc, cmd[:, 0])).mean()
erry = abs(erf2(yFit, loc, cmd[:, 1])).mean()
print "Fit error:", errx, erry
self.dev.lastCalData = (loc, cmd)
return (list(xFit), list(yFit))
def spotSize(self, frame):
"""Return the normalized integral of all values in the frame that are between max and max/e"""
med = median(frame)
fr1 = frame - med ## subtract median value so baseline is at 0
mask = fr1 > (fr1.max() / np.e) ## find all values > max/e
ss = (fr1 * mask).sum() / mask.sum() ## integrate values within mask, divide by mask area
assert(not np.isnan(ss))
return ss
def scan(self):
"""Scan over x and y ranges in a nPts x nPts grid, return the image recorded at each location."""
camera = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
## Camera settings to use during scan
camParams = self.dev.getCameraConfig(camera)
duration = self.ui.scanDurationSpin.value()
rate = 10000
nPts = int(rate * duration)
sweeps = 20
#cameraTrigger = ones(nPts, dtype=byte)
##(cmdMin, cmdMax) = self.dev.config['commandLimits']
xRange = (self.ui.xMinSpin.value(), self.ui.xMaxSpin.value())
yRange = (self.ui.yMinSpin.value(), self.ui.yMaxSpin.value())
xDiff = xRange[1] - xRange[0]
yDiff = yRange[1] - yRange[0]
xCommand = np.fromfunction(lambda i: xRange[0] + ((xDiff * i * float(sweeps) / nPts) % xDiff), (nPts,), dtype=float)
xCommand[-1] = 0.0
yCommand = np.empty((nPts,), dtype=float)
start = 0
for i in range(sweeps):
stop = start + (nPts / sweeps)
yCommand[start:stop] = yRange[0] + yDiff * (float(i)/(sweeps-1))
start = stop
yCommand[-1] = 0.0
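        ## Hedged note (added): the commands above form a raster scan -- xCommand is a
        ## sawtooth that sweeps xMin..xMax `sweeps` times (via the modulo), while
        ## yCommand holds one of `sweeps` evenly spaced levels per sweep, so the spot
        ## covers the full (xMin..xMax) x (yMin..yMax) rectangle once during `duration`.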
daqName = self.dev.config['XAxis']['device']
## Record 10 camera frames with the shutter closed
#print "parameters:", camParams
cmd = {
'protocol': {'duration': 0.0, 'timeout': 5.0},
camera: {'record': True, 'minFrames': 10, 'params': camParams, 'pushState': 'scanProt'},
#laser: {'Shutter': {'preset': 0, 'holding': 0}}
}
#print "\n\n====> Record background\n"
task = acq4.Manager.getManager().createTask(cmd)
task.execute()
result = task.getResult()
## pull result, convert to ndarray float, take average over all frames
background = result[camera].asArray().astype(float).mean(axis=0)
#print "Background shape:", result[camera]['frames'].shape
## Record full scan.
cmd = {
'protocol': {'duration': duration, 'timeout': duration+5.0},
camera: {'record': True, 'triggerProtocol': True, 'params': camParams, 'channels': {
'exposure': {'record': True},
},
'popState': 'scanProt'},
#laser: {'shutter': {'preset': 0, 'holding': 0, 'command': np.ones(len(xCommand), dtype=byte)}},
laser: {'alignMode': True},
self.dev.name(): {'xCommand': xCommand, 'yCommand': yCommand},
daqName: {'numPts': nPts, 'rate': rate, 'triggerDevice': camera}
}
#print "\n\n====> Scan\n"
task = acq4.Manager.getManager().createTask(cmd)
task.execute(block=False)
with pg.ProgressDialog("Calibrating scanner: Running scan protocol..", 0, 100) as dlg:
while not task.isDone():
dlg.setValue(100.*task.runTime()/task.duration())
if dlg.wasCanceled():
task.abort()
raise HelpfulException('Calibration canceled by user.', msgType='warning')
time.sleep(0.2)
result = task.getResult()
frames = result[camera].asMetaArray()
if frames._info[-1]['preciseTiming'] is not True:
raise HelpfulException("Calibration could not accurately measure camera frame timing.",
reasons=["The exposure signal from the camera was not recorded by the DAQ."])
#print "scan shape:", frames.shape
#print "parameters:", camParams
## Generate a list of the scanner command values for each frame
positions = []
for i in range(frames.shape[0]):
t = frames.xvals('Time')[i]
ind = int((t/duration) * nPts)
if ind >= len(xCommand):
break
positions.append([xCommand[ind], yCommand[ind]])
if frames.ndim != 3 or frames.shape[0] < 5:
raise Exception("Camera did not collect enough frames (data shape is %s)" % str(frames.shape))
if background.shape != frames.shape[1:]:
raise Exception("Background measurement frame has different shape %s from scan frames %s" % (str(background.shape), str(frames.shape[1:])))
return (background, result[camera], positions)
|
hiuwo/acq4
|
acq4/devices/Scanner/DeviceGui.py
|
Python
|
mit
| 20,043
|
[
"Gaussian"
] |
744a01191f2a79fd81ba7d46962fe6ed8f8fff4639e9f4039a66f3d7c28ccbc4
|
""" StratusLabImage
Class used by DIRAC to control virtual machine instances on StratusLab
cloud infrastructures. This provides just the core interface methods for
DIRAC, the real work is done within the StratusLabClient class.
Author: Charles Loomis
"""
#DIRAC
from DIRAC import gLogger, S_ERROR
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.Client.StratusLabClient import StratusLabClient
from VMDIRAC.WorkloadManagementSystem.Utilities.Configuration import StratusLabConfiguration, ImageConfiguration
#from stratuslab.dirac.StratusLabEndpointConfiguration import StratusLabEndpointConfiguration
class StratusLabImage( object ):
"""
Provides interface for managing virtual machine instances of a
particular appliance on a StratusLab cloud infrastructure.
"""
def __init__( self, imageElementName, endpointElementName ):
"""
Creates an instance that will manage appliances of a given type
on a specific cloud infrastructure. Separate instances must be
created for different cloud infrastructures and different
appliances.
The configuration is validated only when the connect() method is
called. This method MUST be called before any of the other
methods.
:Parameters:
**imageElementName** - `string`
element name in CS:/Resources/VirtualMachines/Images describing
the type of appliance (image) to instantiate
**endpointElementName** - `string`
element name in CS:/Resources/VirtualMachines/CloudEndpoint
giving the configuration parameters for the StratusLab cloud
endpoint
"""
self.log = gLogger.getSubLogger( 'StratusLabImage_%s_%s: ' % ( endpointElementName, imageElementName ) )
self._imageConfig = ImageConfiguration( imageElementName, endpointElementName )
self._endpointConfig = StratusLabConfiguration( endpointElementName )
self._impl = None
def connect(self):
"""
Tests the connection to the StratusLab cloud infrastructure. Validates
the configuration and then makes a request to list active virtual
machine instances to ensure that the connection works.
:return: S_OK | S_ERROR
"""
result = self._imageConfig.validate()
self._logResult(result, 'image configuration check')
if not result['OK']:
return result
result = self._endpointConfig.validate()
self._logResult( result, 'endpoint configuration check' )
if not result[ 'OK' ]:
return result
try:
self._impl = StratusLabClient( self._endpointConfig, self._imageConfig )
except Exception, e:
return S_ERROR( e )
result = self._impl.check_connection()
return self._logResult( result, 'connect' )
def startNewInstance( self, vmdiracInstanceID = '' ):
"""
Create a new instance of the given appliance. If successful, returns
a tuple with the instance identifier (actually the node object itself)
and the public IP address of the instance.
The returned instance identifier (node object) must be treated as an
opaque identifier for the instance. It must be passed back to the other
methods in the class without modification!
:return: S_OK(node, public_IP) | S_ERROR
"""
result = self._impl.create( vmdiracInstanceID )
return self._logResult( result, 'startNewInstance' )
def getInstanceStatus( self, instanceId ):
"""
Given the instance ID, returns the status.
:Parameters:
**instanceId** - `node`
instance ID returned by the create() method, actually a Libcloud node object
:return: S_OK | S_ERROR
"""
result = self._impl.status( instanceId )
return self._logResult( result, 'getInstanceStatus: %s' % instanceId )
def stopInstance( self, instanceId, public_ip = None ):
"""
Destroys (kills) the given instance. The public_ip parameter is ignored.
:Parameters:
**instanceId** - `node`
instance ID returned by the create() method, actually a Libcloud node object
**public_ip** - `string`
ignored
:return: S_OK | S_ERROR
"""
result = self._impl.terminate( instanceId, public_ip )
return self._logResult( result, 'stopInstance: %s' % instanceId )
def contextualizeInstance( self, instanceId, public_ip ):
"""
    This method is not a regular method in the sense that it is not generic at all.
    It will be called only for those VMs which need after-boot contextualisation;
    for the time being, that means just ssh contextualisation.
:Parameters:
**instanceId** - `node`
instance ID returned by the create() method, actually a Libcloud node object
**public_ip** - `string`
public IP of the VM, needed for asynchronous contextualisation
:return: S_OK(instanceId) | S_ERROR
"""
result = self._impl.contextualize( instanceId, public_ip )
return self._logResult( result, 'contextualizeInstance: %s, %s' % ( instanceId, public_ip ) )
def _logResult( self, result, msg ):
"""
Checks if the return value is an error. If so it logs it as an error along with the
message. If not, it just logs a success message as 'info'. In both cases, it
returns the result so that it can be returned by the caller.
"""
if not result[ 'OK' ]:
self.log.error( msg )
self.log.error( result[ 'Message' ] )
else:
self.log.info( 'OK: %s' % msg )
return result
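  # Hedged usage sketch (added, not part of the original module); the CS element
  # names below are hypothetical placeholders:
  #
  #   image = StratusLabImage( 'SomeImageElement', 'SomeEndpointElement' )
  #   result = image.connect()
  #   if result[ 'OK' ]:
  #     result = image.startNewInstance()
  #     # per the docstring above, a successful result carries the node object
  #     # and the public IP of the new instance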
#...............................................................................
#EOF
|
vmendez/VMDIRAC
|
WorkloadManagementSystem/Client/StratusLabImage.py
|
Python
|
gpl-3.0
| 5,501
|
[
"DIRAC"
] |
0cf24aa3f6a734e2d5ffb486f7cb5b94f058c102e6846a40f793f73f30ab8beb
|
from unittest import skipUnless
from bok_choy.web_app_test import WebAppTest
from acceptance_tests import PLATFORM_NAME, APPLICATION_NAME, SUPPORT_EMAIL, ENABLE_ERROR_PAGE_TESTS
from acceptance_tests.pages import ServerErrorPage, NotFoundErrorPage, AccessDeniedErrorPage, \
ServiceUnavailableErrorPage
@skipUnless(ENABLE_ERROR_PAGE_TESTS, 'Error page tests are not enabled.')
class ErrorPagesTests(WebAppTest):
error_page_classes = [ServerErrorPage, NotFoundErrorPage, AccessDeniedErrorPage, ServiceUnavailableErrorPage]
def test_valid_pages(self):
for page_class in self.error_page_classes:
page = page_class(self.browser)
# Visit the page
page.visit()
# Check the title
expected = u'{0} | {1} {2}'.format(page.error_title, PLATFORM_NAME, APPLICATION_NAME)
self.assertEqual(expected, self.browser.title)
# Check the support link
element = page.q(css='a[data-role=support-email]')
self.assertTrue(element.present)
href = element.attrs('href')[0]
self.assertEqual(href, 'mailto:{}'.format(SUPPORT_EMAIL))
|
Stanford-Online/edx-analytics-dashboard
|
acceptance_tests/test_error_pages.py
|
Python
|
agpl-3.0
| 1,164
|
[
"VisIt"
] |
57bdc54f30abeed1f56c437bcf04d921a13a7459984742dda904b03c5660d593
|
# jasoncg
# 2015-02-22
#
# mlperceptron.py - A multilayer perceptron implementation in Python
#
# This implementation is designed for readability, not performance.
# Each layer in the perceptron is stored as a Perceptron instance. Each
# neuron is a separate Neuron instance.
#
# Example:
# Generate a new random perceptron that takes 2 inputs and has 40 neurons
# in the first layer, then add a 40 neuron hidden layer and a 1 neuron
# output layer. Since the output layer has only 1 neuron, the network
# outputs only one value
#
# p=Perceptron.new_perceptron_random(2, 40)
# p.add_next_layer(40)
# p.add_next_layer(1)
#
# Train the network with backpropagation. This function takes three inputs:
# - A list of the input data
# - A list of the output data
# - The learning rate
# This particular example should train the network to AND two boolean values together.
#
# for i in range(0, 1000):
# p.backpropagate([0,0], [0], 0.1)
# p.backpropagate([0,1], [0], 0.1)
# p.backpropagate([1,0], [0], 0.1)
# p.backpropagate([1,1], [1], 0.1)
#
# Test the trained network. The evaluate function takes a list of input data and
# returns a list of the calculated results
#
# print("%s %s" %([0,0], p.evaluate([0,0]))) # Shoule be 0
# print("%s %s" %([0,1], p.evaluate([0,1]))) # Shoule be 0
# print("%s %s" %([1,0], p.evaluate([1,0]))) # Shoule be 0
# print("%s %s" %([1,1], p.evaluate([1,1]))) # Shoule be 1
#
#
#
import timer
import random
import math
import numpy as np
import pyopencl as cl
import pyopencl.tools
import pyopencl.array
class Neuron:
def __init__(self, index, weights, perceptron_layer):
self.weights = weights
self.layer = perceptron_layer
self.last_guessed = None
self.error = None
self.dropout = False
@staticmethod
def tanh(input):
return math.tanh(input)
@staticmethod
def dxtanh(input):
return 1.0-math.pow(Neuron.tanh(input), 2.0)
@staticmethod
def sigmoid(input):
try:
ex=math.exp(-input)
return 1.0/(1.0+ex)
except:
# If overflow, snap to either 1 or 0
            if -input>0:
                # lim(sigmoid(x), x -> -inf) = 0
                return 0
            else:
                # lim(sigmoid(x), x -> +inf) = 1
                return 1
#print("Fatal Error %s", -input)
@staticmethod
def dxsigmoid(input):
#ex = math.exp(input)
#return (ex/math.pow(1+ex, 2.0))
s = Neuron.sigmoid(input)
return s*(1-s)
@staticmethod
def activate(input):
return Neuron.sigmoid(input)
@staticmethod
def dxactivate(input):
return Neuron.dxsigmoid(input)
def last_guessedb(self):
if self.last_guessed<0.5:
return 0
return 1
@staticmethod
def to_output(output):
if output<0.5:
return 0
return 1
'''
@staticmethod
def activate(input):
return Neuron.tanh(input)
@staticmethod
def dxactivate(input):
return Neuron.dxtanh(input)
def last_guessedb(self):
if self.last_guessed<=0.0:
return False
return True
@staticmethod
def to_output(output):
if output<=0.0:
return False
return True
'''
def evaluate(self, inputs):
self.dropout = False
result = 0
#Calculate bias
result+=1*self.weights[0]
for i in range(1, len(self.weights)):
result+=inputs[i-1]*self.weights[i]
self.last_guessed=Neuron.activate(result)
return self.last_guessed
def calculate_error(self, expected):
self.error = expected - self.last_guessed
return self.error
def backpropagate(self, err):
di = Neuron.dxactivate(self.last_guessed)
self.error = err * di
return self.error
def backpropagate_update_weights(self, inputs, learning_rate):
# First, update the bias
self.weights[0] = self.weights[0] + learning_rate*self.error
for i in range(1, len(self.weights)):
change = learning_rate*self.error*inputs[i-1]
self.weights[i] = self.weights[i] + change
return self.evaluate(inputs)
class Perceptron:
@staticmethod
def generate_weights(input_count, neuron_count):
length = neuron_count*(input_count+1)
results = length*[0] #np.zeros(length)# []
for i in range(0, length):
results[i] = random.random()*2.0-1.0
return results
@staticmethod
def new_perceptron_random(input_count, neuron_count, perceptron_layer_prev=None):
weights = Perceptron.generate_weights(input_count, neuron_count)
return Perceptron(input_count, neuron_count, weights, perceptron_layer_prev)
def get_weight_count(self):
c=0
for n in range(0, len(self.neurons)):
c+=len(self.neurons[n].weights)
return c
def __init__(self, input_count, neuron_count, weights, perceptron_layer_prev=None):
self.neurons = neuron_count*[None]
self.layer_prev = perceptron_layer_prev
self.layer_next = None
if self.layer_prev == None:
self.index = 0
else:
self.index = self.layer_prev.index+1
weight_index = 0
weights_per_neuron = input_count + 1
for i in range(0, neuron_count):
n = Neuron(i, weights[weight_index:weight_index+weights_per_neuron], self)
weight_index+=weights_per_neuron
self.neurons[i]=n
def add_next_layer(self, neuron_count, weights=None):
if self.layer_next!=None:
return self.layer_next.add_next_layer(neuron_count, weights)
w = weights
if w==None:
w=Perceptron.generate_weights(len(self.neurons), neuron_count)
next = Perceptron(len(self.neurons), neuron_count, w, self)
self.layer_next = next
return next
def evaluate_error(self, inputs, expected):
results = self.evaluate(inputs)
error = 0
for i in range(0, len(expected)):
if results[i]!=expected[i]:
error+=1
error=error/len(expected)
return error
def evaluate(self, inputs, dropout_rate=0.0):
results = len(self.neurons)*[0]
layer_dropout_rate=0.0
# If this is a hidden layer, apply dropout (if set)
if self.layer_prev!=None and self.layer_next!=None:
layer_dropout_rate = dropout_rate
for i in range(0, len(self.neurons)):
if layer_dropout_rate>0:
if random.random()<=layer_dropout_rate:
self.neurons[i].dropout=True
results[i]=0
continue
self.neurons[i].dropout = False
results[i]=self.neurons[i].evaluate(inputs)
if self.layer_next!=None:
return self.layer_next.evaluate(results, dropout_rate)
else:
# If output layer, convert to boolean outputs
for i in range(0, len(results)):
results[i]=Neuron.to_output(results[i])
return results
# Calculate the error for this layer applicable to the
# specified neuron on the previous layer
# En=Sum(Win*Ei)
def get_error_for(self, previous_layer_neuron_index):
output = 0
# Step through each neuron on this layer
for i in range(0, len(self.neurons)):
#index+1 to account for bias (weight[0] is a bias, not fed by a neuron)
if self.neurons[i].dropout!=True:
output+=(self.neurons[i].weights[previous_layer_neuron_index+1]
*self.neurons[i].error)
return output
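    # Hedged worked note (added): for previous-layer neuron j, the code above
    # computes  E_j = sum_i( w_i[j+1] * e_i )  over this layer's neurons i,
    # where w_i[j+1] is neuron i's weight for input j (offset by one because
    # weights[0] is the bias) and e_i is the error stored by backpropagate().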
def backpropagate(self, inputs, expected, learning_rate):
if self.layer_prev is None:
# Input layer
self.evaluate(inputs) #, 0.50)
e = 0
if self.layer_next!=None:
# Not output layer
self.layer_next.backpropagate(None, expected, learning_rate)
for i in range(0, len(self.neurons)):
if self.neurons[i].dropout==True:
continue
err = self.layer_next.get_error_for(i)
e = self.neurons[i].backpropagate(err)
else:
# Output layer
total_errors = 0
for i in range(0, len(self.neurons)):
e = self.neurons[i].calculate_error(expected[i])
self.neurons[i].backpropagate(e)
lg = self.neurons[i].last_guessedb()
if lg!=expected[i]:
total_errors+=1
# Adjust learning_rate
learning_rate*=(1-total_errors/len(self.neurons))
if learning_rate>1.0:
learning_rate=1.0
elif learning_rate<=0.0001:
learning_rate=0.0001
# Now update the weights
if self.layer_prev==None:
# Back to input layer
self.backpropagate_update_weights(inputs, learning_rate)
return e
def backpropagate_update_weights(self, inputs, learning_rate):
results = len(self.neurons)*[0]
for i in range(0, len(self.neurons)):
if self.neurons[i].dropout!=True:
results[i]=self.neurons[i].backpropagate_update_weights(inputs, learning_rate)
else:
results[i]=0
if self.layer_next!=None:
self.layer_next.backpropagate_update_weights(results, learning_rate)
n=Neuron(1,[],3)
def test_AND():
p=Perceptron.new_perceptron_random(2, 40)
p.add_next_layer(40)
p.add_next_layer(1)
print("%s %s" %([0,0], p.evaluate([0,0])))
print("%s %s" %([0,1], p.evaluate([0,1])))
print("%s %s" %([1,0], p.evaluate([1,0])))
print("%s %s" %([1,1], p.evaluate([1,1])))
for i in range(0, 1000):
p.backpropagate([0,0], [0], 0.1)
p.backpropagate([0,1], [0], 0.1)
p.backpropagate([1,0], [0], 0.1)
p.backpropagate([1,1], [1], 0.1)
print("%s %s" %([0,0], p.evaluate([0,0])))
print("%s %s" %([0,1], p.evaluate([0,1])))
print("%s %s" %([1,0], p.evaluate([1,0])))
print("%s %s" %([1,1], p.evaluate([1,1])))
def test_XOR():
p=Perceptron.new_perceptron_random(2, 40)
p.add_next_layer(40)
p.add_next_layer(1)
print("%s %s" %([0,0], p.evaluate([0,0])))
print("%s %s" %([0,1], p.evaluate([0,1])))
print("%s %s" %([1,0], p.evaluate([1,0])))
print("%s %s" %([1,1], p.evaluate([1,1])))
for i in range(0, 2):
for i2 in range(0, 2000):
p.backpropagate([0,0], [0], 0.05)
p.backpropagate([0,1], [1], 0.05)
p.backpropagate([1,0], [1], 0.05)
p.backpropagate([1,1], [0], 0.05)
print("%s %s" %([0,0], p.evaluate([0,0])))
print("%s %s" %([0,1], p.evaluate([0,1])))
print("%s %s" %([1,0], p.evaluate([1,0])))
print("%s %s" %([1,1], p.evaluate([1,1])))
test_XOR()
|
jasoncg/mlperceptronpy
|
mlperceptron.py
|
Python
|
mit
| 9,427
|
[
"NEURON"
] |
d4626088a23ace9c5a8c49584ced40125403061b6569abd685fdced4a5bca956
|
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The resulting expression can be plugged into a model.
"""
from mle import var
x = var('x', vector=True, observed=True)
a = var('a')
b = var('b')
a * x**2 + b
def test_const():
"""
Check if parameters can be set to be constant.
"""
from mle import var, Normal
import numpy as np
x = var('x', vector=True, observed=True)
mu = var('mu', const=True)
sigma = var('sigma')
model = Normal(x, mu, sigma)
np.random.seed(42)
data = np.random.normal(0, 1, 200)
results = model.fit({'x': data}, {'mu': 1, 'sigma': 1})
assert(results.x['mu'] == 1)
# @raises(ValueError)
# def test_error_on_illegal_bound():
# """
# Check if exception is raised when user specifies illegal bound.
# Some distributions automatically apply certain bounds.
# Example: sigma > 0 for the Normal distribution.
# If a user-specified bound conflicts with that, an exception should be thrown.
# """
# from mle import var, Normal
# x = var('x', vector=True, observed=True)
# mu = var('mu')
# sigma = var('sigma', lower=-1)
# Normal(x, mu, sigma)
def test_simple_fit():
"""
Check if fitting Gaussian data works
"""
from mle import Normal, var
import numpy as np
x = var('x', vector=True, observed=True)
mu = var('mu')
sigma = var('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = np.random.normal(0, 1, 100000)
try:
dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='BFGS')
except:
assert False, 'Fitting generated data failed'
def test_linear_regression():
"""
Check if fitting a linear model works.
"""
from mle import Normal, var
import numpy as np
x = var('x', vector=True, observed=True)
y = var('y', vector=True, observed=True)
a = var('a')
b = var('b')
sigma = var('sigma')
model = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = np.linspace(0, 1, 20)
ys = 0.5 * xs - 0.3 + np.random.normal(0, 0.2, 20)
results = model.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
print(results)
def test_pdf_product():
"""
Check if PDF models can be joined
"""
from mle import var, Normal, Join
x = var('x', vector=True, observed=True)
y = var('y', observed=True)
mu = var('mu')
sigma = var('sigma')
model = Join(Join(Normal(x, mu, sigma)), Normal(y, mu, sigma))
assert(model.observed == [x, y])
|
ibab/python-mle
|
tests/__init__.py
|
Python
|
mit
| 2,593
|
[
"Gaussian"
] |
6d57d9ab1a4dbce861173cbb2a951e3d32ed2808750ddc8fb70ea7f9a8b07c5e
|
# -*- coding: utf-8 -*-
"""
Application setup script
To build package:
python3 setup.py sdist bdist_wheel clean
"""
# standard imports
import io
import os
# external imports
from setuptools import find_packages, setup
# application imports
from subsystem import __description__, __program__, __version__
def read(*names, **kwargs):
"""Return contents of text file (in the same directory as this file)."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name=__program__,
version=__version__,
description=__description__,
author='Brian Beffa',
author_email='brbsix@gmail.com',
long_description=read('README.rst'),
url='https://github.com/brbsix/subsystem',
license='GPLv3',
keywords=['advertising', 'download', 'periscope', 'rename', 'srt',
'ss', 'subscope', 'subtitle', 'thunar', 'yad'],
packages=find_packages(),
install_requires=['subnuker'],
entry_points={
'console_scripts': ['subsystem=subsystem.subsystem:main'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Multimedia :: Video',
'Topic :: Utilities',
],
)
|
brbsix/subsystem
|
setup.py
|
Python
|
gpl-3.0
| 1,916
|
[
"Brian"
] |
82ff25f8b2bf422689dedccf9a27f8d35829660c28c2816726097437bc493914
|
# myapp/util/assets.py
from flask.ext.assets import Bundle, Environment
from app import app
bundles = {
'common_js': Bundle(
'../../node_modules/react/dist/react.min.js',
output='gen_files/bundle/common.js'
),
'hello_js': Bundle(
'gen_files/gulp/hello.js',
output='gen_files/bundle/hello.js'
),
}
assets = Environment(app)
assets.register(bundles)
|
agentp/spark
|
assets.py
|
Python
|
mit
| 400
|
[
"GULP"
] |
4625a74eb98f22a7e924479657811f696b9120a2f79d9a0f08a40c3ae49e4725
|
__VERSION__="ete2-2.2rev1056"
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
import copy
from evolevents import EvolEvent
def get_reconciled_tree(node, sptree, events):
""" Returns the recoliation gene tree with a provided species
topology """
if len(node.children) == 2:
# First visit childs
morphed_childs = []
for ch in node.children:
mc, ev = get_reconciled_tree(ch, sptree, events)
morphed_childs.append(mc)
        # morphed childs are the reconciled children; their topology can be
        # trusted. Remember the tree is visited in recursive post-order
sp_child_0 = morphed_childs[0].get_species()
sp_child_1 = morphed_childs[1].get_species()
all_species = sp_child_1 | sp_child_0
        # If the children share species, this node represents a duplication,
        # so both subtrees must be reconciled onto the same species topology
if len(sp_child_0 & sp_child_1) > 0:
newnode = copy.deepcopy(node)
newnode.up = None
newnode.children = []
template = _get_expected_topology(sptree, all_species)
# replaces child0 partition on the template
newmorphed0, matchnode = _replace_on_template(template, morphed_childs[0])
# replaces child1 partition on the template
newmorphed1, matchnode = _replace_on_template(template, morphed_childs[1])
newnode.add_child(newmorphed0)
newnode.add_child(newmorphed1)
newnode.add_feature("evoltype", "D")
node.add_feature("evoltype", "D")
e = EvolEvent()
e.etype = "D"
e.inparalogs = node.children[0].get_leaf_names()
e.outparalogs = node.children[1].get_leaf_names()
e.in_seqs = node.children[0].get_leaf_names()
e.out_seqs = node.children[1].get_leaf_names()
events.append(e)
return newnode, events
        # Otherwise, we need to reconcile the species at both sides
# into a single partition.
else:
# gets the topology expected by the observed species
template = _get_expected_topology(sptree, all_species)
# replaces child0 partition on the template
template, matchnode = _replace_on_template(template, morphed_childs[0] )
# replaces child1 partition on the template
template, matchnode = _replace_on_template(template, morphed_childs[1])
template.add_feature("evoltype","S")
node.add_feature("evoltype","S")
e = EvolEvent()
e.etype = "S"
e.inparalogs = node.children[0].get_leaf_names()
e.orthologs = node.children[1].get_leaf_names()
e.in_seqs = node.children[0].get_leaf_names()
e.out_seqs = node.children[1].get_leaf_names()
events.append(e)
return template, events
elif len(node.children)==0:
return copy.deepcopy(node), events
else:
raise ValueError("Algorithm can only work with binary trees.")
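# Hedged usage sketch (added, not part of the original module); the trees and
# the 3-letter species naming below are illustrative only:
#
#   from ete2 import PhyloTree
#   genetree = PhyloTree("((Hsa_1, Ptr_1), (Hsa_2, Mmu_1));")
#   sptree = PhyloTree("((Hsa, Ptr), Mmu);")
#   reconciled, events = get_reconciled_tree(genetree, sptree, [])
#   for e in events:
#       print e.etype, e.in_seqs, e.out_seqs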
def _replace_on_template(orig_template, node):
template = copy.deepcopy(orig_template)
    # detects partition within topo that matches child1 species
nodespcs = node.get_species()
    spseed = list(nodespcs)[0] # any sp name would be ok
# Set an start point
subtopo = template.search_nodes(children=[], name=spseed)[0]
# While subtopo does not cover all child species
while len(nodespcs - set(subtopo.get_leaf_names() ) )>0:
subtopo= subtopo.up
# Puts original partition on the expected topology template
nodecp = copy.deepcopy(node)
if subtopo.up is None:
return nodecp, nodecp
else:
parent = subtopo.up
parent.remove_child(subtopo)
parent.add_child(nodecp)
return template, nodecp
def _get_expected_topology(t, species):
missing_sp = set(species) - set(t.get_leaf_names())
if missing_sp:
raise KeyError("* The following species are not contained in the species tree: "+ ','.join(missing_sp) )
node = t.search_nodes(children=[], name=list(species)[0])[0]
sps = set(species)
while sps-set(node.get_leaf_names()) != set([]):
node = node.up
template = copy.deepcopy(node)
# make get_species() to work
#template._speciesFunction = _get_species_on_TOL
template.set_species_naming_function(_get_species_on_TOL)
template.detach()
for n in [template]+template.get_descendants():
n.add_feature("evoltype","L")
n.dist = 1
return template
def _get_species_on_TOL(name):
return name
|
csc8630Spring2014/Clusterizer
|
ete2/phylo/reconciliation.py
|
Python
|
mit
| 6,041
|
[
"VisIt"
] |
1f418d4286ce8cb71dd757824e1c8a25d5bb7cc8b3412437c2657056743ac290
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def cv_carsGLM(ip,port):
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type of model-building exercise. 0:regression, 1:binomial,
# 2:poisson
problem = random.sample(range(3),1)[0]
# pick the predictors and response column, along with the correct family
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
family = "binomial"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
family = "poisson"
response_col = "cylinders"
else :
family = "gaussian"
response_col = "economy"
print "Distribution: {0}".format(family)
print "Response column: {0}".format(response_col)
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
tests.check_models(glm1, glm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
try:
tests.check_models(glm1, glm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.setNames(["fold_assignments"])
cars = cars.cbind(fold_assignments)
glm = h2o.glm(y=cars[response_col], x=cars[predictors], training_frame=cars, family=family,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(glm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
assert isinstance(cv_model1, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model1),type(glm))
assert isinstance(cv_model2, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model2),type(glm))
# 4. keep_cross_validation_predictions
cv_predictions = glm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = glm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.glm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors],
# family=family)
# manual_model2 = h2o.glm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors],
# family=family)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
# TODO: PUBDEV-1776
#glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, family=family,
# fold_assignment="Modulo")
# 2. nfolds = 0
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=0, family=family)
# check that this is equivalent to no nfolds
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], family=family)
tests.check_models(glm1, glm2)
# 3. cross-validation and regular validation attempted
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
validation_x=cars[predictors], family=family)
## error cases
# 1. nfolds == 1 or < 0
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
family=family)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, family=family,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
family=family, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# # 4. fold_column and fold_assignment both specified
# try:
# glm = h2o.glm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
# family=family, training_frame=cars)
# assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
# except EnvironmentError:
# assert True
if __name__ == "__main__":
tests.run_test(sys.argv, cv_carsGLM)
|
bospetersen/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_cv_carsGLM.py
|
Python
|
apache-2.0
| 6,750
|
[
"Gaussian"
] |
391ba7c4163ea2cc4ca1b4a48fb74f97fbcecae5c8d57f37e9bb8bac5130187b
|
from __future__ import division
import numpy as np
import scipy as sp
from scipy import sparse
from .signals import smooth_volume
from .utils import image_to_matrix, matrix_to_image
def prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm=5):
"""Estimate autocorrelation and transform data and design for OLS.
Parameters
----------
ts_img : nibabel image
4D image with fMRI data. If using autocorrelation smoothing, the
affine must have correct information about voxel size.
mask_img : nibabel image
3D image with mask defining voxels to include in model. Also used
to constrain the autocorrelation estimate smoothing.
X : n_tp x n_ev array
Design matrix array. Should have zero mean and no constant.
smooth_fwhm : float
Size (in mm) of the smoothing kernel for smoothing the autocorrelation
estimates. Requires that the time series image affine has correct
information about voxel size.
Returns
-------
WY : n_tp x n_vox array
Prewhitened time series data for voxels in mask.
WX : n_tp x n_ev x n_vox array
Prewhitened design matrix for voxels in mask.
"""
from numpy.fft import fft, ifft
# TODO this should be enhanced to take a segmentation image, not
    # just a mask image. That will need to update information accordingly.
Y = image_to_matrix(ts_img, mask_img)
Y = Y - Y.mean(axis=0)
n_tp, n_vox = Y.shape
n_ev = X.shape[1]
assert X.shape[0] == n_tp
# Estimate the autocorrelation function of the model residuals
acf = estimate_residual_autocorrelation(Y, X)
tukey_m, _ = acf.shape
# Smooth the autocorrelation estimates
if smooth_fwhm is None:
acf_smooth = acf
else:
acf_img = matrix_to_image(acf, mask_img)
acf_img_smooth = smooth_volume(acf_img, smooth_fwhm, mask_img)
acf_smooth = image_to_matrix(acf_img_smooth, mask_img)
# Compute the autocorrelation kernel
w_pad = n_tp + tukey_m
acf_kernel = np.zeros((w_pad, n_vox))
acf_kernel[:tukey_m] = acf_smooth
acf_kernel[-tukey_m + 1:] = acf_smooth[:0:-1]
assert (acf_kernel != 0).sum() == (n_vox * (tukey_m * 2 - 1))
# Compute the prewhitening kernel in the spectral domain
acf_fft = fft(acf_kernel, axis=0).real
W_fft = np.zeros((w_pad, n_vox))
W_fft[1:] = 1 / np.sqrt(np.abs(acf_fft[1:]))
W_fft /= np.sqrt(np.sum(W_fft[1:] ** 2, axis=0, keepdims=True) / w_pad)
# Prewhiten the data
Y_fft = fft(Y, axis=0, n=w_pad)
WY = ifft(W_fft * Y_fft, axis=0).real[:n_tp].astype(np.float32)
assert WY.shape == (n_tp, n_vox)
# Prewhiten the design
WX = np.empty((n_tp, n_ev, n_vox), np.float32)
for i in range(n_ev):
X_i = X[:, [i]]
X_fft_i = fft(X_i, axis=0, n=w_pad)
WX_i = ifft(W_fft * X_fft_i, axis=0).real[:n_tp]
WX[:, i, :] = WX_i.astype(np.float32)
return WY, WX
def estimate_residual_autocorrelation(Y, X, tukey_m=None):
"""Fit OLS model and estimate residual autocorrelation with regularization.
Parameters
----------
Y : n_tp x n_vox array
Array of time series data for multiple voxels.
X : n_tp x n_ev array
Design matrix for the model.
tukey_m: int or None
Size of tukey taper window or None to use default rule.
Returns
-------
acf : tukey_m x n_vox array
Regularized autocorrelation function estimate for each voxel.
"""
from numpy.fft import fft, ifft
# Fit initial iteration OLS model in one step
B_ols, _, _, _ = np.linalg.lstsq(X, Y)
Yhat_ols = X.dot(B_ols)
resid_ols = Y - Yhat_ols
    # Compute empirical residual autocorrelation function
n_tp = Y.shape[0]
if tukey_m is None:
tukey_m = default_tukey_window(n_tp)
acf_pad = n_tp * 2 - 1
resid_fft = fft(resid_ols, n=acf_pad, axis=0)
acf_fft = resid_fft * resid_fft.conjugate()
acf = ifft(acf_fft, axis=0).real[:tukey_m]
acf /= acf[[0]]
# Regularize the autocorrelation estimate with a tukey taper
lag = np.expand_dims(np.arange(tukey_m), 1)
window = .5 * (1 + np.cos(np.pi * lag / tukey_m))
acf *= window
return acf
def default_tukey_window(n):
"""The default rule for choosing the Tukey taper window used by FSL."""
return int(np.floor(np.sqrt(n)))
def iterative_ols_fit(Y, X):
"""Fit a linear model using ordinary least squares in each voxel.
The design matrix is expected to be 3D because this function is intended
to be used in the context of a prewhitened model, where each voxel has a
slightly different (whitened) design.
Parameters
----------
Y : n_tp x n_vox array
Time series for each voxel.
X : n_tp x n_ev x n_vox array
Design matrix for each voxel.
Returns
-------
B : n_vox x n_ev array
Parameter estimates at each voxel.
SS : n_vox array
Model error summary at each voxel.
XtXinv : n_vox x n_ev x n_ev array
The pinv(X' * X) matrices at each voxel.
E : n_tp x n_vox array
Residual time series at each voxel.
"""
from numpy import dot
from numpy.linalg import pinv
Y = Y.astype(np.float64)
X = X.astype(np.float64)
assert Y.shape[0] == X.shape[0]
assert Y.shape[1] == X.shape[2]
n_tp, n_ev, n_vox = X.shape
B = np.empty((n_vox, n_ev), np.float32)
SS = np.empty(n_vox, np.float32)
XtXinv = np.empty((n_vox, n_ev, n_ev), np.float32)
E = np.empty((n_tp, n_vox), np.float32)
I = np.eye(n_tp)
for i in range(n_vox):
y_i, X_i = Y[..., i], X[..., i]
XtXinv_i = pinv(dot(X_i.T, X_i))
b_i = dot(XtXinv_i, dot(X_i.T, y_i))
R_i = I - dot(X_i, dot(XtXinv_i, X_i.T))
e_i = dot(R_i, y_i)
ss_i = dot(e_i, e_i.T) / R_i.trace()
B[i] = b_i
SS[i] = ss_i
XtXinv[i] = XtXinv_i
E[:, i] = e_i
return B, SS, XtXinv, E
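# Hedged self-check (added, not part of the original module): fit a tiny
# synthetic per-voxel design and recover known parameters.
def _demo_iterative_ols_fit():
    """Sketch only: build Y from a known B, fit with iterative_ols_fit, compare."""
    rs = np.random.RandomState(0)
    n_tp, n_ev, n_vox = 60, 2, 4
    X = rs.normal(size=(n_tp, n_ev, n_vox))
    B_true = rs.normal(size=(n_vox, n_ev))
    Y = np.einsum("tev,ve->tv", X, B_true) + .01 * rs.normal(size=(n_tp, n_vox))
    B, SS, XtXinv, E = iterative_ols_fit(Y, X)
    assert np.allclose(B, B_true, atol=.05)
    return B, SS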
def iterative_contrast_estimation(B, SS, XtXinv, C):
"""Compute contrast parameter and variance estimates in each voxel.
Parameters
----------
B : n_vox x n_ev array
Parameter estimates for each voxel.
SS : n_vox array
The model error summary at each voxel.
XtXinv : n_vox x n_ev x n_ev array
The pinv(X' * X) matrices for each voxel.
C : n_con x n_ev array
List of contrast vectors.
Returns
-------
G : n_vox x n_con array
Contrast parameter estimates.
V : n_vox x n_con array
Contrast parameter variance estimates.
T : n_vox x n_con array
Contrast t statistics.
"""
from numpy import dot
assert B.shape[0] == XtXinv.shape[0] == SS.shape[0]
assert B.shape[1] == XtXinv.shape[1] == XtXinv.shape[2]
n_vox, n_ev = B.shape
n_con = len(C)
G = np.empty((n_vox, n_con))
V = np.empty((n_vox, n_con))
for i in range(n_vox):
b_i = B[i]
ss_i = SS[i]
XtXinv_i = XtXinv[i]
for j, c_j in enumerate(C):
keff_ij = dot(c_j, dot(XtXinv_i, c_j))
g_ij = dot(c_j, b_i)
v_ij = keff_ij * ss_i
G[i, j] = g_ij
V[i, j] = v_ij
T = G / np.sqrt(V)
return G, V, T
def contrast_fixed_effects(G, V):
"""Compute higher-order fixed effects parameters.
Parameters
----------
G : n_vox x n_run array
First-level contrast parameter estimates.
V : n_Vox x n_run array
First-level contrast parameter variance estimates.
Returns
-------
con : n_vox array
Fixed effects contrast parameter estimates.
var : n_vox array
Fixed effects contrast parameter variance estimates.
t : n_vox array
Fixed effects t statistics.
"""
var = 1 / (1 / V).sum(axis=-1)
con = var * (G / V).sum(axis=-1)
t = con / np.sqrt(var)
return con, var, t
def highpass_filter_matrix(n_tp, cutoff, tr=1):
"""Return an array to implement a gaussian running line filter.
To implement the filter, premultiply your data with this array.
Parameters
----------
n_tp : int
Number of timepoints in data.
cutoff : float
Filter cutoff, in seconds.
tr : float
Temporal resolution of data, in seconds.
    Returns
    -------
    F : n_tp x n_tp array
Filter matrix.
"""
cutoff = cutoff / tr
sig2n = np.square(cutoff / np.sqrt(2))
kernel = np.exp(-np.square(np.arange(n_tp)) / (2 * sig2n))
kernel = 1 / np.sqrt(2 * np.pi * sig2n) * kernel
K = sp.linalg.toeplitz(kernel)
K = np.dot(np.diag(1 / K.sum(axis=1)), K)
H = np.empty((n_tp, n_tp))
X = np.column_stack((np.ones(n_tp), np.arange(n_tp)))
for k in range(n_tp):
W = sparse.diags(K[k])
hat = np.dot(X, np.linalg.pinv(W * X) * W)
H[k] = hat[k]
F = np.eye(n_tp) - H
return F
def highpass_filter(data, cutoff, tr=1, copy=True):
"""Highpass filter data with gaussian running line filter.
Parameters
----------
data : 1d or 2d array
Data array where first dimension is time.
cutoff : float
Filter cutoff in seconds.
tr : float
TR of data in seconds.
copy : boolean
If False, data is filtered in place.
Returns
-------
data : 1d or 2d array
Filtered version of the data.
"""
if copy:
data = data.copy()
# Ensure data is a matrix
if data.ndim == 1:
need_squeeze = True
data = data[:, np.newaxis]
else:
need_squeeze = False
# Filter each column of the data
n_tp = data.shape[0]
F = highpass_filter_matrix(n_tp, cutoff, tr)
data[:] = np.dot(F, data).astype(data.dtype)
    # Remove the residual mean of each timeseries to match FSL
data -= data.mean(axis=0, keepdims=True)
# Remove added dimensions
if need_squeeze:
data = data.squeeze()
return data
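# Hedged self-check (added, not part of the original module): the running-line
# filter should remove a slow drift while keeping a fast oscillation.
def _demo_highpass_filter():
    """Sketch only: filter a drifting sine and confirm the drift is attenuated."""
    n_tp, tr, cutoff = 200, 2., 64.
    t = np.arange(n_tp) * tr
    drift = .01 * t                        # slow linear trend (below cutoff)
    fast = np.sin(2 * np.pi * t / 20.)     # 20 s period (passes the filter)
    clean = highpass_filter(drift + fast, cutoff, tr)
    assert np.abs(clean).max() < np.abs(drift + fast).max()
    return clean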
|
kastman/lyman
|
lyman/glm.py
|
Python
|
bsd-3-clause
| 9,978
|
[
"Gaussian"
] |
41a55cda91183e9a46123a3148277702142a93cfbb5a348bb054dc6bde32a5a2
|
#!/usr/bin/env python
# coding:utf-8
__version__ = '3.3.1'
__password__ = ''
__hostsdeny__ = ()
#__hostsdeny__ = ('.youtube.com', '.youku.com', ".googlevideo.com")
import os
import re
import time
import struct
import zlib
import base64
import logging
import urlparse
import httplib
import io
import string
import traceback
from google.appengine.api import urlfetch
from google.appengine.api.taskqueue.taskqueue import MAX_URL_LENGTH
from google.appengine.runtime import apiproxy_errors
URLFETCH_MAX = 2
URLFETCH_MAXSIZE = 4*1024*1024
URLFETCH_DEFLATE_MAXSIZE = 4*1024*1024
URLFETCH_TIMEOUT = 30
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message From FetchServer</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
try:
from Crypto.Cipher.ARC4 import new as RC4Cipher
except ImportError:
logging.warn('Load Crypto.Cipher.ARC4 Failed, Use Pure Python Instead.')
class RC4Cipher(object):
def __init__(self, key):
x = 0
box = range(256)
for i, y in enumerate(box):
x = (x + y + ord(key[i % len(key)])) & 0xff
box[i], box[x] = box[x], y
self.__box = box
self.__x = 0
self.__y = 0
def encrypt(self, data):
out = []
out_append = out.append
x = self.__x
y = self.__y
box = self.__box
for char in data:
x = (x + 1) & 0xff
y = (y + box[x]) & 0xff
box[x], box[y] = box[y], box[x]
out_append(chr(ord(char) ^ box[(box[x] + box[y]) & 0xff]))
self.__x = x
self.__y = y
return ''.join(out)
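# Hedged sketch (not part of the original script): RC4 is symmetric, so
# encrypting twice with the same key recovers the plaintext. This exercises
# whichever RC4Cipher is in use (Crypto or the pure-Python fallback above);
# the key and data are illustrative.
def _example_rc4_roundtrip():
    key = 'example-key'
    plaintext = 'hello goagent'
    ciphertext = RC4Cipher(key).encrypt(plaintext)
    recovered = RC4Cipher(key).encrypt(ciphertext)
    assert recovered == plaintext
    return ciphertext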
def inflate(data):
return zlib.decompress(data, -zlib.MAX_WBITS)
def deflate(data):
return zlib.compress(data)[2:-4]
def format_response(status, headers, content):
if content:
headers.pop('content-length', None)
headers['Content-Length'] = str(len(content))
data = 'HTTP/1.1 %d %s\r\n%s\r\n\r\n%s' % (status, httplib.responses.get(status, 'Unknown'), '\r\n'.join('%s: %s' % (k.title(), v) for k, v in headers.items()), content)
data = deflate(data)
return struct.pack('!h', len(data)) + data
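# Hedged sketch (not part of the original script): how a client could unframe
# the bytes produced by format_response above - a 2-byte big-endian length
# prefix followed by a raw-deflated HTTP response text. Purely illustrative.
def _example_unframe_response(framed):
    length, = struct.unpack('!h', framed[:2])
    return inflate(framed[2:2 + length])
# e.g. _example_unframe_response(format_response(200, {'Content-Type': 'text/plain'}, 'ok'))
# yields the reconstructed 'HTTP/1.1 200 OK ...' response text.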
def application(environ, start_response):
if environ['REQUEST_METHOD'] == 'GET' and 'HTTP_X_URLFETCH_PS1' not in environ:
timestamp = long(os.environ['CURRENT_VERSION_ID'].split('.')[1])/2**28
ctime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp+8*3600))
start_response('200 OK', [('Content-Type', 'text/plain')])
yield 'GoAgent Python Server %s works, deployed at %s\n' % (__version__, ctime)
if len(__password__) > 2:
yield 'Password: %s%s%s' % (__password__[0], '*'*(len(__password__)-2), __password__[-1])
raise StopIteration
start_response('200 OK', [('Content-Type', 'image/gif')])
if environ['REQUEST_METHOD'] == 'HEAD':
raise StopIteration
options = environ.get('HTTP_X_URLFETCH_OPTIONS', '')
if 'rc4' in options and not __password__:
yield format_response(400, {'Content-Type': 'text/html; charset=utf-8'}, message_html('400 Bad Request', 'Bad Request (options) - please set __password__ in gae.py', 'please set __password__ and upload gae.py again'))
raise StopIteration
try:
if 'HTTP_X_URLFETCH_PS1' in environ:
payload = inflate(base64.b64decode(environ['HTTP_X_URLFETCH_PS1']))
body = inflate(base64.b64decode(environ['HTTP_X_URLFETCH_PS2'])) if 'HTTP_X_URLFETCH_PS2' in environ else ''
else:
wsgi_input = environ['wsgi.input']
input_data = wsgi_input.read(int(environ.get('CONTENT_LENGTH', '0')))
if 'rc4' in options:
input_data = RC4Cipher(__password__).encrypt(input_data)
payload_length, = struct.unpack('!h', input_data[:2])
payload = inflate(input_data[2:2+payload_length])
body = input_data[2+payload_length:]
raw_response_line, payload = payload.split('\r\n', 1)
method, url = raw_response_line.split()[:2]
headers = {}
for line in payload.splitlines():
key, value = line.split(':', 1)
headers[key.title()] = value.strip()
except (zlib.error, KeyError, ValueError):
import traceback
yield format_response(500, {'Content-Type': 'text/html; charset=utf-8'}, message_html('500 Internal Server Error', 'Bad Request (payload) - Possible Wrong Password', '<pre>%s</pre>' % traceback.format_exc()))
raise StopIteration
kwargs = {}
any(kwargs.__setitem__(x[len('x-urlfetch-'):].lower(), headers.pop(x)) for x in headers.keys() if x.lower().startswith('x-urlfetch-'))
if 'Content-Encoding' in headers and body:
# fix bug for LinkedIn android client
if headers['Content-Encoding'] == 'deflate':
try:
body2 = inflate(body)
headers['Content-Length'] = str(len(body2))
del headers['Content-Encoding']
body = body2
except:
pass
logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
if __password__ and __password__ != kwargs.get('password', ''):
yield format_response(403, {'Content-Type': 'text/html; charset=utf-8'}, message_html('403 Wrong password', 'Wrong password(%r)' % kwargs.get('password', ''), 'GoAgent proxy.ini password is wrong!'))
raise StopIteration
netloc = urlparse.urlparse(url).netloc
if __hostsdeny__ and netloc.endswith(__hostsdeny__):
yield format_response(403, {'Content-Type': 'text/html; charset=utf-8'}, message_html('403 Hosts Deny', 'Hosts Deny(%r)' % netloc, detail='Public appids have limited resources, so resource-heavy requests such as streaming video and downloading files are restricted. Please use your own appid. <a href=" https://github.com/XX-net/XX-Net/wiki/Register-Google-appid" target="_blank">Help</a> '))
raise StopIteration
if len(url) > MAX_URL_LENGTH:
yield format_response(400, {'Content-Type': 'text/html; charset=utf-8'}, message_html('400 Bad Request', 'length of URL too long(greater than %r)' % MAX_URL_LENGTH, detail='url=%r' % url))
raise StopIteration
if netloc.startswith(('127.0.0.', '::1', 'localhost')):
yield format_response(400, {'Content-Type': 'text/html; charset=utf-8'}, message_html('GoAgent %s is Running' % __version__, 'Now you can visit some websites', ''.join('<a href="https://%s/">%s</a><br/>' % (x, x) for x in ('google.com', 'mail.google.com'))))
raise StopIteration
fetchmethod = getattr(urlfetch, method, None)
if not fetchmethod:
yield format_response(405, {'Content-Type': 'text/html; charset=utf-8'}, message_html('405 Method Not Allowed', 'Method Not Allowed: %r' % method, detail='Method Not Allowed URL=%r' % url))
raise StopIteration
timeout = int(kwargs.get('timeout', URLFETCH_TIMEOUT))
validate_certificate = bool(int(kwargs.get('validate', 0)))
maxsize = int(kwargs.get('maxsize', 0))
# https://www.freebsdchina.org/forum/viewtopic.php?t=54269
accept_encoding = headers.get('Accept-Encoding', '') or headers.get('Bccept-Encoding', '')
errors = []
allow_truncated = False
for i in xrange(int(kwargs.get('fetchmax', URLFETCH_MAX))):
try:
response = urlfetch.fetch(url, body, fetchmethod, headers, allow_truncated=allow_truncated, follow_redirects=False, deadline=timeout, validate_certificate=validate_certificate)
break
except apiproxy_errors.OverQuotaError as e:
time.sleep(5)
except urlfetch.DeadlineExceededError as e:
errors.append('%r, timeout=%s' % (e, timeout))
logging.error('DeadlineExceededError(timeout=%s, url=%r)', timeout, url)
time.sleep(1)
allow_truncated = True
m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
if m is None:
headers['Range'] = 'bytes=0-%d' % (maxsize or URLFETCH_MAXSIZE)
else:
headers.pop('Range', '')
headers.pop('range', '')
start = int(m.group(1))
headers['Range'] = 'bytes=%s-%d' % (start, start+(maxsize or URLFETCH_MAXSIZE))
timeout *= 2
except urlfetch.DownloadError as e:
errors.append('%r, timeout=%s' % (e, timeout))
logging.error('DownloadError(timeout=%s, url=%r)', timeout, url)
time.sleep(1)
timeout *= 2
except urlfetch.ResponseTooLargeError as e:
errors.append('%r, timeout=%s' % (e, timeout))
response = e.response
logging.error('ResponseTooLargeError(timeout=%s, url=%r) response(%r)', timeout, url, response)
allow_truncated = True
m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
if m is None:
headers['Range'] = 'bytes=0-%d' % (maxsize or URLFETCH_MAXSIZE)
else:
headers.pop('Range', '')
headers.pop('range', '')
start = int(m.group(1))
headers['Range'] = 'bytes=%s-%d' % (start, start+(maxsize or URLFETCH_MAXSIZE))
timeout *= 2
except urlfetch.SSLCertificateError as e:
errors.append('%r, should validate=0 ?' % e)
logging.error('%r, timeout=%s', e, timeout)
except Exception as e:
errors.append(str(e))
stack_str = "stack:%s" % traceback.format_exc()
errors.append(stack_str)
if i == 0 and method == 'GET':
timeout *= 2
else:
error_string = '<br />\n'.join(errors)
if not error_string:
logurl = 'https://appengine.google.com/logs?&app_id=%s' % os.environ['APPLICATION_ID']
error_string = 'Internal Server Error. <p/>try <a href="javascript:window.location.reload(true);">refresh</a> or goto <a href="%s" target="_blank">appengine.google.com</a> for details' % logurl
yield format_response(502, {'Content-Type': 'text/html; charset=utf-8'}, message_html('502 Urlfetch Error', 'Python Urlfetch Error: %r' % method, error_string))
raise StopIteration
#logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])
status_code = int(response.status_code)
data = response.content
response_headers = response.headers
response_headers['X-Head-Content-Length'] = response_headers.get('Content-Length', '')
#for k in response_headers:
# v = response_headers[k]
# logging.debug("Head:%s: %s", k, v)
content_type = response_headers.get('content-type', '')
if status_code == 200 and maxsize and len(data) > maxsize and response_headers.get('accept-ranges', '').lower() == 'bytes' and int(response_headers.get('content-length', 0)):
logging.debug("data len:%d max:%d", len(data), maxsize)
status_code = 206
response_headers['Content-Range'] = 'bytes 0-%d/%d' % (maxsize-1, len(data))
data = data[:maxsize]
if status_code == 200 and 'content-encoding' not in response_headers and 512 < len(data) < URLFETCH_DEFLATE_MAXSIZE and content_type.startswith(('text/', 'application/json', 'application/javascript')):
if 'gzip' in accept_encoding:
response_headers['Content-Encoding'] = 'gzip'
compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
dataio = io.BytesIO()
dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
dataio.write(compressobj.compress(data))
dataio.write(compressobj.flush())
dataio.write(struct.pack('<LL', zlib.crc32(data) & 0xFFFFFFFFL, len(data) & 0xFFFFFFFFL))
data = dataio.getvalue()
elif 'deflate' in accept_encoding:
response_headers['Content-Encoding'] = 'deflate'
data = deflate(data)
response_headers['Content-Length'] = str(len(data))
if 'rc4' not in options:
yield format_response(status_code, response_headers, '')
yield data
else:
cipher = RC4Cipher(__password__)
yield cipher.encrypt(format_response(status_code, response_headers, ''))
yield cipher.encrypt(data)
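# Hedged sketch (not part of the original script): the wire format the request
# parser above expects when X-URLFETCH-PS1 is absent and rc4 is off - a 2-byte
# big-endian length, the deflated "METHOD URL" line plus header lines, then the
# raw request body. X-Urlfetch-* headers (e.g. the password) ride along as
# ordinary header lines. Values are illustrative.
def _example_build_request(method, url, headers, body=''):
    lines = '%s %s HTTP/1.1\r\n' % (method, url)
    lines += ''.join('%s: %s\n' % (k, v) for k, v in headers.items())
    payload = deflate(lines)
    return struct.pack('!h', len(payload)) + payload + body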
|
hexlism/xx_net
|
gae_proxy/server/gae/gae.py
|
Python
|
bsd-2-clause
| 13,880
|
[
"VisIt"
] |
107a0544a87a8c4f8c9391beb2b40ab68bba01b84492faa91837758f3311dfea
|
"""
Augmenters that create weather effects.
List of augmenters:
* :class:`FastSnowyLandscape`
* :class:`CloudLayer`
* :class:`Clouds`
* :class:`Fog`
* :class:`SnowflakesLayer`
* :class:`Snowflakes`
* :class:`RainLayer`
* :class:`Rain`
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import imgaug as ia
from . import meta, arithmetic, blur, contrast, color as colorlib
from .. import parameters as iap
from .. import dtypes as iadt
class FastSnowyLandscape(meta.Augmenter):
"""Convert non-snowy landscapes to snowy ones.
This augmenter expects to get an image that roughly shows a landscape.
This augmenter is based on the method proposed in
https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) This augmenter is based on a colorspace conversion to HLS.
Hence, only RGB ``uint8`` inputs are sensible.
Parameters
----------
lightness_threshold : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
All pixels with lightness in HLS colorspace that is below this value
will have their lightness increased by `lightness_multiplier`.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the discrete interval ``[a..b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
lightness_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier for pixel's lightness value in HLS colorspace.
Affects all pixels selected via `lightness_threshold`.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the discrete interval ``[a..b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
from_colorspace : str, optional
The source colorspace of the input images.
See :func:`~imgaug.augmenters.color.ChangeColorspace.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.FastSnowyLandscape(
>>> lightness_threshold=140,
>>> lightness_multiplier=2.5
>>> )
Search for all pixels in the image with a lightness value in HLS
colorspace of less than ``140`` and increase their lightness by a factor
of ``2.5``.
>>> aug = iaa.FastSnowyLandscape(
>>> lightness_threshold=[128, 200],
>>> lightness_multiplier=(1.5, 3.5)
>>> )
Search for all pixels in the image with a lightness value in HLS
colorspace of less than ``128`` or less than ``200`` (one of these
values is picked per image) and multiply their lightness by a factor
of ``x`` with ``x`` being sampled from ``uniform(1.5, 3.5)`` (once per
image).
>>> aug = iaa.FastSnowyLandscape(
>>> lightness_threshold=(100, 255),
>>> lightness_multiplier=(1.0, 4.0)
>>> )
Similar to the previous example, but the lightness threshold is sampled
from ``uniform(100, 255)`` (per image) and the multiplier
from ``uniform(1.0, 4.0)`` (per image). This seems to produce good and
varied results.
"""
def __init__(self, lightness_threshold=(100, 255),
lightness_multiplier=(1.0, 4.0),
from_colorspace=colorlib.CSPACE_RGB,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(FastSnowyLandscape, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.lightness_threshold = iap.handle_continuous_param(
lightness_threshold, "lightness_threshold",
value_range=(0, 255), tuple_to_uniform=True, list_to_choice=True)
self.lightness_multiplier = iap.handle_continuous_param(
lightness_multiplier, "lightness_multiplier",
value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
self.from_colorspace = from_colorspace
def _draw_samples(self, augmentables, random_state):
nb_augmentables = len(augmentables)
rss = random_state.duplicate(2)
thresh_samples = self.lightness_threshold.draw_samples(
(nb_augmentables,), rss[1])
lmul_samples = self.lightness_multiplier.draw_samples(
(nb_augmentables,), rss[0])
return thresh_samples, lmul_samples
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
thresh_samples, lmul_samples = self._draw_samples(images, random_state)
gen = enumerate(zip(images, thresh_samples, lmul_samples))
for i, (image, thresh, lmul) in gen:
image_hls = colorlib.change_colorspace_(
image, colorlib.CSPACE_HLS, self.from_colorspace)
cvt_dtype = image_hls.dtype
image_hls = image_hls.astype(np.float64)
lightness = image_hls[..., 1]
lightness[lightness < thresh] *= lmul
image_hls = iadt.restore_dtypes_(image_hls, cvt_dtype)
image_rgb = colorlib.change_colorspace_(
image_hls, self.from_colorspace, colorlib.CSPACE_HLS)
batch.images[i] = image_rgb
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.lightness_threshold, self.lightness_multiplier]
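# Hedged sketch (not part of imgaug): the core lightness operation performed
# per image by FastSnowyLandscape, shown on an array that is assumed to already
# be in HLS colorspace (channel 1 = lightness). The threshold and multiplier
# values are illustrative; the real augmenter samples them per image.
def _example_snowy_lightness_step(image_hls, thresh=140.0, mul=2.5):
    image_hls = image_hls.astype(np.float64)
    lightness = image_hls[..., 1]
    lightness[lightness < thresh] *= mul
    return np.clip(image_hls, 0, 255).astype(np.uint8)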
# TODO add examples and add these to the overview docs
# TODO add perspective transform to each cloud layer to make them look more
# distant?
# TODO alpha_mean and density overlap - remove one of them
class CloudLayer(meta.Augmenter):
"""Add a single layer of clouds to an image.
**Supported dtypes**:
* ``uint8``: yes; indirectly tested (1)
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: yes; not tested
* ``float32``: yes; not tested
* ``float64``: yes; not tested
* ``float128``: yes; not tested (2)
* ``bool``: no
- (1) Indirectly tested via tests for :class:`Clouds` and :class:`Fog`
- (2) Note that random values are usually sampled as ``int64`` or
``float64``, which ``float128`` images would exceed. Note also
that random values might have to upscaled, which is done
via :func:`~imgaug.imgaug.imresize_many_images` and has its own
limited dtype support (includes however floats up to ``64bit``).
Parameters
----------
intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Mean intensity of the clouds (i.e. mean color).
Recommended to be in the interval ``[190, 255]``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly
sampled per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
intensity_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Exponent of the frequency noise used to add fine intensity to the
mean intensity.
Recommended to be in the interval ``[-2.5, -1.5]``.
See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.
intensity_coarse_scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Standard deviation of the gaussian distribution used to add more
localized intensity to the mean intensity. Sampled in low resolution
space, i.e. affects final intensity on a coarse level.
Recommended to be in the interval ``(0, 10]``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Minimum alpha when blending cloud noise with the image.
High values will lead to clouds being "everywhere".
Recommended to usually be at around ``0.0`` for clouds and ``>0`` for
fog.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Multiplier for the sampled alpha values. High values will lead to
denser clouds wherever they are visible.
Recommended to be in the interval ``[0.3, 1.0]``.
Note that this parameter currently overlaps with `density_multiplier`,
which is applied a bit later to the alpha mask.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Controls the image size at which the alpha mask is sampled.
Lower values will lead to coarser alpha masks and hence larger
clouds (and empty areas).
See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.
alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Exponent of the frequency noise used to sample the alpha mask.
Similarly to `alpha_size_px_max`, lower values will lead to coarser
alpha patterns.
Recommended to be in the interval ``[-4.0, -1.5]``.
See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.
sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Exponent applied late to the alpha mask. Lower values will lead to
coarser cloud patterns, higher values to finer patterns.
Recommended to be somewhere around ``1.0``.
Do not deviate far from that value, otherwise the alpha mask might
get weird patterns with sudden fall-offs to zero that look very
unnatural.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Late multiplier for the alpha mask, similar to `alpha_multiplier`.
Set this higher to get "denser" clouds wherever they are visible.
Recommended to be around ``[0.5, 1.5]``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
"""
def __init__(self, intensity_mean, intensity_freq_exponent,
intensity_coarse_scale, alpha_min, alpha_multiplier,
alpha_size_px_max, alpha_freq_exponent, sparsity,
density_multiplier,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(CloudLayer, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.intensity_mean = iap.handle_continuous_param(
intensity_mean, "intensity_mean")
self.intensity_freq_exponent = intensity_freq_exponent
self.intensity_coarse_scale = intensity_coarse_scale
self.alpha_min = iap.handle_continuous_param(alpha_min, "alpha_min")
self.alpha_multiplier = iap.handle_continuous_param(
alpha_multiplier, "alpha_multiplier")
self.alpha_size_px_max = alpha_size_px_max
self.alpha_freq_exponent = alpha_freq_exponent
self.sparsity = iap.handle_continuous_param(sparsity, "sparsity")
self.density_multiplier = iap.handle_continuous_param(
density_multiplier, "density_multiplier")
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
rss = random_state.duplicate(len(images))
for i, (image, rs) in enumerate(zip(images, rss)):
batch.images[i] = self.draw_on_image(image, rs)
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.intensity_mean,
self.alpha_min,
self.alpha_multiplier,
self.alpha_size_px_max,
self.alpha_freq_exponent,
self.intensity_freq_exponent,
self.sparsity,
self.density_multiplier,
self.intensity_coarse_scale]
def draw_on_image(self, image, random_state):
iadt.gate_dtypes_strs(
image,
allowed="uint8 float16 float32 float64 float128",
disallowed="bool uint16 uint32 uint64 int8 int16 int32 int64",
augmenter=self
)
alpha, intensity = self.generate_maps(image, random_state)
alpha = alpha[..., np.newaxis]
intensity = intensity[..., np.newaxis]
if image.dtype.kind == "f":
intensity = intensity.astype(image.dtype)
return (1 - alpha) * image + alpha * intensity
intensity = np.clip(intensity, 0, 255)
# TODO use blend_alpha_() here
return np.clip(
(1 - alpha) * image.astype(alpha.dtype)
+ alpha * intensity.astype(alpha.dtype),
0,
255
).astype(np.uint8)
def generate_maps(self, image, random_state):
intensity_mean_sample = self.intensity_mean.draw_sample(random_state)
alpha_min_sample = self.alpha_min.draw_sample(random_state)
alpha_multiplier_sample = \
self.alpha_multiplier.draw_sample(random_state)
alpha_size_px_max = self.alpha_size_px_max
intensity_freq_exponent = self.intensity_freq_exponent
alpha_freq_exponent = self.alpha_freq_exponent
sparsity_sample = self.sparsity.draw_sample(random_state)
density_multiplier_sample = \
self.density_multiplier.draw_sample(random_state)
height, width = image.shape[0:2]
rss_alpha, rss_intensity = random_state.duplicate(2)
intensity_coarse = self._generate_intensity_map_coarse(
height, width, intensity_mean_sample,
iap.Normal(0, scale=self.intensity_coarse_scale),
rss_intensity
)
intensity_fine = self._generate_intensity_map_fine(
height, width, intensity_mean_sample, intensity_freq_exponent,
rss_intensity)
intensity = intensity_coarse + intensity_fine
alpha = self._generate_alpha_mask(
height, width, alpha_min_sample, alpha_multiplier_sample,
alpha_freq_exponent, alpha_size_px_max, sparsity_sample,
density_multiplier_sample, rss_alpha)
return alpha, intensity
@classmethod
def _generate_intensity_map_coarse(cls, height, width, intensity_mean,
intensity_local_offset, random_state):
# TODO (8, 8) might be too simplistic for some image sizes
height_intensity, width_intensity = (8, 8)
intensity = (
intensity_mean
+ intensity_local_offset.draw_samples(
(height_intensity, width_intensity), random_state)
)
intensity = ia.imresize_single_image(
intensity, (height, width), interpolation="cubic")
return intensity
@classmethod
def _generate_intensity_map_fine(cls, height, width, intensity_mean,
exponent, random_state):
intensity_details_generator = iap.FrequencyNoise(
exponent=exponent,
size_px_max=max(height, width, 1), # 1 here for case H, W being 0
upscale_method="cubic"
)
intensity_details = intensity_details_generator.draw_samples(
(height, width), random_state)
return intensity_mean * ((2*intensity_details - 1.0)/5.0)
@classmethod
def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier,
exponent, alpha_size_px_max, sparsity,
density_multiplier, random_state):
alpha_generator = iap.FrequencyNoise(
exponent=exponent,
size_px_max=alpha_size_px_max,
upscale_method="cubic"
)
alpha_local = alpha_generator.draw_samples(
(height, width), random_state)
alpha = alpha_min + (alpha_multiplier * alpha_local)
alpha = (alpha ** sparsity) * density_multiplier
alpha = np.clip(alpha, 0.0, 1.0)
return alpha
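# Hedged usage sketch (not part of imgaug): building a single CloudLayer by
# hand with parameter values similar to those used by Clouds below, then
# applying it to one uint8 RGB image. Values are illustrative.
def _example_cloud_layer(image):
    layer = CloudLayer(
        intensity_mean=(196, 255),
        intensity_freq_exponent=(-2.5, -2.0),
        intensity_coarse_scale=10,
        alpha_min=0,
        alpha_multiplier=(0.25, 0.75),
        alpha_size_px_max=(2, 8),
        alpha_freq_exponent=(-2.5, -2.0),
        sparsity=(0.8, 1.0),
        density_multiplier=(0.5, 1.0),
        seed=1)
    return layer.augment_image(image)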
# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel
# TODO add configurable parameters
class Clouds(meta.SomeOf):
"""
Add clouds to images.
This is a wrapper around :class:`~imgaug.augmenters.weather.CloudLayer`.
It executes 1 to 2 layers per image, leading to varying densities and
frequency patterns of clouds.
This augmenter seems to be fairly robust w.r.t. the image size. Tested
with ``96x128``, ``192x256`` and ``960x1280``.
**Supported dtypes**:
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range
of ``uint8``. While other dtypes may be accepted, they will lead
to images augmented in ways inappropriate for the respective
dtype.
Parameters
----------
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Clouds()
Create an augmenter that adds clouds to images.
"""
def __init__(self,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
layers = [
CloudLayer(
intensity_mean=(196, 255),
intensity_freq_exponent=(-2.5, -2.0),
intensity_coarse_scale=10,
alpha_min=0,
alpha_multiplier=(0.25, 0.75),
alpha_size_px_max=(2, 8),
alpha_freq_exponent=(-2.5, -2.0),
sparsity=(0.8, 1.0),
density_multiplier=(0.5, 1.0),
seed=seed,
random_state=random_state,
deterministic=deterministic
),
CloudLayer(
intensity_mean=(196, 255),
intensity_freq_exponent=(-2.0, -1.0),
intensity_coarse_scale=10,
alpha_min=0,
alpha_multiplier=(0.5, 1.0),
alpha_size_px_max=(64, 128),
alpha_freq_exponent=(-2.0, -1.0),
sparsity=(1.0, 1.4),
density_multiplier=(0.8, 1.5),
seed=seed,
random_state=random_state,
deterministic=deterministic
)
]
super(Clouds, self).__init__(
(1, 2),
children=layers,
random_order=False,
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel
# TODO add configurable parameters
class Fog(CloudLayer):
"""Add fog to images.
This is a wrapper around :class:`~imgaug.augmenters.weather.CloudLayer`.
It executes a single layer per image with a configuration leading to
fairly dense clouds with low-frequency patterns.
This augmenter seems to be fairly robust w.r.t. the image size. Tested
with ``96x128``, ``192x256`` and ``960x1280``.
**Supported dtypes**:
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range
of ``uint8``. While other dtypes may be accepted, they will lead
to images augmented in ways inappropriate for the respective
dtype.
Parameters
----------
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Fog()
Create an augmenter that adds fog to images.
"""
def __init__(self,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(Fog, self).__init__(
intensity_mean=(220, 255),
intensity_freq_exponent=(-2.0, -1.5),
intensity_coarse_scale=2,
alpha_min=(0.7, 0.9),
alpha_multiplier=0.3,
alpha_size_px_max=(2, 8),
alpha_freq_exponent=(-4.0, -2.0),
sparsity=0.9,
density_multiplier=(0.4, 0.9),
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# TODO add examples and add these to the overview docs
# TODO snowflakes are all almost 100% white, add some grayish tones and
# maybe color to them
class SnowflakesLayer(meta.Augmenter):
"""Add a single layer of falling snowflakes to images.
**Supported dtypes**:
* ``uint8``: yes; indirectly tested (1)
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
- (1) indirectly tested via tests for :class:`Snowflakes`
Parameters
----------
density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Density of the snowflake layer, as a probability of each pixel in
low resolution space to be a snowflake.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be in the interval ``[0.01, 0.075]``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size uniformity of the snowflakes. Higher values denote more
similarly sized snowflakes.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be around ``0.5``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size of the snowflakes. This parameter controls the resolution at
which snowflakes are sampled. Higher values mean that the resolution
is closer to the input image's resolution and hence each sampled
snowflake will be smaller (because of the smaller pixel size).
Valid values are in the interval ``(0.0, 1.0]``.
Recommended values:
* On 96x128 a value of ``(0.1, 0.4)`` worked well.
* On 192x256 a value of ``(0.2, 0.7)`` worked well.
* On 960x1280 a value of ``(0.7, 0.95)`` worked well.
Datatype behaviour:
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Controls the size uniformity of the snowflakes. Higher values mean
that the snowflakes are more similarly sized.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be around ``0.5``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Angle in degrees of motion blur applied to the snowflakes, where
``0.0`` is motion blur that points straight upwards.
Recommended to be in the interval ``[-30, 30]``.
See also :func:`~imgaug.augmenters.blur.MotionBlur.__init__`.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Perceived falling speed of the snowflakes. This parameter controls the
motion blur's kernel size. It follows roughly the form
``kernel_size = image_size * speed``. Hence, values around ``1.0``
denote that the motion blur should "stretch" each snowflake over the
whole image.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended values:
* On 96x128 a value of ``(0.01, 0.05)`` worked well.
* On 192x256 a value of ``(0.007, 0.03)`` worked well.
* On 960x1280 a value of ``(0.001, 0.03)`` worked well.
Datatype behaviour:
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Standard deviation (as a fraction of the image size) of gaussian blur
applied to the snowflakes.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be in the interval ``[0.0001, 0.001]``. May still
require tinkering based on image size.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
blur_sigma_limits : tuple of float, optional
Controls allowed min and max values of `blur_sigma_fraction`
after(!) multiplication with the image size. First value is the
minimum, second value is the maximum. Values outside of that range
will be clipped to be within that range. This prevents extreme
values for very small or large images.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
"""
def __init__(self, density, density_uniformity, flake_size,
flake_size_uniformity, angle, speed, blur_sigma_fraction,
blur_sigma_limits=(0.5, 3.75),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(SnowflakesLayer, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.density = density
self.density_uniformity = iap.handle_continuous_param(
density_uniformity, "density_uniformity", value_range=(0.0, 1.0))
self.flake_size = iap.handle_continuous_param(
flake_size, "flake_size", value_range=(0.0+1e-4, 1.0))
self.flake_size_uniformity = iap.handle_continuous_param(
flake_size_uniformity, "flake_size_uniformity",
value_range=(0.0, 1.0))
self.angle = iap.handle_continuous_param(angle, "angle")
self.speed = iap.handle_continuous_param(
speed, "speed", value_range=(0.0, 1.0))
self.blur_sigma_fraction = iap.handle_continuous_param(
blur_sigma_fraction, "blur_sigma_fraction", value_range=(0.0, 1.0))
# (min, max), same for all images
self.blur_sigma_limits = blur_sigma_limits
# (height, width), same for all images
self.gate_noise_size = (8, 8)
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
rss = random_state.duplicate(len(images))
for i, (image, rs) in enumerate(zip(images, rss)):
batch.images[i] = self.draw_on_image(image, rs)
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.density,
self.density_uniformity,
self.flake_size,
self.flake_size_uniformity,
self.angle,
self.speed,
self.blur_sigma_fraction,
self.blur_sigma_limits,
self.gate_noise_size]
def draw_on_image(self, image, random_state):
assert image.ndim == 3, (
"Expected input image to be three-dimensional, "
"got %d dimensions." % (image.ndim,))
assert image.shape[2] in [1, 3], (
"Expected to get image with a channel axis of size 1 or 3, "
"got %d (shape: %s)" % (image.shape[2], image.shape))
rss = random_state.duplicate(2)
flake_size_sample = self.flake_size.draw_sample(random_state)
flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(
random_state)
angle_sample = self.angle.draw_sample(random_state)
speed_sample = self.speed.draw_sample(random_state)
blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(
random_state)
height, width, nb_channels = image.shape
downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)
height_down = max(1, int(height*downscale_factor))
width_down = max(1, int(width*downscale_factor))
noise = self._generate_noise(
height_down,
width_down,
self.density,
rss[0]
)
# gate the sampled noise via noise in range [0.0, 1.0]
# this leads to less flakes in some areas of the image and more in
# other areas
gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)
noise = self._gate(noise, gate_noise, self.gate_noise_size, rss[1])
noise = ia.imresize_single_image(noise, (height, width),
interpolation="cubic")
# apply a bit of gaussian blur and then motion blur according to
# angle and speed
sigma = max(height, width) * blur_sigma_fraction_sample
sigma = np.clip(sigma,
self.blur_sigma_limits[0], self.blur_sigma_limits[1])
noise_small_blur = self._blur(noise, sigma)
noise_small_blur = self._motion_blur(noise_small_blur,
angle=angle_sample,
speed=speed_sample,
random_state=random_state)
noise_small_blur_rgb = self._postprocess_noise(
noise_small_blur, flake_size_uniformity_sample, nb_channels)
return self._blend(image, speed_sample, noise_small_blur_rgb)
@classmethod
def _generate_noise(cls, height, width, density, random_state):
noise = arithmetic.Salt(p=density, random_state=random_state)
return noise.augment_image(np.zeros((height, width), dtype=np.uint8))
@classmethod
def _gate(cls, noise, gate_noise, gate_size, random_state):
# the beta distribution here has most of its weight around 1.0 and
# will only rarely sample values around 0.0. The average of the
# sampled values seems to be at around 0.6-0.75
gate_noise = gate_noise.draw_samples(gate_size, random_state)
gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2],
interpolation="cubic")
gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)
return np.clip(
noise.astype(np.float32) * gate_noise_up, 0, 255
).astype(np.uint8)
@classmethod
def _blur(cls, noise, sigma):
return blur.blur_gaussian_(noise, sigma=sigma)
@classmethod
def _motion_blur(cls, noise, angle, speed, random_state):
size = max(noise.shape[0:2])
k = int(speed * size)
if k <= 1:
return noise
# we use max(k, 3) here because MotionBlur errors for anything less
# than 3
blurer = blur.MotionBlur(
k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)
return blurer.augment_image(noise)
# Added in 0.4.0.
@classmethod
def _postprocess_noise(cls, noise_small_blur,
flake_size_uniformity_sample, nb_channels):
# use contrast adjustment of noise to make the flake size a bit less
# uniform, then readjust the noise values to make them more visible
# again
gain = 1.0 + 2*(1 - flake_size_uniformity_sample)
gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)
noise_small_blur = contrast.GammaContrast(gain).augment_image(
noise_small_blur)
noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj
noise_small_blur_rgb = np.tile(
noise_small_blur[..., np.newaxis], (1, 1, nb_channels))
return noise_small_blur_rgb
# Added in 0.4.0.
@classmethod
def _blend(cls, image, speed_sample, noise_small_blur_rgb):
# blend:
# sum for a bit of glowy, hardly visible flakes
# max for the main flakes
image_f32 = image.astype(np.float32)
image_f32 = cls._blend_by_sum(
image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)
image_f32 = cls._blend_by_max(
image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)
return image_f32
# TODO replace this by a function from module blend.py
@classmethod
def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):
image_f32 = image_f32 + noise_small_blur_rgb
return np.clip(image_f32, 0, 255).astype(np.uint8)
# TODO replace this by a function from module blend.py
@classmethod
def _blend_by_max(cls, image_f32, noise_small_blur_rgb):
image_f32 = np.maximum(image_f32, noise_small_blur_rgb)
return np.clip(image_f32, 0, 255).astype(np.uint8)
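# Hedged usage sketch (not part of imgaug): a single SnowflakesLayer applied
# directly to one uint8 RGB image, with values in the ranges the docstring
# recommends for roughly 192x256 inputs. Values are illustrative.
def _example_snowflakes_layer(image):
    layer = SnowflakesLayer(
        density=0.05,
        density_uniformity=0.5,
        flake_size=(0.2, 0.7),
        flake_size_uniformity=0.5,
        angle=(-30, 30),
        speed=(0.007, 0.03),
        blur_sigma_fraction=(0.0001, 0.001),
        seed=1)
    return layer.augment_image(image)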
class Snowflakes(meta.SomeOf):
"""Add falling snowflakes to images.
This is a wrapper around
:class:`~imgaug.augmenters.weather.SnowflakesLayer`. It executes 1 to 3
layers per image.
**Supported dtypes**:
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range
of ``uint8``. While other dtypes may be accepted, they will lead
to images augmented in ways inappropriate for the respective
dtype.
Parameters
----------
density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Density of the snowflake layer, as a probability of each pixel in
low resolution space to be a snowflake.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be in the interval ``[0.01, 0.075]``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size uniformity of the snowflakes. Higher values denote more
similarly sized snowflakes.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be around ``0.5``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size of the snowflakes. This parameter controls the resolution at
which snowflakes are sampled. Higher values mean that the resolution
is closer to the input image's resolution and hence each sampled
snowflake will be smaller (because of the smaller pixel size).
Valid values are in the interval ``(0.0, 1.0]``.
Recommended values:
* On ``96x128`` a value of ``(0.1, 0.4)`` worked well.
* On ``192x256`` a value of ``(0.2, 0.7)`` worked well.
* On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.
Datatype behaviour:
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Controls the size uniformity of the snowflakes. Higher values mean
that the snowflakes are more similarly sized.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended to be around ``0.5``.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Angle in degrees of motion blur applied to the snowflakes, where
``0.0`` is motion blur that points straight upwards.
Recommended to be in the interval ``[-30, 30]``.
See also :func:`~imgaug.augmenters.blur.MotionBlur.__init__`.
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Perceived falling speed of the snowflakes. This parameter controls the
motion blur's kernel size. It follows roughly the form
``kernel_size = image_size * speed``. Hence, values around ``1.0``
denote that the motion blur should "stretch" each snowflake over
the whole image.
Valid values are in the interval ``[0.0, 1.0]``.
Recommended values:
* On ``96x128`` a value of ``(0.01, 0.05)`` worked well.
* On ``192x256`` a value of ``(0.007, 0.03)`` worked well.
* On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.
Datatype behaviour:
* If a ``number``, then that value will always be used.
* If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``.
* If a ``list``, then a random value will be sampled from that
``list`` per image.
* If a ``StochasticParameter``, then a value will be sampled
per image from that parameter.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))
Add snowflakes to small images (around ``96x128``).
>>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))
Add snowflakes to medium-sized images (around ``192x256``).
>>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))
Add snowflakes to large images (around ``960x1280``).
"""
def __init__(self, density=(0.005, 0.075), density_uniformity=(0.3, 0.9),
flake_size=(0.2, 0.7), flake_size_uniformity=(0.4, 0.8),
angle=(-30, 30), speed=(0.007, 0.03),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
layer = SnowflakesLayer(
density=density,
density_uniformity=density_uniformity,
flake_size=flake_size,
flake_size_uniformity=flake_size_uniformity,
angle=angle,
speed=speed,
blur_sigma_fraction=(0.0001, 0.001),
seed=seed,
random_state=random_state,
deterministic=deterministic
)
super(Snowflakes, self).__init__(
(1, 3),
children=[layer.deepcopy() for _ in range(3)],
random_order=False,
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
class RainLayer(SnowflakesLayer):
"""Add a single layer of falling raindrops to images.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; indirectly tested (1)
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
- (1) indirectly tested via tests for :class:`Rain`
Parameters
----------
density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
drop_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as `flake_size` in
:class:`~imgaug.augmenters.weather.SnowflakesLayer`.
drop_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as `flake_size_uniformity` in
:class:`~imgaug.augmenters.weather.SnowflakesLayer`.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
blur_sigma_limits : tuple of float, optional
Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
"""
# Added in 0.4.0.
def __init__(self, density, density_uniformity, drop_size,
drop_size_uniformity, angle, speed, blur_sigma_fraction,
blur_sigma_limits=(0.5, 3.75),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(RainLayer, self).__init__(
density, density_uniformity, drop_size,
drop_size_uniformity, angle, speed, blur_sigma_fraction,
blur_sigma_limits=blur_sigma_limits,
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# Added in 0.4.0.
@classmethod
def _blur(cls, noise, sigma):
return noise
# Added in 0.4.0.
@classmethod
def _postprocess_noise(cls, noise_small_blur,
flake_size_uniformity_sample, nb_channels):
noise_small_blur_rgb = np.tile(
noise_small_blur[..., np.newaxis], (1, 1, nb_channels))
return noise_small_blur_rgb
# Added in 0.4.0.
@classmethod
def _blend(cls, image, speed_sample, noise_small_blur_rgb):
# We set the mean color based on the noise here. That's a pseudo-random
# approach that saves us from adding the random state as a parameter.
# Note that the sum of noise_small_blur_rgb can be 0 when at least one
# image axis size is 0.
noise_sum = np.sum(noise_small_blur_rgb.flat[0:1000])
noise_sum = noise_sum if noise_sum > 0 else 1
drop_mean_color = 110 + (240 - 110) % noise_sum
noise_small_blur_rgb = noise_small_blur_rgb / 255.0
# The 1.3 multiplier increases the visibility of drops a bit.
noise_small_blur_rgb = np.clip(1.3 * noise_small_blur_rgb, 0, 1.0)
image_f32 = image.astype(np.float32)
image_f32 = (
(1 - noise_small_blur_rgb) * image_f32
+ noise_small_blur_rgb * drop_mean_color
)
return np.clip(image_f32, 0, 255).astype(np.uint8)
class Rain(meta.SomeOf):
"""Add falling snowflakes to images.
This is a wrapper around
:class:`~imgaug.augmenters.weather.RainLayer`. It executes 1 to 3
layers per image.
.. note::
This augmenter currently seems to work best for medium-sized images
around ``192x256``. For smaller images, you may want to increase the
`speed` value to e.g. ``(0.1, 0.3)``, otherwise the drops tend to
look like snowflakes. For larger images, you may want to increase
the `drop_size` to e.g. ``(0.10, 0.20)``.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range
of ``uint8``. While other dtypes may be accepted, they will lead
to images augmented in ways inappropriate for the respective
dtype.
Parameters
----------
drop_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
See :class:`~imgaug.augmenters.weather.RainLayer`.
speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
See :class:`~imgaug.augmenters.weather.RainLayer`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Rain(speed=(0.1, 0.3))
Add rain to small images (around ``96x128``).
>>> aug = iaa.Rain()
Add rain to medium sized images (around ``192x256``).
>>> aug = iaa.Rain(drop_size=(0.10, 0.20))
Add rain to large images (around ``960x1280``).
"""
# Added in 0.4.0.
def __init__(self, nb_iterations=(1, 3),
drop_size=(0.01, 0.02),
speed=(0.04, 0.20),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
layer = RainLayer(
density=(0.03, 0.14),
density_uniformity=(0.8, 1.0),
drop_size=drop_size,
drop_size_uniformity=(0.2, 0.5),
angle=(-15, 15),
speed=speed,
blur_sigma_fraction=(0.001, 0.001),
seed=seed,
random_state=random_state,
deterministic=deterministic
)
super(Rain, self).__init__(
nb_iterations,
children=[layer.deepcopy() for _ in range(3)],
random_order=False,
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
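# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The docstrings
# above explain that `flake_size`/`drop_size` and `speed` should be tuned to
# the input image size; this hedged example just applies both augmenters,
# with their medium-image settings, to a batch of random placeholder images.
# It assumes imgaug and numpy are installed (np is already imported above).
def _weather_usage_sketch():
    # Three medium-sized (192x256) RGB images, the size the defaults target.
    images = np.random.randint(0, 255, size=(3, 192, 256, 3), dtype=np.uint8)
    snow = Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03), seed=1)
    rain = Rain(seed=1)
    return snow(images=images), rain(images=images)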
|
aleju/ImageAugmenter
|
imgaug/augmenters/weather.py
|
Python
|
mit
| 61,460
|
[
"Gaussian"
] |
adf87f010f21e79dc4213f505d8ccedee06871d695d1c25ef3f589a6b2decaf5
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
from __future__ import with_statement
import atexit
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_thread_references = set()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
for thread_reference in _thread_references:
thread = thread_reference()
if thread is not None:
thread.join()
def _remove_dead_thread_references():
"""Remove inactive threads from _thread_references.
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
... t = ThreadPoolExecutor(max_workers=5)
... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
if thread_reference() is None:
_thread_references.discard(thread_reference)
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
try:
work_item = work_queue.get(block=True, timeout=0.1)
except queue.Empty:
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
return
del executor
else:
work_item.run()
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
_remove_dead_thread_references()
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self), self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_thread_references.add(weakref.ref(t))
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
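# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The module-level
# comments above explain why workers are daemon threads and why an atexit
# handler joins them; this hedged example just exercises the public API of
# the ThreadPoolExecutor class defined above (submit / result / shutdown).
def _executor_usage_sketch():
    def square(x):
        return x * x
    executor = ThreadPoolExecutor(max_workers=2)
    try:
        futures = [executor.submit(square, i) for i in range(5)]
        results = [f.result() for f in futures]  # blocks until each completes
    finally:
        executor.shutdown(wait=True)
    return results  # [0, 1, 4, 9, 16]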
|
santegoeds/pythonfutures
|
concurrent/futures/thread.py
|
Python
|
bsd-2-clause
| 4,795
|
[
"Brian"
] |
7980c7794181399465fbc85685fc70457b348607d8abbe9548344736d44bbdb9
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected_rsc01():
# these data were generated by Mathematica
data = [
[0.996808823, 0],
[0.989844967, 0.141863271],
[0.976194821, 0.284448102],
[0.953409148, 0.428342817],
[0.920049652, 0.574319928],
[0.87624791, 0.723271831],
[0.823630904, 0.876122478],
[0.764738453, 1.033745067],
[0.702321448, 1.196905344],
[0.638830444, 1.366234168],
[0.57618155, 1.542222489],
[0.515734438, 1.725228947],
[0.458380406, 1.915491935],
[0.404663814, 2.11314096],
[0.35489631, 2.318204555],
[0.309248761, 2.530613669],
[0.267818662, 2.750200428],
[0.230675114, 2.976692948],
[0.197883279, 3.20970778],
[0.169508198, 3.448742886],
[0.145596157, 3.693175817],
[0.126132849, 3.942273462],
[0.11098406, 4.195219984],
[0.099837318, 4.451166256],
[0.092175519, 4.709296094],
[0.087311826, 4.968894341],
[0.084488131, 5.22939629],
[0.08299976, 5.490403553],
[0.082290809, 5.751666319],
[0.081986378, 6.013045608],
[0.081868622, 6.274472787]]
return zip(*data)
def rsc01():
f = open("../../tests/rogers_stallybrass_clements/gold/rsc01_swater_0191.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
def rsc02():
f = open("../../tests/rogers_stallybrass_clements/gold/rsc02_swater_0016.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
plt.figure()
plt.plot(expected_rsc01()[0], expected_rsc01()[1], 'k-', linewidth = 2.0, label = 'Analytic (RSC)')
plt.plot(rsc01()[0], rsc01()[1], 'rs', markersize = 6.0, label = 'MOOSE (high-res)')
plt.plot(rsc02()[0], rsc02()[1], 'g^', markersize = 5.0, label = 'MOOSE (low-res)')
plt.legend(loc = 'lower right')
plt.xlabel("Saturation")
plt.ylabel("Depth (m)")
plt.title("Water saturation at t=5s for infiltration into two-phase system")
plt.gca().invert_yaxis()
plt.savefig("rsc.pdf")
sys.exit(0)
|
Chuban/moose
|
modules/porous_flow/doc/tests/rogers_stallybrass_clements.py
|
Python
|
lgpl-2.1
| 2,336
|
[
"MOOSE"
] |
eb1b567e7a8afd80b0f8e097b36680ed79174461e3ceec29ab885f5fe2826487
|
"""
RDKit Utilities.
This file contains utilities that compute useful properties of
molecules. Some of these are simple cleanup utilities, and
others are more sophisticated functions that detect chemical
properties of molecules.
"""
import os
import logging
import itertools
import numpy as np
from io import StringIO
from deepchem.utils.pdbqt_utils import pdbqt_to_pdb
from deepchem.utils.pdbqt_utils import convert_mol_to_pdbqt
from deepchem.utils.pdbqt_utils import convert_protein_to_pdbqt
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import compute_centroid
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import MoleculeLoadException
from typing import Any, List, Tuple, Set, Optional, Dict
from deepchem.utils.typing import OneOrMany, RDKitMol
logger = logging.getLogger(__name__)
def get_xyz_from_mol(mol):
"""Extracts a numpy array of coordinates from a molecules.
Returns a `(N, 3)` numpy array of 3d coords of given rdkit molecule
Parameters
----------
mol: rdkit Molecule
Molecule to extract coordinates for
Returns
-------
Numpy ndarray of shape `(N, 3)` where `N = mol.GetNumAtoms()`.
"""
xyz = np.zeros((mol.GetNumAtoms(), 3))
conf = mol.GetConformer()
for i in range(conf.GetNumAtoms()):
position = conf.GetAtomPosition(i)
xyz[i, 0] = position.x
xyz[i, 1] = position.y
xyz[i, 2] = position.z
return (xyz)
def add_hydrogens_to_mol(mol, is_protein=False):
"""
Add hydrogens to a molecule object
Parameters
----------
mol: Rdkit Mol
Molecule to hydrogenate
is_protein: bool, optional (default False)
Whether this molecule is a protein.
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
return apply_pdbfixer(mol, hydrogenate=True, is_protein=is_protein)
def apply_pdbfixer(mol,
add_missing=True,
hydrogenate=True,
pH=7.4,
remove_heterogens=True,
is_protein=True):
"""
Apply PDBFixer to a molecule to try to clean it up.
Parameters
----------
mol: Rdkit Mol
Molecule to clean up.
add_missing: bool, optional
If true, add in missing residues and atoms
hydrogenate: bool, optional
If true, add hydrogens at specified pH
pH: float, optional
The pH at which hydrogens will be added if `hydrogenate==True`. Set to 7.4 by default.
remove_heterogens: bool, optional
Often times, PDB files come with extra waters and salts attached.
If this field is set, remove these heterogens.
is_protein: bool, optional
If false, then don't remove heterogens (since this molecule is
itself a heterogen).
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
molecule_file = None
try:
from pdbfixer import PDBFixer
except ModuleNotFoundError:
raise ImportError("This function requires pdbfixer")
try:
import simtk
except ModuleNotFoundError:
raise ImportError("This function requires openmm")
try:
from rdkit import Chem
pdbblock = Chem.MolToPDBBlock(mol)
pdb_stringio = StringIO()
pdb_stringio.write(pdbblock)
pdb_stringio.seek(0)
fixer = PDBFixer(pdbfile=pdb_stringio)
if add_missing:
fixer.findMissingResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
if hydrogenate:
fixer.addMissingHydrogens(pH)
if is_protein and remove_heterogens:
# False here specifies that water is to be removed
fixer.removeHeterogens(False)
hydrogenated_io = StringIO()
simtk.openmm.app.PDBFile.writeFile(fixer.topology, fixer.positions,
hydrogenated_io)
hydrogenated_io.seek(0)
return Chem.MolFromPDBBlock(
hydrogenated_io.read(), sanitize=False, removeHs=False)
except ValueError as e:
logger.warning("Unable to add hydrogens %s", e)
raise MoleculeLoadException(e)
finally:
try:
os.remove(molecule_file)
except (OSError, TypeError):
pass
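# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). apply_pdbfixer()
# round-trips an RDKit molecule through PDBFixer to add missing residues,
# atoms and hydrogens. A hedged example: it assumes RDKit, PDBFixer and
# OpenMM are installed, and "protein.pdb" is a hypothetical input file.
def _apply_pdbfixer_sketch():
    from rdkit import Chem
    mol = Chem.MolFromPDBFile("protein.pdb", sanitize=False, removeHs=False)
    fixed = apply_pdbfixer(mol, add_missing=True, hydrogenate=True,
                           pH=7.4, remove_heterogens=True, is_protein=True)
    return fixed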
def compute_charges(mol):
"""Attempt to compute Gasteiger Charges on Mol
This also has the side effect of calculating charges on mol. The
mol passed into this function has to already have been sanitized
Parameters
----------
mol: rdkit molecule
Returns
-------
No return since updates in place.
Note
----
This function requires RDKit to be installed.
"""
from rdkit.Chem import AllChem
try:
# Updates charges in place
AllChem.ComputeGasteigerCharges(mol)
except Exception as e:
logging.exception("Unable to compute charges for mol")
raise MoleculeLoadException(e)
def load_complex(molecular_complex: OneOrMany[str],
add_hydrogens: bool = True,
calc_charges: bool = True,
sanitize: bool = True) -> List[Tuple[np.ndarray, RDKitMol]]:
"""Loads a molecular complex.
Given some representation of a molecular complex, returns a list of
tuples, where each tuple contains (xyz coords, rdkit object) for
that constituent molecule in the complex.
For now, assumes that molecular_complex is a tuple of filenames.
Parameters
----------
molecular_complex: list or str
If list, each entry should be a filename for a constituent
molecule in complex. If str, should be the filename of a file that
holds the full complex.
add_hydrogens: bool, optional
If true, add hydrogens via pdbfixer
calc_charges: bool, optional
If true, add charges via rdkit
sanitize: bool, optional
If true, sanitize molecules via rdkit
Returns
-------
List of tuples (xyz, mol)
Note
----
This function requires RDKit to be installed.
"""
if isinstance(molecular_complex, str):
molecular_complex = [molecular_complex]
fragments = []
for mol in molecular_complex:
loaded = load_molecule(
mol,
add_hydrogens=add_hydrogens,
calc_charges=calc_charges,
sanitize=sanitize)
if isinstance(loaded, list):
fragments += loaded
else:
fragments.append(loaded)
return fragments
def load_molecule(molecule_file,
add_hydrogens=True,
calc_charges=True,
sanitize=True,
is_protein=False):
"""Converts molecule file to (xyz-coords, obmol object)
Given molecule_file, returns a tuple of xyz coords of molecule
and an rdkit object representing that molecule in that order `(xyz,
rdkit_mol)`. This ordering convention is used in the code in a few
places.
Parameters
----------
molecule_file: str
filename for molecule
add_hydrogens: bool, optional (default True)
If True, add hydrogens via pdbfixer
calc_charges: bool, optional (default True)
If True, add charges via rdkit
sanitize: bool, optional (default True)
If True, sanitize molecules via rdkit
is_protein: bool, optional (default False)
If True`, this molecule is loaded as a protein. This flag will
affect some of the cleanup procedures applied.
Returns
-------
Tuple (xyz, mol) if file contains single molecule. Else returns a
list of the tuples for the separate molecules in this list.
Note
----
This function requires RDKit to be installed.
"""
from rdkit import Chem
from_pdb = False
if ".mol2" in molecule_file:
my_mol = Chem.MolFromMol2File(molecule_file, sanitize=False, removeHs=False)
elif ".sdf" in molecule_file:
suppl = Chem.SDMolSupplier(str(molecule_file), sanitize=False)
# TODO: This is wrong. Should return all molecules
my_mol = suppl[0]
elif ".pdbqt" in molecule_file:
pdb_block = pdbqt_to_pdb(molecule_file)
my_mol = Chem.MolFromPDBBlock(
str(pdb_block), sanitize=False, removeHs=False)
from_pdb = True
elif ".pdb" in molecule_file:
my_mol = Chem.MolFromPDBFile(
str(molecule_file), sanitize=False, removeHs=False)
from_pdb = True # noqa: F841
else:
raise ValueError("Unrecognized file type for %s" % str(molecule_file))
if my_mol is None:
raise ValueError("Unable to read non None Molecule Object")
if add_hydrogens or calc_charges:
my_mol = apply_pdbfixer(
my_mol, hydrogenate=add_hydrogens, is_protein=is_protein)
if sanitize:
try:
Chem.SanitizeMol(my_mol)
# TODO: Ideally we should catch AtomValenceException but Travis seems to choke on it for some reason.
except:
logger.warning("Mol %s failed sanitization" % Chem.MolToSmiles(my_mol))
if calc_charges:
# This updates in place
compute_charges(my_mol)
xyz = get_xyz_from_mol(my_mol)
return xyz, my_mol
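# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). load_molecule()
# returns an (xyz, rdkit_mol) tuple in that order, as the docstring stresses.
# A hedged example that loads a hypothetical ligand file and writes it back
# out as SDF via write_molecule() (defined just below); RDKit, PDBFixer and
# OpenMM are assumed to be installed, and the file names are placeholders.
def _load_and_write_sketch():
    xyz, mol = load_molecule("ligand.mol2", add_hydrogens=True,
                             calc_charges=True, sanitize=True)
    print("loaded %d atoms" % xyz.shape[0])
    write_molecule(mol, "ligand_out.sdf")
    return xyz, mol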
def write_molecule(mol, outfile, is_protein=False):
"""Write molecule to a file
This function writes a representation of the provided molecule to
the specified `outfile`. Doesn't return anything.
Parameters
----------
mol: rdkit Mol
Molecule to write
outfile: str
Filename to write mol to
is_protein: bool, optional
Is this molecule a protein?
Note
----
This function requires RDKit to be installed.
Raises
------
ValueError: if `outfile` isn't of a supported format.
"""
from rdkit import Chem
if ".pdbqt" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
if is_protein:
convert_protein_to_pdbqt(mol, outfile)
else:
convert_mol_to_pdbqt(mol, outfile)
elif ".pdb" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
elif ".sdf" in outfile:
writer = Chem.SDWriter(outfile)
writer.write(mol)
writer.close()
else:
raise ValueError("Unsupported Format")
def merge_molecules_xyz(xyzs):
"""Merges coordinates of multiple molecules.
Parameters
----------
xyzs: List
List of numpy arrays each of shape `(N_i, 3)` where `N_i` is the number of atoms in the i-th molecule.
"""
return np.array(np.vstack(np.vstack(xyzs)))
def merge_molecules(molecules):
"""Helper method to merge two molecules.
Parameters
----------
molecules: list
List of rdkit molecules
Returns
-------
merged: rdkit molecule
"""
from rdkit.Chem import rdmolops
if len(molecules) == 0:
return None
elif len(molecules) == 1:
return molecules[0]
else:
combined = molecules[0]
for nextmol in molecules[1:]:
combined = rdmolops.CombineMols(combined, nextmol)
return combined
def compute_all_ecfp(mol: RDKitMol,
indices: Optional[Set[int]] = None,
degree: int = 2) -> Dict[int, str]:
"""Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to
an int. Return a dictionary mapping atom index to hashed
SMILES.
Parameters
----------
mol: rdkit Molecule
Molecule to compute ecfp fragments on
indices: Optional[Set[int]]
List of atom indices for molecule. Default is all indices. If
specified will only compute fragments for specified atoms.
degree: int
Graph degree to use when computing ECFP fingerprints
Returns
----------
dict
Dictionary mapping atom index to hashed smiles.
"""
ecfp_dict = {}
from rdkit import Chem
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
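# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). compute_all_ecfp()
# maps each atom index to a "<atomic_num>,<SMILES>" string describing its
# radius-`degree` environment. A hedged example on a small molecule; RDKit is
# assumed to be installed and the exact strings depend on the RDKit version.
def _ecfp_fragments_sketch():
    from rdkit import Chem
    mol = Chem.MolFromSmiles("CCO")  # ethanol
    fragments = compute_all_ecfp(mol, degree=2)
    # roughly {0: '6,CCO', 1: '6,CCO', 2: '8,CCO'}
    return fragments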
def compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11):
"""Computes ECFP features for provided rdkit molecule.
Parameters
----------
mol: rdkit molecule
Molecule to featurize.
ecfp_degree: int
ECFP radius
ecfp_power: int
Number of bits to store ECFP features (2^ecfp_power will be length of
ECFP array)
Returns
-------
ecfp_array: np.ndarray
Returns an array of size 2^ecfp_power where array at index i has a 1 if
that ECFP fragment is found in the molecule and array at index j has a 0
if ECFP fragment not in molecule.
"""
from rdkit.Chem import AllChem
bv = AllChem.GetMorganFingerprintAsBitVect(
mol, ecfp_degree, nBits=2**ecfp_power)
return np.array(bv)
def compute_contact_centroid(molecular_complex: Any,
cutoff: float = 4.5) -> np.ndarray:
"""Computes the (x,y,z) centroid of the contact regions of this molecular complex.
For a molecular complex, it's necessary for various featurizations
that compute voxel grids to find a reasonable center for the
voxelization. This function computes the centroid of all the contact
atoms, defined as an atom that's within `cutoff` Angstroms of an
atom from a different molecule.
Parameters
----------
molecular_complex: Object
A representation of a molecular complex, produced by
`rdkit_util.load_complex`.
cutoff: float, optional
The distance in Angstroms considered for computing contacts.
"""
fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff)
coords = [frag[0] for frag in fragments]
contact_coords = merge_molecules_xyz(coords)
centroid = np.mean(contact_coords, axis=0)
return (centroid)
def reduce_molecular_complex_to_contacts(fragments: List,
cutoff: float = 4.5) -> List:
"""Reduce a molecular complex to only those atoms near a contact.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function takes in a molecular complex and returns a new molecular
complex representation that contains only contact atoms. The contact
atoms are computed by calling `get_contact_atom_indices` under the
hood.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
is a tuple of `(coords, MolecularShim)`. The coords is stripped down
to `(N_contact_atoms, 3)` where `N_contact_atoms` is the number of
contact atoms for this complex. `MolecularShim` is used since it's
tricky to make a RDKit sub-molecule.
"""
atoms_to_keep = get_contact_atom_indices(fragments, cutoff)
reduced_complex = []
for frag, keep in zip(fragments, atoms_to_keep):
contact_frag = get_mol_subset(frag[0], frag[1], keep)
reduced_complex.append(contact_frag)
return reduced_complex
def compute_ring_center(mol, ring_indices):
"""Computes 3D coordinates of a center of a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
ring_centroid: np.ndarray
Position of a ring center
"""
conformer = mol.GetConformer()
ring_xyz = np.zeros((len(ring_indices), 3))
for i, atom_idx in enumerate(ring_indices):
atom_position = conformer.GetAtomPosition(atom_idx)
ring_xyz[i] = np.array(atom_position)
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def get_contact_atom_indices(fragments: List, cutoff: float = 4.5) -> List:
"""Compute the atoms close to contact region.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function computes pairwise distances between all pairs of molecules
in the molecular complex. If an atom is within cutoff distance of
any atom on another molecule in the complex, it is regarded as a
contact atom. Otherwise it is trimmed.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
is a list of atom indices from that molecule which should be kept, in
sorted order.
"""
# indices of atoms to keep
keep_inds: List[Set] = [set([]) for _ in fragments]
for (ind1, ind2) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[ind1], fragments[ind2]
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
keep_inds[ind1] = keep_inds[ind1].union(frag1_atoms)
keep_inds[ind2] = keep_inds[ind2].union(frag2_atoms)
keep_ind_lists = [sorted(list(keep)) for keep in keep_inds]
return keep_ind_lists
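# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The contact
# helpers above are meant to be chained: load a complex, find atoms within
# `cutoff` Angstroms of another molecule, and keep only those. A hedged
# example with hypothetical protein/ligand file names; RDKit is assumed to
# be installed (hydrogens and charges are skipped to avoid PDBFixer here).
def _contact_reduction_sketch():
    fragments = load_complex(["protein.pdb", "ligand.sdf"],
                             add_hydrogens=False, calc_charges=False,
                             sanitize=True)
    centroid = compute_contact_centroid(fragments, cutoff=4.5)
    reduced = reduce_molecular_complex_to_contacts(fragments, cutoff=4.5)
    return centroid, reduced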
def get_mol_subset(coords, mol, atom_indices_to_keep):
"""Strip a subset of the atoms in this molecule
Parameters
----------
coords: Numpy ndarray
Must be of shape (N, 3) and correspond to coordinates of mol.
mol: Rdkit mol or `MolecularFragment`
The molecule to strip
atom_indices_to_keep: list
List of the indices of the atoms to keep. Each index is a unique
number between `[0, N)`.
Returns
-------
A tuple of (coords, mol_frag) where coords is a numpy array of the
coordinates of the kept atoms and mol_frag is a
`MolecularFragment` holding those atoms.
"""
from rdkit import Chem
indexes_to_keep = []
atoms_to_keep = []
#####################################################
# Compute partial charges on molecule if rdkit
if isinstance(mol, Chem.Mol):
compute_charges(mol)
#####################################################
atoms = list(mol.GetAtoms())
for index in atom_indices_to_keep:
indexes_to_keep.append(index)
atoms_to_keep.append(atoms[index])
coords = coords[indexes_to_keep]
mol_frag = MolecularFragment(atoms_to_keep, coords)
return coords, mol_frag
def compute_ring_normal(mol, ring_indices):
"""Computes normal to a plane determined by a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
normal: np.ndarray
Normal vector
"""
conformer = mol.GetConformer()
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring_indices[:3]):
atom_position = conformer.GetAtomPosition(atom_idx)
points[i] = np.array(atom_position)
v1 = points[1] - points[0]
v2 = points[2] - points[0]
normal = np.cross(v1, v2)
return normal
def compute_all_pairs_shortest_path(
mol) -> Dict[Tuple[int, int], Tuple[int, int]]:
"""Computes the All pair shortest between every pair of nodes
in terms of Rdkit Atom indexes.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
Returns:
--------
paths_dict: Dict keyed by atom-index pairs (i, j) with i < j; each value
is the shortest path between the two atoms as a tuple of RDKit atom indices.
"""
try:
from rdkit import Chem
except:
raise ImportError("This class requires RDkit installed")
n_atoms = mol.GetNumAtoms()
paths_dict = {(i, j): Chem.rdmolops.GetShortestPath(mol, i, j)
for i in range(n_atoms) for j in range(n_atoms) if i < j}
return paths_dict
def compute_pairwise_ring_info(mol):
""" Computes all atom-atom pair belong to same ring with
its ring size and its aromaticity.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
Returns:
--------
rings_dict: Dict keyed by atom-index pairs sharing a ring; each value
is a list of (ring_size, is_aromatic) tuples for the rings they share.
"""
try:
from rdkit import Chem
except:
raise ImportError("This class requires RDkit installed")
rings_dict = {}
def ordered_pair(a, b):
return (a, b) if a < b else (b, a)
ssr = [list(x) for x in Chem.GetSymmSSSR(mol)]
for ring in ssr:
ring_sz = len(ring)
is_aromatic = True
for atom_idx in ring:
if not mol.GetAtoms()[atom_idx].GetIsAromatic():
is_aromatic = False
break
for ring_idx, atom_idx in enumerate(ring):
for other_idx in ring[ring_idx:]:
atom_pair = ordered_pair(atom_idx, other_idx)
if atom_pair not in rings_dict:
rings_dict[atom_pair] = [(ring_sz, is_aromatic)]
else:
if (ring_sz, is_aromatic) not in rings_dict[atom_pair]:
rings_dict[atom_pair].append((ring_sz, is_aromatic))
return rings_dict
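# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module).
# compute_pairwise_ring_info() keys on ordered atom-index pairs that share a
# ring and records (ring_size, is_aromatic) tuples. A hedged example on
# benzene; RDKit is assumed to be installed.
def _ring_info_sketch():
    from rdkit import Chem
    mol = Chem.MolFromSmiles("c1ccccc1")  # benzene
    rings = compute_pairwise_ring_info(mol)
    # pairs of the six ring atoms (including self-pairs in this
    # implementation) map to [(6, True)]
    return rings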
|
deepchem/deepchem
|
deepchem/utils/rdkit_utils.py
|
Python
|
mit
| 21,198
|
[
"OpenMM",
"RDKit"
] |
9cb25db8a1a1f4dba681607fccd8ae98286b2d0915ebcf1185b5c5465a39a79a
|
from functools import partial
import os
import numpy as np
from sklearn.preprocessing import LabelEncoder
from .. import SDC, NuSDC, Features
data_dir = os.path.join(os.path.dirname(__file__), 'data')
################################################################################
# TODO: add *real* tests
def _check_acc(acc):
assert acc >= .85, "accuracy is only {}".format(acc)
def test_simple():
div_funcs = ['hellinger', 'kl', 'l2',
'renyi:0.7', 'renyi:0.9', 'renyi:0.99']
Ks = [3, 8]
for name in ['gaussian-2d-mean0-std1,2']: # , 'gaussian-20d-mean0-std1,2']:
feats = Features.load_from_hdf5(os.path.join(data_dir, name + '.h5'))
le = LabelEncoder()
y = le.fit_transform(feats.categories)
for div_func in div_funcs:
for K in Ks:
for cls in [SDC, NuSDC]:
for wts in [None, np.random.uniform(.7, 1.3, len(feats))]:
clf = cls(div_func=div_func, K=K, n_proc=1)
acc, preds = clf.crossvalidate(
feats, y, sample_weight=wts, num_folds=3)
fn = partial(_check_acc, acc)
fn.description = "CV: {} - {}, K={}".format(
name, div_func, K)
yield fn
################################################################################
if __name__ == '__main__':
import warnings
warnings.filterwarnings('error', module='sdm')
import nose
nose.main()
|
dougalsutherland/py-sdm
|
sdm/tests/test_sdm.py
|
Python
|
bsd-3-clause
| 1,555
|
[
"Gaussian"
] |
d32c15839b897a43370d7ad1948c20a5cf9f8f081b6341a9543b5bcb78e9528c
|
# -*- coding: utf-8 -*-
# rdesignerProtos.py ---
#
# Filename: rdesignerProtos.py
# Description:
# Author: Subhasis Ray, Upi Bhalla
# Maintainer:
# Created: Tue May 7 12:11:22 2013 (+0530)
# Version:
# Last-Updated: Wed Dec 30 13:01:00 2015 (+0530)
# By: Upi
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import moose
import math
from moose import utils
EREST_ACT = -70e-3
per_ms = 1e3
PI = 3.14159265359
FaradayConst = 96485.3365 # Coulomb/mol
def make_HH_Na(name = 'HH_Na', parent='/library', vmin=-110e-3, vmax=50e-3, vdivs=3000):
"""Create a Hodhkin-Huxley Na channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
na = moose.HHChannel('%s/%s' % (parent, name))
na.Ek = 50e-3
na.Xpower = 3
na.Ypower = 1
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
m_gate = moose.element('%s/gateX' % (na.path))
m_gate.min = vmin
m_gate.max = vmax
m_gate.divs = vdivs
m_gate.tableA = m_alpha
m_gate.tableB = m_alpha + m_beta
h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
h_gate = moose.element('%s/gateY' % (na.path))
h_gate.min = vmin
h_gate.max = vmax
h_gate.divs = vdivs
h_gate.tableA = h_alpha
h_gate.tableB = h_alpha + h_beta
na.tick = -1
return na
def make_HH_K(name = 'HH_K', parent='/library', vmin=-120e-3, vmax=40e-3, vdivs=3000):
"""Create a Hodhkin-Huxley K channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
k = moose.HHChannel('%s/%s' % (parent, name))
k.Ek = -77e-3
k.Xpower = 4
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
n_gate = moose.element('%s/gateX' % (k.path))
n_gate.min = vmin
n_gate.max = vmax
n_gate.divs = vdivs
n_gate.tableA = n_alpha
n_gate.tableB = n_alpha + n_beta
k.tick = -1
return k
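# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). make_HH_Na() and
# make_HH_K() create HHChannel prototypes under /library with their gate
# tables filled in. A hedged example that builds both prototypes; it assumes
# a running MOOSE session, and creates /library if it does not exist yet.
def _hh_channel_proto_sketch():
    if not moose.exists('/library'):
        moose.Neutral('/library')
    na = make_HH_Na(name='HH_Na', parent='/library')
    k = make_HH_K(name='HH_K', parent='/library')
    return na, k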
#========================================================================
# SynChan: Glu receptor
#========================================================================
def make_glu( name ):
if moose.exists( '/library/' + name ):
return
glu = moose.SynChan( '/library/' + name )
glu.Ek = 0.0
glu.tau1 = 2.0e-3
glu.tau2 = 9.0e-3
sh = moose.SimpleSynHandler( glu.path + '/sh' )
moose.connect( sh, 'activationOut', glu, 'activation' )
sh.numSynapses = 1
sh.synapse[0].weight = 1
return glu
#========================================================================
# SynChan: GABA receptor
#========================================================================
def make_GABA( name ):
if moose.exists( '/library/' + name ):
return
GABA = moose.SynChan( '/library/' + name )
GABA.Ek = EK + 10.0e-3  # NOTE: EK (K+ reversal potential) is not defined in this file; it must be set at module scope before make_GABA is called.
GABA.tau1 = 4.0e-3
GABA.tau2 = 9.0e-3
sh = moose.SimpleSynHandler( GABA.path + '/sh' )
moose.connect( sh, 'activationOut', GABA, 'activation' )
sh.numSynapses = 1
sh.synapse[0].weight = 1
def makeChemOscillator( name = 'osc', parent = '/library' ):
model = moose.Neutral( parent + '/' + name )
compt = moose.CubeMesh( model.path + '/kinetics' )
"""
This function sets up a simple oscillatory chemical system within
the script. The reaction system is::
s ---a---> a // s goes to a, catalyzed by a.
s ---a---> b // s goes to b, catalyzed by a.
a ---b---> s // a goes to s, catalyzed by b.
b -------> s // b is degraded irreversibly to s.
in sum, **a** has a positive feedback onto itself and also forms **b**.
**b** has a negative feedback onto **a**.
Finally, the diffusion constant for **a** is 1/10 that of **b**.
"""
# create container for model
diffConst = 10e-12 # m^2/sec
motorRate = 1e-6 # m/sec
concA = 1 # millimolar
# create molecules and reactions
a = moose.Pool( compt.path + '/a' )
b = moose.Pool( compt.path + '/b' )
s = moose.Pool( compt.path + '/s' )
e1 = moose.MMenz( compt.path + '/e1' )
e2 = moose.MMenz( compt.path + '/e2' )
e3 = moose.MMenz( compt.path + '/e3' )
r1 = moose.Reac( compt.path + '/r1' )
a.concInit = 0.1
b.concInit = 0.1
s.concInit = 1
moose.connect( e1, 'sub', s, 'reac' )
moose.connect( e1, 'prd', a, 'reac' )
moose.connect( a, 'nOut', e1, 'enzDest' )
e1.Km = 1
e1.kcat = 1
moose.connect( e2, 'sub', s, 'reac' )
moose.connect( e2, 'prd', b, 'reac' )
moose.connect( a, 'nOut', e2, 'enzDest' )
e2.Km = 1
e2.kcat = 0.5
moose.connect( e3, 'sub', a, 'reac' )
moose.connect( e3, 'prd', s, 'reac' )
moose.connect( b, 'nOut', e3, 'enzDest' )
e3.Km = 0.1
e3.kcat = 1
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'prd', s, 'reac' )
r1.Kf = 0.3 # 1/sec
r1.Kb = 0 # 1/sec
# Assign parameters
a.diffConst = diffConst/10
b.diffConst = diffConst
s.diffConst = 0
return compt
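# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file). The string inside
# makeChemOscillator() describes the reaction scheme in words. Under a
# hedged, non-spatial reading of the parameters above (Michaelis-Menten
# enzymes e1-e3 plus the first-order reaction r1, diffusion ignored), the
# deterministic rate equations are roughly:
#   da/dt = kcat1*a*s/(Km1 + s) - kcat3*b*a/(Km3 + a)
#   db/dt = kcat2*a*s/(Km2 + s) - Kf*b
#   ds/dt = -(da/dt + db/dt)      (total material is conserved)
def _oscillator_rates_sketch(a, b, s,
                             kcat1=1.0, Km1=1.0, kcat2=0.5, Km2=1.0,
                             kcat3=1.0, Km3=0.1, Kf=0.3):
    r1 = kcat1 * a * s / (Km1 + s)   # s -> a, catalyzed by a
    r2 = kcat2 * a * s / (Km2 + s)   # s -> b, catalyzed by a
    r3 = kcat3 * b * a / (Km3 + a)   # a -> s, catalyzed by b
    r4 = Kf * b                      # b -> s, irreversible
    da = r1 - r3
    db = r2 - r4
    ds = -(da + db)
    return da, db, ds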
#################################################################
# Here we have a series of utility functions for building cell
# prototypes.
#################################################################
def transformNMDAR( path ):
for i in moose.wildcardFind( path + "/##/#NMDA#[ISA!=NMDAChan]" ):
chanpath = i.path
pa = i.parent
i.name = '_temp'
if ( chanpath[-3:] == "[0]" ):
chanpath = chanpath[:-3]
nmdar = moose.NMDAChan( chanpath )
sh = moose.SimpleSynHandler( chanpath + '/sh' )
moose.connect( sh, 'activationOut', nmdar, 'activation' )
sh.numSynapses = 1
sh.synapse[0].weight = 1
nmdar.Ek = i.Ek
nmdar.tau1 = i.tau1
nmdar.tau2 = i.tau2
nmdar.Gbar = i.Gbar
nmdar.CMg = 12
nmdar.KMg_A = 1.0 / 0.28
nmdar.KMg_B = 1.0 / 62
nmdar.temperature = 300
nmdar.extCa = 1.5
nmdar.intCa = 0.00008
nmdar.intCaScale = 1
nmdar.intCaOffset = 0.00008
nmdar.condFraction = 0.02
moose.delete( i )
moose.connect( pa, 'channel', nmdar, 'channel' )
caconc = moose.wildcardFind( pa.path + '/#[ISA=CaConcBase]' )
if ( len( caconc ) < 1 ):
print('no caconcs found on ', pa.path)
else:
moose.connect( nmdar, 'ICaOut', caconc[0], 'current' )
moose.connect( caconc[0], 'concOut', nmdar, 'assignIntCa' )
################################################################
# Utility function for building a compartment, used for spines.
# Builds a compartment object downstream (further away from soma)
# of the specfied previous compartment 'pa'. If 'pa' is not a
# compartment, it builds it on 'pa'. It places the compartment
# on the end of 'prev', and at 0,0,0 otherwise.
def buildCompt( pa, name, RM = 1.0, RA = 1.0, CM = 0.01, dia = 1.0e-6, x = 0.0, y = 0.0, z = 0.0, dx = 10e-6, dy = 0.0, dz = 0.0 ):
length = np.sqrt( dx * dx + dy * dy + dz * dz )
compt = moose.Compartment( pa.path + '/' + name )
compt.x0 = x
compt.y0 = y
compt.z0 = z
compt.x = dx + x
compt.y = dy + y
compt.z = dz + z
compt.diameter = dia
compt.length = length
xa = dia * dia * PI / 4.0
sa = length * dia * PI
compt.Ra = length * RA / xa
compt.Rm = RM / sa
compt.Cm = CM * sa
return compt
def buildComptWrapper( pa, name, length, dia, xoffset, RM, RA, CM ):
return buildCompt( pa, name, RM, RA, CM, dia = dia, x = xoffset, dx = length )
################################################################
# Utility function for building a synapse, used for spines.
def buildSyn( name, compt, Ek, tau1, tau2, Gbar, CM ):
syn = moose.SynChan( compt.path + '/' + name )
syn.Ek = Ek
syn.tau1 = tau1
syn.tau2 = tau2
syn.Gbar = Gbar * compt.Cm / CM
#print "BUILD SYN: ", name, Gbar, syn.Gbar, CM
moose.connect( compt, 'channel', syn, 'channel' )
sh = moose.SimpleSynHandler( syn.path + '/sh' )
moose.connect( sh, 'activationOut', syn, 'activation' )
sh.numSynapses = 1
sh.synapse[0].weight = 1
return syn
######################################################################
# Utility function, borrowed from proto18.py, for making an LCa channel.
# Based on Traub's 91 model, I believe.
def make_LCa( name = 'LCa', parent = '/library' ):
EREST_ACT = -0.060 #/* hippocampal cell resting potl */
ECA = 0.140 + EREST_ACT #// 0.080
if moose.exists( parent + '/' + name ):
return
Ca = moose.HHChannel( parent + '/' + name )
Ca.Ek = ECA
Ca.Gbar = 0
Ca.Gk = 0
Ca.Xpower = 2
Ca.Ypower = 1
Ca.Zpower = 0
xgate = moose.element( parent + '/' + name + '/gateX' )
xA = np.array( [ 1.6e3, 0, 1.0, -1.0 * (0.065 + EREST_ACT), -0.01389, -20e3 * (0.0511 + EREST_ACT), 20e3, -1.0, -1.0 * (0.0511 + EREST_ACT), 5.0e-3, 3000, -0.1, 0.05 ] )
xgate.alphaParms = xA
ygate = moose.element( parent + '/' + name + '/gateY' )
ygate.min = -0.1
ygate.max = 0.05
ygate.divs = 3000
yA = np.zeros( (ygate.divs + 1), dtype=float)
yB = np.zeros( (ygate.divs + 1), dtype=float)
#Fill the Y_A table with alpha values and the Y_B table with (alpha+beta)
dx = (ygate.max - ygate.min)/ygate.divs
x = ygate.min
for i in range( ygate.divs + 1 ):
if ( x > EREST_ACT):
yA[i] = 5.0 * math.exp( -50 * (x - EREST_ACT) )
else:
yA[i] = 5.0
yB[i] = 5.0
x += dx
ygate.tableA = yA
ygate.tableB = yB
return Ca
################################################################
# API function for building spine prototypes. Here we put in the
# spine dimensions, and options for standard channel types.
# The synList tells it to create dual alpha function synchans:
# [name, Erev, tau1, tau2, conductance_density, connectToCa]
# The chanList tells it to copy over channels defined in /library
# and assign the specified conductance density.
# If caTau <= zero then there is no caConc created, otherwise it
# creates one and assigns the desired tau in seconds.
# With the default arguments here it will create a glu, NMDA and LCa,
# and add a Ca_conc.
def addSpineProto( name = 'spine',
parent = '/library',
RM = 1.0, RA = 1.0, CM = 0.01,
shaftLen = 1.e-6 , shaftDia = 0.2e-6,
headLen = 0.5e-6, headDia = 0.5e-6,
synList = (),
chanList = (),
caTau = 0.0
):
assert( moose.exists( parent ) ), "%s must exist" % parent
spine = moose.Neutral( parent + '/' + name )
shaft = buildComptWrapper( spine, 'shaft', shaftLen, shaftDia, 0.0, RM, RA, CM )
head = buildComptWrapper( spine, 'head', headLen, headDia, shaftLen, RM, RA, CM )
moose.connect( shaft, 'axial', head, 'raxial' )
if caTau > 0.0:
conc = moose.CaConc( head.path + '/Ca_conc' )
conc.tau = caTau
conc.length = head.length
conc.diameter = head.diameter
conc.thick = 0.0
# The 'B' field is deprecated.
# B = 1/(ion_charge * Faraday * volume)
#vol = head.length * head.diameter * head.diameter * PI / 4.0
#conc.B = 1.0 / ( 2.0 * FaradayConst * vol )
conc.Ca_base = 0.0
for i in synList:
syn = buildSyn( i[0], head, i[1], i[2], i[3], i[4], CM )
if i[5] and caTau > 0.0:
moose.connect( syn, 'IkOut', conc, 'current' )
for i in chanList:
if ( moose.exists( parent + '/' + i[0] ) ):
chan = moose.copy( parent + '/' + i[0], head )
else:
moose.setCwe( head )
chan = make_LCa()
chan.name = i[0]
moose.setCwe( '/' )
chan.Gbar = i[1] * head.Cm / CM
#print "CHAN = ", chan, chan.tick, chan.Gbar
moose.connect( head, 'channel', chan, 'channel' )
if i[2] and caTau > 0.0:
moose.connect( chan, 'IkOut', conc, 'current' )
transformNMDAR( parent + '/' + name )
return spine
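# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). The comment block
# above documents the entry formats:
#   synList : [name, Erev, tau1, tau2, conductance_density, connectToCa]
#   chanList: [name, conductance_density, connectToCa]
# A hedged example building a spine prototype with one glu synapse, an LCa
# channel and a Ca pool; it assumes a running MOOSE session and creates
# /library if needed.
def _spine_proto_sketch():
    if not moose.exists('/library'):
        moose.Neutral('/library')
    return addSpineProto(name='demo_spine', parent='/library',
                         synList=(['glu', 0.0, 2e-3, 9e-3, 200.0, False],),
                         chanList=(['LCa', 10.0, True],),
                         caTau=13.333e-3)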
#######################################################################
# Here are some compartment related prototyping functions
def makePassiveHHsoma(name = 'passiveHHsoma', parent='/library'):
''' Make an HH squid-model-sized compartment:
len and dia 500 microns. CM = 0.01 F/m^2, RM = 0.3333, RA = 3000
(values as passed to buildComptWrapper below).
'''
elecpath = parent + '/' + name
if not moose.exists( elecpath ):
elecid = moose.Neuron( elecpath )
dia = 500e-6
soma = buildComptWrapper( elecid, 'soma', dia, dia, 0.0,
0.33333333, 3000, 0.01 )
soma.initVm = -65e-3 # Resting of -65, from HH
soma.Em = -54.4e-3 # 10.6 mV above resting of -65, from HH
else:
elecid = moose.element( elecpath )
return elecid
# Wrapper function. This is used by the proto builder from rdesigneur
def makeActiveSpine(name = 'active_spine', parent='/library'):
return addSpineProto( name = name, parent = parent,
synList = ( ['glu', 0.0, 2e-3, 9e-3, 200.0, False],
['NMDA', 0.0, 20e-3, 20e-3, 80.0, True] ),
chanList = ( ['Ca', 10.0, True ], ),
caTau = 13.333e-3
)
# Wrapper function. This is used by the proto builder from rdesigneur
def makeExcSpine(name = 'exc_spine', parent='/library'):
return addSpineProto( name = name, parent = parent,
synList = ( ['glu', 0.0, 2e-3, 9e-3, 200.0, False],
['NMDA', 0.0, 20e-3, 20e-3, 80.0, True] ),
caTau = 13.333e-3 )
# Wrapper function. This is used by the proto builder from rdesigneur
def makePassiveSpine(name = 'passive_spine', parent='/library'):
return addSpineProto( name = name, parent = parent)
# legacy function. This is used by the proto builder from rdesigneur
def makeSpineProto( name ):
addSpineProto( name = name, chanList = () )
|
dharmasam9/moose-core
|
python/rdesigneur/rdesigneurProtos.py
|
Python
|
gpl-3.0
| 15,114
|
[
"MOOSE",
"NEURON"
] |
f04b2f9721765ecb07d4c73b3cbfa322b29942f77dda0c765f619ed7944690df
|
import numpy as np
import scipy.stats  # Note: no longer resi 0 but resi 1
import ALA3, experiment_loader
import mdtraj
ff = "amber96"
prior = "maxent"
bayesian_bootstrap_run_list = [0,1]
regularization_strength = ALA3.regularization_strength_dict[prior][ff]
predictions, measurements, uncertainties = experiment_loader.load(ff)
phi, psi, ass_raw, state_ind = experiment_loader.load_rama(ff, ALA3.stride)
p = np.vstack([np.loadtxt(ALA3.data_directory + "/frame_populations/pops_%s_%s_reg-%.1f-BB%d.dat" % (ff, prior, regularization_strength, bayesian_bootstrap_run)) for bayesian_bootstrap_run in bayesian_bootstrap_run_list]).mean(0)
traj = mdtraj.load("./trajectories/amber96.xtc", top="./pdbs/amber96.pdb")
ind0 = np.array([np.random.multinomial(1, (p ** 0) / float(len(p))).argmax() for i in xrange(3)])
ind = np.array([np.random.multinomial(1, p).argmax() for i in xrange(3)])
traj0 = traj[ind0]
traj1 = traj[ind]
traj0[1:].save(ALA3.outdir + "/info_graphic/ff96_raw_three.pdb")
traj1[1:].save(ALA3.outdir + "/info_graphic/ff96_BELT_three.pdb")
traj0[0:1].save(ALA3.outdir + "/info_graphic/ff96_raw_first_frame.pdb")
traj1[0:1].save(ALA3.outdir + "/info_graphic/ff96_BELT_first_frame.pdb")
|
kyleabeauchamp/EnsemblePaper
|
code/info_graphic/save_frames.py
|
Python
|
gpl-3.0
| 1,200
|
[
"MDTraj"
] |
0c6099003d3b6d8e8616b89c060399c856c466c0ca63344736f447c80475df8d
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
from gramps.gen.ggettext import ngettext
from functools import partial
import datetime
import time
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer as _nd
from gramps.gen.errors import ReportError
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SERIF, PARA_ALIGN_CENTER,
PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.docgen.fontscale import string_trim
from gramps.gen.plug.menu import (BooleanOption, StringOption, NumberOption,
EnumeratedListOption, FilterOption, PersonOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.utils.alive import probably_alive
from gramps.gen.datehandler import displayer as _dd, long_days
from gramps.gen.lib import Date, EventRoleType, EventType, Name, NameType, Person, Surname
import gramps.plugins.lib.libholiday as libholiday
from gramps.plugins.lib.libholiday import g2iso
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
pt2cm = ReportUtils.pt2cm
cm2pt = ReportUtils.cm2pt
#------------------------------------------------------------------------
#
# Calendar
#
#------------------------------------------------------------------------
class Calendar(Report):
"""
Create the Calendar object that produces the report.
"""
def __init__(self, database, options, user):
Report.__init__(self, database, options, user)
menu = options.menu
self._user = user
get_value = lambda name: menu.get_option_by_name(name).get_value()
self.year = get_value('year')
self.name_format = get_value('name_format')
self.country = get_value('country')
self.anniversaries = get_value('anniversaries')
self.start_dow = get_value('start_dow')
self.maiden_name = get_value('maiden_name')
self.alive = get_value('alive')
self.birthdays = get_value('birthdays')
self.text1 = get_value('text1')
self.text2 = get_value('text2')
self.text3 = get_value('text3')
self.filter_option = menu.get_option_by_name('filter')
self.filter = self.filter_option.get_filter()
pid = get_value('pid')
self.center_person = database.get_person_from_gramps_id(pid)
if (self.center_person == None) :
raise ReportError(_("Person %s is not in the Database") % pid )
def get_name(self, person, maiden_name = None):
""" Return person's name, unless maiden_name given,
unless married_name listed.
"""
# Get all of a person's names:
primary_name = person.get_primary_name()
married_name = None
names = [primary_name] + person.get_alternate_names()
for name in names:
if int(name.get_type()) == NameType.MARRIED:
married_name = name
break # use first
# Now, decide which to use:
if maiden_name is not None:
if married_name is not None:
name = Name(married_name)
else:
name = Name(primary_name)
surname = Surname()
surname.set_surname(maiden_name)
name.set_surname_list([surname])
else:
name = Name(primary_name)
name.set_display_as(self.name_format)
return _nd.display_name(name)
def draw_rectangle(self, style, sx, sy, ex, ey):
""" This should be in BaseDoc """
self.doc.draw_line(style, sx, sy, sx, ey)
self.doc.draw_line(style, sx, sy, ex, sy)
self.doc.draw_line(style, ex, sy, ex, ey)
self.doc.draw_line(style, sx, ey, ex, ey)
### The rest of these all have to deal with calendar specific things
def add_day_item(self, text, month, day, format="CAL-Text", marks=[None]):
""" Add an item to a day. """
month_dict = self.calendar.get(month, {})
day_list = month_dict.get(day, [])
day_list.append((format, text, marks))
month_dict[day] = day_list
self.calendar[month] = month_dict
def __get_holidays(self):
""" Get the holidays for the specified country and year """
holiday_table = libholiday.HolidayTable()
country = holiday_table.get_countries()[self.country]
holiday_table.load_holidays(self.year, country)
for month in range(1, 13):
for day in range(1, 32):
holiday_names = holiday_table.get_holidays(month, day)
for holiday_name in holiday_names:
self.add_day_item(holiday_name, month, day, "CAL-Holiday")
def write_report(self):
""" The short method that runs through each month and creates a page. """
# initialize the dict to fill:
self.calendar = {}
# get the information, first from holidays:
if self.country != 0:
self.__get_holidays()
# get data from database:
self.collect_data()
# generate the report:
self._user.begin_progress( _('Calendar Report'),
_('Formatting months...'), 12)
for month in range(1, 13):
self._user.step_progress()
self.print_page(month)
self._user.end_progress()
def print_page(self, month):
"""
This method actually writes the calendar page.
"""
style_sheet = self.doc.get_style_sheet()
ptitle = style_sheet.get_paragraph_style("CAL-Title")
ptext = style_sheet.get_paragraph_style("CAL-Text")
pdaynames = style_sheet.get_paragraph_style("CAL-Daynames")
pnumbers = style_sheet.get_paragraph_style("CAL-Numbers")
ptext1style = style_sheet.get_paragraph_style("CAL-Text1style")
self.doc.start_page()
width = self.doc.get_usable_width()
height = self.doc.get_usable_height()
header = 2.54 # one inch
mark = None
if month == 1:
mark = IndexMark(_('Calendar Report'), INDEX_TYPE_TOC, 1)
self.draw_rectangle("CAL-Border", 0, 0, width, height)
self.doc.draw_box("CAL-Title", "", 0, 0, width, header, mark)
self.doc.draw_line("CAL-Border", 0, header, width, header)
year = self.year
title = "%s %d" % (_dd.long_months[month].capitalize(), year)
mark = IndexMark(title, INDEX_TYPE_TOC, 2)
font_height = pt2cm(ptitle.get_font().get_size())
self.doc.center_text("CAL-Title", title,
width/2, font_height * 0.25, mark)
cell_width = width / 7
cell_height = (height - header)/ 6
current_date = datetime.date(year, month, 1)
spacing = pt2cm(1.25 * ptext.get_font().get_size()) # 158
if current_date.isoweekday() != g2iso(self.start_dow + 1):
# Go back to previous first day of week, and start from there
current_ord = (current_date.toordinal() -
((current_date.isoweekday() + 7) -
g2iso(self.start_dow + 1)) % 7)
else:
current_ord = current_date.toordinal()
for day_col in range(7):
font_height = pt2cm(pdaynames.get_font().get_size())
self.doc.center_text("CAL-Daynames",
long_days[(day_col+ g2iso(self.start_dow + 1))
% 7 + 1].capitalize(),
day_col * cell_width + cell_width/2,
header - font_height * 1.5)
for week_row in range(6):
something_this_week = 0
for day_col in range(7):
thisday = current_date.fromordinal(current_ord)
if thisday.month == month:
something_this_week = 1
self.draw_rectangle("CAL-Border", day_col * cell_width,
header + week_row * cell_height,
(day_col + 1) * cell_width,
header + (week_row + 1) * cell_height)
last_edge = (day_col + 1) * cell_width
self.doc.center_text("CAL-Numbers", str(thisday.day),
day_col * cell_width + cell_width/2,
header + week_row * cell_height)
list_ = self.calendar.get(month, {}).get(thisday.day, [])
list_.sort() # to get CAL-Holiday on bottom
position = 0.0
for (format, p, m_list) in list_:
lines = p.count("\n") + 1 # lines in the text
position += (lines * spacing)
current = 0
for line in p.split("\n"):
# make sure text will fit:
numpos = pt2cm(pnumbers.get_font().get_size())
if position + (current * spacing) - 0.1 >= cell_height - numpos: # font daynums
continue
font = ptext.get_font()
line = string_trim(font, line, cm2pt(cell_width + 0.2))
self.doc.draw_text(format, line,
day_col * cell_width + 0.1,
header + (week_row + 1) * cell_height - position + (current * spacing) - 0.1, m_list[0])
if len(m_list) > 1: # index the spouse too
self.doc.draw_text(format, "",0,0, m_list[1])
current += 1
current_ord += 1
if not something_this_week:
last_edge = 0
font_height = pt2cm(1.5 * ptext1style.get_font().get_size())
x = last_edge + (width - last_edge)/2
y = height - font_height
self.doc.center_text("CAL-Text1style", self.text1, x, y * 3)
self.doc.center_text("CAL-Text2style", self.text2, x, y * 2)
self.doc.center_text("CAL-Text3style", self.text3, x, y * 1)
self.doc.end_page()
def collect_data(self):
"""
This method runs through the data, and collects the relevant dates
and text.
"""
db = self.database
people = db.iter_person_handles()
self._user.begin_progress(_('Calendar Report'),
_('Applying Filter...'),
db.get_number_of_people())
people = self.filter.apply(self.database, people,
self._user.step_progress)
rel_calc = get_relationship_calculator()
self._user.end_progress()
self._user.begin_progress(_('Calendar Report'),
_('Reading database...'), len(people))
for person_handle in people:
self._user.step_progress()
person = db.get_person_from_handle(person_handle)
mark = ReportUtils.get_person_mark(db, person)
birth_ref = person.get_birth_ref()
birth_date = None
if birth_ref:
birth_event = db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
if (self.birthdays and birth_date is not None and birth_date.is_valid()):
year = birth_date.get_year()
month = birth_date.get_month()
day = birth_date.get_day()
prob_alive_date = Date(self.year, month, day)
nyears = self.year - year
# add some things to handle maiden name:
father_lastname = None # husband, actually
if self.maiden_name in ['spouse_first', 'spouse_last']: # get husband's last name:
if person.get_gender() == Person.FEMALE:
family_list = person.get_family_handle_list()
if family_list:
if self.maiden_name == 'spouse_first':
fhandle = family_list[0]
else:
fhandle = family_list[-1]
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if mother_handle == person_handle:
if father_handle:
father = db.get_person_from_handle(father_handle)
if father:
father_lastname = father.get_primary_name().get_surname()
short_name = self.get_name(person, father_lastname)
alive = probably_alive(person, db, prob_alive_date)
if not self.alive or alive:
if nyears == 0:
text = _('%(person)s, birth%(relation)s') % {
'person' : short_name,
'relation' : ""}
else:
text = (ngettext('%(person)s, %(age)d%(relation)s',
'%(person)s, %(age)d%(relation)s', nyears)
% {'person' : short_name,
'age' : nyears,
'relation' : ""})
self.add_day_item(text, month, day, marks=[mark])
if self.anniversaries:
family_list = person.get_family_handle_list()
for fhandle in family_list:
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if father_handle == person.get_handle():
spouse_handle = mother_handle
else:
continue # with next person if the father is not "person"
# this will keep from duplicating the anniversary
if spouse_handle:
spouse = db.get_person_from_handle(spouse_handle)
if spouse:
s_m = ReportUtils.get_person_mark(db, spouse)
spouse_name = self.get_name(spouse)
short_name = self.get_name(person)
# TEMP: this will handle ordered events
# GRAMPS 3.0 will have a new mechanism for start/stop events
are_married = None
for event_ref in fam.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
et = EventType
rt = EventRoleType
if event.type in [et.MARRIAGE,
et.MARR_ALT] and \
(event_ref.get_role() == rt.FAMILY or
event_ref.get_role() == rt.PRIMARY ):
are_married = event
elif event.type in [et.DIVORCE,
et.ANNULMENT,
et.DIV_FILING] and \
(event_ref.get_role() == rt.FAMILY or
event_ref.get_role() == rt.PRIMARY ):
are_married = None
if are_married is not None:
for event_ref in fam.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
event_obj = event.get_date_object()
if event_obj.is_valid():
year = event_obj.get_year()
month = event_obj.get_month()
day = event_obj.get_day()
prob_alive_date = Date(self.year, month, day)
nyears = self.year - year
if nyears == 0:
text = _('%(spouse)s and\n %(person)s, wedding') % {
'spouse' : spouse_name,
'person' : short_name,
}
else:
text = (ngettext("%(spouse)s and\n %(person)s, %(nyears)d",
"%(spouse)s and\n %(person)s, %(nyears)d", nyears)
% {'spouse' : spouse_name,
'person' : short_name,
'nyears' : nyears})
alive1 = probably_alive(person, self.database,
prob_alive_date)
alive2 = probably_alive(spouse, self.database,
prob_alive_date)
if ((self.alive and alive1 and alive2) or not self.alive):
self.add_day_item(text, month, day,
marks=[mark,s_m])
self._user.end_progress()
#------------------------------------------------------------------------
#
# CalendarOptions
#
#------------------------------------------------------------------------
class CalendarOptions(MenuReportOptions):
""" Calendar options for graphic calendar """
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.__filter = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
""" Add the options for the graphical calendar """
add_option = partial(menu.add_option, _("Report Options"))
year = NumberOption(_("Year of calendar"), time.localtime()[0],
1000, 3000)
year.set_help(_("Year of calendar"))
add_option("year", year)
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on calendar"))
add_option("filter", self.__filter)
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the report"))
add_option("pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self.__update_filters()
# We must figure out the value of the first option before we can
# create the EnumeratedListOption
fmt_list = _nd.get_name_format()
name_format = EnumeratedListOption(_("Name format"), fmt_list[0][0])
for num, name, fmt_str, act in fmt_list:
name_format.add_item(num, name)
name_format.set_help(_("Select the format to display names"))
add_option("name_format", name_format)
country = EnumeratedListOption(_("Country for holidays"), 0)
holiday_table = libholiday.HolidayTable()
countries = holiday_table.get_countries()
countries.sort()
if (len(countries) == 0 or
(len(countries) > 0 and countries[0] != '')):
countries.insert(0, '')
count = 0
for c in countries:
country.add_item(count, c)
count += 1
country.set_help(_("Select the country to see associated holidays"))
add_option("country", country)
start_dow = EnumeratedListOption(_("First day of week"), 1)
for count in range(1, 8):
# conversion between gramps numbering (sun=1) and iso numbering (mon=1) of weekdays below
start_dow.add_item((count+5) % 7 + 1, long_days[count].capitalize())
start_dow.set_help(_("Select the first day of the week for the calendar"))
add_option("start_dow", start_dow)
maiden_name = EnumeratedListOption(_("Birthday surname"), "own")
maiden_name.add_item("spouse_first", _("Wives use husband's surname (from first family listed)"))
maiden_name.add_item("spouse_last", _("Wives use husband's surname (from last family listed)"))
maiden_name.add_item("own", _("Wives use their own surname"))
maiden_name.set_help(_("Select married women's displayed surname"))
add_option("maiden_name", maiden_name)
alive = BooleanOption(_("Include only living people"), True)
alive.set_help(_("Include only living people in the calendar"))
add_option("alive", alive)
birthdays = BooleanOption(_("Include birthdays"), True)
birthdays.set_help(_("Include birthdays in the calendar"))
add_option("birthdays", birthdays)
anniversaries = BooleanOption(_("Include anniversaries"), True)
anniversaries.set_help(_("Include anniversaries in the calendar"))
add_option("anniversaries", anniversaries)
category_name = _("Text Options")
add_option = partial(menu.add_option, _("Text Options"))
text1 = StringOption(_("Text Area 1"), _("My Calendar"))
text1.set_help(_("First line of text at bottom of calendar"))
add_option("text1", text1)
text2 = StringOption(_("Text Area 2"), _("Produced with Gramps"))
text2.set_help(_("Second line of text at bottom of calendar"))
add_option("text2", text2)
text3 = StringOption(_("Text Area 3"), "http://gramps-project.org/",)
text3.set_help(_("Third line of text at bottom of calendar"))
add_option("text3", text3)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
filter_list = ReportUtils.get_person_filters(person, False)
self.__filter.set_filters(filter_list)
def make_my_style(self, default_style, name, description,
size=9, font=FONT_SERIF, justified ="left",
color=None, align=PARA_ALIGN_CENTER,
shadow = None, italic=0, bold=0, borders=0, indent=None):
""" Create paragraph and graphic styles of the same name """
# Paragraph:
f = FontStyle()
f.set_size(size)
f.set_type_face(font)
f.set_italic(italic)
f.set_bold(bold)
p = ParagraphStyle()
p.set_font(f)
p.set_alignment(align)
p.set_description(description)
p.set_top_border(borders)
p.set_left_border(borders)
p.set_bottom_border(borders)
p.set_right_border(borders)
if indent:
p.set(first_indent=indent)
if justified == "left":
p.set_alignment(PARA_ALIGN_LEFT)
elif justified == "right":
p.set_alignment(PARA_ALIGN_RIGHT)
elif justified == "center":
p.set_alignment(PARA_ALIGN_CENTER)
default_style.add_paragraph_style(name, p)
# Graphics:
g = GraphicsStyle()
g.set_paragraph_style(name)
if shadow:
g.set_shadow(*shadow)
if color is not None:
g.set_fill_color(color)
if not borders:
g.set_line_width(0)
default_style.add_draw_style(name, g)
def make_default_style(self, default_style):
""" Add the styles used in this report """
self.make_my_style(default_style, "CAL-Title",
_('Title text and background color'), 20,
bold=1, italic=1,
color=(0xEA, 0xEA, 0xEA))
self.make_my_style(default_style, "CAL-Numbers",
_('Calendar day numbers'), 13,
bold=1)
self.make_my_style(default_style, "CAL-Text",
_('Daily text display'), 9)
self.make_my_style(default_style, "CAL-Holiday",
_('Holiday text display'), 9,
bold=1, italic=1)
self.make_my_style(default_style, "CAL-Daynames",
_('Days of the week text'), 12,
italic=1, bold=1,
color = (0xEA, 0xEA, 0xEA))
self.make_my_style(default_style, "CAL-Text1style",
_('Text at bottom, line 1'), 12)
self.make_my_style(default_style, "CAL-Text2style",
_('Text at bottom, line 2'), 12)
self.make_my_style(default_style, "CAL-Text3style",
_('Text at bottom, line 3'), 9)
self.make_my_style(default_style, "CAL-Border",
_('Borders'), borders=True)
|
arunkgupta/gramps
|
gramps/plugins/drawreport/calendarreport.py
|
Python
|
gpl-2.0
| 27,262
|
[
"Brian"
] |
b19c9a995868d1de2ad3f72061c3cc19adc0b93aadb2f012c926a35078c4a458
|
#! /usr/bin/env python2.7
'''blastextract.py
A script that automates the pipeline between blastn and extract.py. In the
future, this script may automate the role of mafft or some other multiple
alignment program; for now, you must manually run the multiple aligner of your
choice (we recommend ginsi, which ships with mafft) on the output of this
program. This program requires a working installation of BLAST+; see the
accompanying file BLASTHELP for more information on this.
EDIT (Oct 2015):
The "accompanying" file BLASTHELP has been lost, and in the intervening four
years, the author has forgotten what little he knew in the first place. The
curious hacker may consult the BLAST+ home page, whose URL is given below. Note
that this project was developed in summer 2011, so it may rely upon an older
version of BLAST+.
BLAST+ url:
https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download
'''
import glob, os, sys, os.path as path, subprocess as proc, argparse as arg
import textwrap, itertools as it, signal
import fasta, classify, extract, utils
def main(args):
return args.func(args,args.blargs) # args.func can be blast_func,
# extract_func, or ext_from_archive_func
def functionmaker(f):
def new_f(args,blargs):
tups = tuple(map(tuple,f(args,blargs)))
if args.dry_run:
print pipestr(tups,stdout=args.out,append=args.mode=='a')
return 0
with utils.quickopen(args.out,args.mode) as out:
process = pipe_together(tups,stdout=out,shell=False,bufsize=-1)
return process.wait()
return new_f
@functionmaker
def blast_func(args,blargs):
if args.archive and args.mode=='a':
raise CmdLineError('Cannot append to an archive.')
yield blast_cmd_tup(query=args.query,subject=args.subject,db=args.db,
archive=args.archive,*blargs)
if not args.archive: yield sed_cmd_tup()
@functionmaker
def extract_func(args,blargs):
    if args.query == '-':
raise CmdLineError('Cannot use stdin for query')
yield blast_cmd_tup(query=args.query,db=args.db,subject=args.subject,
archive=False,*blargs)
yield sed_cmd_tup()
yield te_extraction_tup(args)
yield ('cat',args.query,'-')
@functionmaker
def ext_from_archive_func(args,blargs):
if blargs:
raise CmdLineError('option {0[0]} not understood'.format(blargs))
if args.query=='-':
raise CmdLineError('Cannot use stdin for query')
yield ('blast_formatter','-outfmt',_std_outfmt,'-archive',args.archive)
yield sed_cmd_tup()
yield te_extraction_tup(args)
if args.query is not None: yield ('cat',args.query,'-')
# should subclass all error classes for here.
class LocalError(Exception):
errcode = 1
# for errors of command line.
class CmdLineError(LocalError):
errcode = 2
# when files are not found
class FileNotFoundError(LocalError):
errcode = 3
_std_outfmt = '10 ' + ' '.join(classify.allflds).lower()
def blast_cmd_tup(query=None,db=None,subject=None,archive=None,*args):
for x in ('-html','-outfmt'):
if x in args:
raise CmdLineError('cannot request {} with this tool. '.format(x) +
'If you need it, use blastn instead.')
for x in ('-help','-query','-subject','-db'):
if x in args:
raise CmdLineError('{0} option invalid; use -{0} instead'.format(x))
yield 'blastn'
yield '-query' ; yield '-' if query is None else query
if db is None:
yield '-subject' ; yield subject
else:
yield '-db' ; yield db
yield '-outfmt' ; yield '11' if archive else _std_outfmt
for x in args: yield x
def sed_cmd_tup(input=None):
yield 'sed' ; yield '1i\\\n{}\n'.format(','.join(classify.allflds))
if input is not None: yield input
def te_extraction_tup(args,input=None):
if extract.__file__.endswith('.pyc'): yield extract.__file__[:-1]
else: yield extract.__file__
for x in ('out','min_distance','min_length','max_overlap',
'evalue_threshold'):
val = getattr(args,x)
if val is not None:
yield '--{}={}'.format(x.replace('_','-'),val)
yield '-' if input is None else input
def pipestr(tups,stdout=None,stdin=None,append=False):
def stringify(s):
if any(c in s for c in '\n$!\\` "\t'):
return "'{}'".format(s.replace("'",r"\'"))
if any(c in s for c in " \t'"): return '"{}"'.format(s)
return s
def maketup(t):
st = ''
for w in t:
newst = st + (st and ' ') + w
if len(newst) >= 67:
yield st or newst
newst = w if st else ''
st = newst
if st: yield st
tups = [map(stringify,t) for t in tups]
lines = [' \\\n '.join(maketup(t)) for t in tups]
cmdstr = ' | \\\n '.join(lines)
if stdin not in (None,'-'): cmdstr += ' < ' + stdin
if stdout not in (None,'-'): cmdstr += (' >> ' if append else ' > ')+stdout
return cmdstr
def pipe_together(argtuples,stdout=None,stdin=None,**kwds):
if not argtuples: raise TypeError('must have at least one tuple for pipe')
streams = [stdin]
for n,tup in enumerate(argtuples):
nosigpipe = tup[0].startswith('python') or tup[0].endswith('.py')
p = proc.Popen(tup,
stdin=streams[-1],
stdout=stdout if n+1==len(argtuples) else proc.PIPE,
preexec_fn=None if nosigpipe else utils.restoresigpipe,
**kwds)
streams.append(p.stdout)
return p
# action that checks whether the argument is a valid file
class FileCheckAction(arg.Action):
def __call__(self,parser,namespace,values,option_string=None):
        if values is not None and values != '-' and not path.isfile(values):
raise FileNotFoundError('%r is not a valid file name' % values)
setattr(namespace,self.dest,values)
def make_parser():
def doeachparser(parser):
parser.add_argument('-o','--out',default='-',help='''
File to which to write output. To write to standard output, give
- as the argument to --output; this behavior is the default.''')
parser.add_argument('-a','--append',action='store_const',const='a',
dest='mode',default='w',help='''
If --append is specified, output is added to the output file, as
opposed to the default behavior of overwriting it.''')
parser.add_argument('--dry-run',action='store_true', help='''
If specified, %(prog)s will print a shell-able version of the
commands it executes, and then exit. May help users learn to
use the underlying tools directly.''')
def doblastparser(parser):
subj = parser.add_mutually_exclusive_group(required=True)
subj.add_argument('--db',help='''\
A database containing subject sequences (usually a genome
sequence) for the blast search. The argument may be a path to
an existing database (e.g. --db=path/to/dvir); if it is not
(e.g. --db=dvir), BLAST+ assumes it is a database name and
tries to figure out where it is, using the $BLASTDB environment
variable. This may cause cryptic errors; see the accompanying
file BLASTDB for some troubleshooting information.''')
subj.add_argument('-s','--subject',action=FileCheckAction,help='''\
The subject sequences for the blast search; usually a genome.''')
parser.add_argument('-q','--query',action=FileCheckAction,help='''\
The query sequence(s) for the blast search; should be a single
transposon sequence.''')
parser.add_argument('blargs',nargs='*',help='''Any unrecognized
arguments (i.e., those not mentioned elsewhere) are passed
directly to blastn, allowing the use of options like -penalty or
-ungapped (type blastn -help for a full list). Note that to avoid
ambiguity (e.g. %(prog)s may confuse blastn's "-penalty" with
extract.py's option -p), you should put -- before your blastn args,
e.g. %(prog)s --db dvir -- -penalty 4''')
parser = arg.ArgumentParser(formatter_class=arg.RawDescriptionHelpFormatter,
description=__doc__ + '''
Type %(prog)s CMD -h to see help for a particular command.''')
subparsers = parser.add_subparsers()
blast = subparsers.add_parser('blast',
description='''Do only a blast search, and postpone the extraction step.
Output will be a CSV file with a header, or a BLAST archive if the
--archive option is given. You may give other blast parameters if
you wish (type blastn -help at the terminal for a full list), as any
options not recognized by %(prog)s will be given unchanged to
blastn.''')
doblastparser(blast)
blast.add_argument('--archive',action='store_true',help='''
By default, this command writes as its output a CSV file, treatable
by extract.py directly. If this option is set, output is instead
written in a special archive format known as ASN.1, which can be
inspected separately from the present process using blast_formatter.''')
doeachparser(blast)
blast.set_defaults(func=blast_func)
ext_from_archive = subparsers.add_parser('ar-extract',
fromfile_prefix_chars='@',
description='''This command retrieves a blast search from an archive
file -- i.e. one produced by '%(prog)s blast --archive' or by
'blastn -outfmt 11' -- and performs extraction on the results.''')
doeachparser(ext_from_archive)
ext_from_archive.add_argument('archive',action=FileCheckAction,
help="The archive file. Use - to retrieve from stdin.")
ext_from_archive.add_argument('-q','--query',action=FileCheckAction,
help='''The query file from which the BLAST search was made. If
given, this file is prepended to the extraction results, which is
usually desirable.''')
extract.makeparser(ext_from_archive)
ext_from_archive.set_defaults(func=ext_from_archive_func)
ext_from_archive.set_defaults(blargs=())
extract_p = subparsers.add_parser('extract',
fromfile_prefix_chars='@',
description='''This command pastes together the blast search and the
transposon extraction. As with '%(prog)s blast', unknown options
will be passed to blastn, allowing you to give custom parameters
to your blast search.''')
doblastparser(extract_p) ; doeachparser(extract_p)
extract.makeparser(extract_p)
extract_p.set_defaults(func=extract_func)
return parser
if __name__ == '__main__':
parser = make_parser()
try: val = main(parser.parse_args())
except LocalError as e:
if not isinstance(e,FileNotFoundError): parser.print_usage(sys.stderr)
print >>sys.stderr, parser.prog,': error: ', e
sys.exit(e.errcode)
else: sys.exit(val)
|
jpassaro/seq-align-prep
|
blastextract.py
|
Python
|
gpl-2.0
| 11,395
|
[
"BLAST"
] |
e1f635d9e6346ff01f0ce3d030edaa8daa50d6a460d784f65891ff6a98954ab4
|
from __future__ import division
import logging
import numpy as np
import networkx as nx
from pgmpy.models import BayesianModel
from pgmpy.factors.continuous import LinearGaussianCPD
from pgmpy.factors.continuous import JointGaussianDistribution
class LinearGaussianBayesianNetwork(BayesianModel):
"""
    A Linear Gaussian Bayesian Network is a Bayesian Network, all
of whose variables are continuous, and where all of the CPDs
are linear Gaussians.
An important result is that the linear Gaussian Bayesian Networks
are an alternative representation for the class of multivariate
Gaussian distributions.
"""
def add_cpds(self, *cpds):
"""
Add linear Gaussian CPD (Conditional Probability Distribution)
to the Bayesian Model.
Parameters
----------
cpds : instances of LinearGaussianCPD
List of LinearGaussianCPDs which will be associated with the model
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', 1, 4)
>>> cpd2 = LinearGaussianCPD('x2', -5, 4, ['x1'], [0.5])
>>> cpd3 = LinearGaussianCPD('x3', 4, 3, ['x2'], [-1])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> for cpd in model.cpds:
print(cpd)
P(x1) = N(1; 4)
P(x2| x1) = N(0.5*x1_mu); -5)
P(x3| x2) = N(-1*x2_mu); 4)
"""
for cpd in cpds:
if not isinstance(cpd, LinearGaussianCPD):
raise ValueError('Only LinearGaussianCPD can be added.')
if set(cpd.variables) - set(cpd.variables).intersection(
set(self.nodes())):
raise ValueError('CPD defined on variable not in the model', cpd)
for prev_cpd_index in range(len(self.cpds)):
if self.cpds[prev_cpd_index].variable == cpd.variable:
logging.warning("Replacing existing CPD for {var}".format(var=cpd.variable))
self.cpds[prev_cpd_index] = cpd
break
else:
self.cpds.append(cpd)
def get_cpds(self, node=None):
"""
Returns the cpd of the node. If node is not specified returns all the CPDs
that have been added till now to the graph
Parameter
---------
node: any hashable python object (optional)
The node whose CPD we want. If node not specified returns all the
CPDs added to the model.
Returns
-------
A list of linear Gaussian CPDs.
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', 1, 4)
>>> cpd2 = LinearGaussianCPD('x2', -5, 4, ['x1'], [0.5])
>>> cpd3 = LinearGaussianCPD('x3', 4, 3, ['x2'], [-1])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> model.get_cpds()
"""
return super(LinearGaussianBayesianNetwork, self).get_cpds(node)
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds: LinearGaussianCPD object
A LinearGaussianCPD object on any subset of the variables
of the model which is to be associated with the model.
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', 1, 4)
>>> cpd2 = LinearGaussianCPD('x2', -5, 4, ['x1'], [0.5])
>>> cpd3 = LinearGaussianCPD('x3', 4, 3, ['x2'], [-1])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> for cpd in model.get_cpds():
print(cpd)
P(x1) = N(1; 4)
P(x2| x1) = N(0.5*x1_mu); -5)
P(x3| x2) = N(-1*x2_mu); 4)
>>> model.remove_cpds(cpd2, cpd3)
>>> for cpd in model.get_cpds():
print(cpd)
P(x1) = N(1; 4)
"""
return super(LinearGaussianBayesianNetwork, self).remove_cpds(*cpds)
def to_joint_gaussian(self):
"""
The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
JointGaussianDistribution: An equivalent joint Gaussian
distribution for the network.
Reference
---------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', 1, 4)
>>> cpd2 = LinearGaussianCPD('x2', -5, 4, ['x1'], [0.5])
>>> cpd3 = LinearGaussianCPD('x3', 4, 3, ['x2'], [-1])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
"""
        variables = list(nx.topological_sort(self))
mean = np.zeros(len(variables))
covariance = np.zeros((len(variables), len(variables)))
for node_idx in range(len(variables)):
cpd = self.get_cpds(variables[node_idx])
mean[node_idx] = sum([coeff * mean[variables.index(parent)] for
coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.beta_0
covariance[node_idx, node_idx] = sum(
[coeff * coeff * covariance[variables.index(parent), variables.index(parent)]
for coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.variance
for node_i_idx in range(len(variables)):
for node_j_idx in range(len(variables)):
if covariance[node_j_idx, node_i_idx] != 0:
covariance[node_i_idx, node_j_idx] = covariance[node_j_idx, node_i_idx]
else:
cpd_j = self.get_cpds(variables[node_j_idx])
covariance[node_i_idx, node_j_idx] = sum(
[coeff * covariance[node_i_idx, variables.index(parent)]
for coeff, parent in zip(cpd_j.beta_vector, cpd_j.evidence)])
return JointGaussianDistribution(variables, mean, covariance)
def check_model(self):
"""
Checks the model for various errors. This method checks for the following
error -
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks pass.
"""
for node in self.nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, LinearGaussianCPD):
if set(cpd.evidence) != set(self.get_parents(node)):
raise ValueError("CPD associated with %s doesn't have "
"proper parents associated with it." % node)
return True
def get_cardinality(self, node):
"""
Cardinality is not defined for continuous variables.
"""
raise ValueError("Cardinality is not defined for continuous variables.")
def fit(self, data, estimator_type=None, state_names=[], complete_samples_only=True, **kwargs):
"""
For now, fit method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError("fit method has not been implemented for LinearGaussianBayesianNetwork.")
def predict(self, data):
"""
For now, predict method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError("predict method has not been implemented for LinearGaussianBayesianNetwork.")
def to_markov_model(self):
"""
For now, to_markov_model method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError("to_markov_model method has not been implemented for LinearGaussianBayesianNetwork.")
def is_imap(self, JPD):
"""
For now, is_imap method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError("is_imap method has not been implemented for LinearGaussianBayesianNetwork.")
|
abinashpanda/pgmpy
|
pgmpy/models/LinearGaussianBayesianNetwork.py
|
Python
|
mit
| 9,162
|
[
"Gaussian"
] |
03842fc216a30e160181195f0d0d589f668ed281debbe0889f2c170ac645514e
|
"""An interface for publishing rich data to frontends.
There are two components of the display system:
* Display formatters, which take a Python object and compute the
representation of the object in various formats (text, HTML, SVG, etc.).
* The display publisher that is used to send the representation data to the
various frontends.
This module defines the logic of display publishing. The display publisher uses
the ``display_data`` message type that is defined in the IPython messaging
spec.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from IPython.config.configurable import Configurable
from IPython.utils import io
#-----------------------------------------------------------------------------
# Main payload class
#-----------------------------------------------------------------------------
class DisplayPublisher(Configurable):
"""A traited class that publishes display data to frontends.
Instances of this class are created by the main IPython object and should
be accessed there.
"""
def _validate_data(self, source, data, metadata=None):
"""Validate the display data.
Parameters
----------
source : str
The fully dotted name of the callable that created the data, like
:func:`foo.bar.my_formatter`.
data : dict
            The format data dictionary.
metadata : dict
Any metadata for the data.
"""
if not isinstance(source, basestring):
raise TypeError('source must be a str, got: %r' % source)
if not isinstance(data, dict):
raise TypeError('data must be a dict, got: %r' % data)
if metadata is not None:
if not isinstance(metadata, dict):
                raise TypeError('metadata must be a dict, got: %r' % metadata)
def publish(self, source, data, metadata=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
source : str
            A string that gives the function or method that created the data,
such as 'IPython.core.page'.
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. Metadata specific to each mime-type can be specified
in the metadata dict with the same mime-type keys as
the data itself.
"""
# The default is to simply write the plain text data using io.stdout.
if 'text/plain' in data:
print(data['text/plain'], file=io.stdout)
def clear_output(self, stdout=True, stderr=True, other=True):
"""Clear the output of the cell receiving output."""
if stdout:
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
if stderr:
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
def publish_display_data(source, data, metadata=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
source : str
        A string that gives the function or method that created the data,
such as 'IPython.core.page'.
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
source,
data,
metadata
)
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/core/displaypub.py
|
Python
|
apache-2.0
| 5,918
|
[
"Brian"
] |
6be84973325239f791999df1b07b2d53ab1108acc3046858edc98a6a117b241c
|
#!/usr/bin/env python
# coding: utf-8
# # Gradient Methods
#
#
# ## Top-down start
#
# * We will start with a top-down view, with a simple harmonic oscillator problem in one dimension as case.
#
# * Thereafter we continue with implementing the simplest possible steepest descent approach to our two-electron problem with an electrostatic (Coulomb) interaction. Our code includes also importance sampling. The simple Python code here illustrates the basic elements which need to be included in our own code.
#
# * Then we move on to the mathematical description of various gradient methods.
#
# ## Motivation
# Our aim with this part of the project is to be able to
# * find an optimal value for the variational parameters using only some few Monte Carlo cycles
#
# * use these optimal values for the variational parameters to perform a large-scale Monte Carlo calculation
#
# To achieve this we will look at methods like *Steepest descent* and the *conjugate gradient method*. Both these methods allow us to find
# the minima of a multivariable function like our energy (function of several variational parameters).
# Alternatively, you can always use Newton's method. In particular, since we will normally have one variational parameter,
# Newton's method can be easily used in finding the minimum of the local energy.
#
#
#
#
# ## Simple example and demonstration
#
# Let us illustrate what is needed in our calculations using a simple example, the harmonic oscillator in one dimension.
# For the harmonic oscillator in one-dimension we have a trial wave function and probability
# $$
# \psi_T(x;\alpha) = \exp{-(\frac{1}{2}\alpha^2x^2)},
# $$
# which results in a local energy
# $$
# \frac{1}{2}\left(\alpha^2+x^2(1-\alpha^4)\right).
# $$
# We can compare our numerically calculated energies with the exact energy as function of $\alpha$
# $$
# \overline{E}[\alpha] = \frac{1}{4}\left(\alpha^2+\frac{1}{\alpha^2}\right).
# $$
# ## Simple example and demonstration
# The derivative of the energy with respect to $\alpha$ gives
# $$
# \frac{d\langle E_L[\alpha]\rangle}{d\alpha} = \frac{1}{2}\alpha-\frac{1}{2\alpha^3}
# $$
# and a second derivative which is always positive (meaning that we find a minimum)
# $$
# \frac{d^2\langle E_L[\alpha]\rangle}{d\alpha^2} = \frac{1}{2}+\frac{3}{2\alpha^4}
# $$
# The condition
# $$
# \frac{d\langle E_L[\alpha]\rangle}{d\alpha} = 0,
# $$
# gives the optimal $\alpha=1$, as expected.
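#
# As a quick numerical cross-check (an editor's sketch, not part of the original notes), we can evaluate the exact energy and its derivative on a grid of $\alpha$ values and verify that the minimum indeed sits at $\alpha=1$.
# In[ ]:
import numpy as np
# Editor's sketch: exact energy and its derivative for the 1d harmonic oscillator model
alphas = np.linspace(0.5, 2.0, 151)              # grid that contains alpha = 1.0
energy_exact = 0.25*(alphas**2 + 1.0/alphas**2)
denergy_exact = 0.5*alphas - 0.5/alphas**3
imin = np.argmin(energy_exact)
print("alpha at the minimum:", alphas[imin])     # close to 1
print("derivative at the minimum:", denergy_exact[imin])  # close to 0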
#
#
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 1: Find the local energy for the harmonic oscillator
#
#
# **a)**
# Derive the local energy for the harmonic oscillator in one dimension and find its expectation value.
#
# **b)**
# Show also that the optimal value is $\alpha=1$.
#
# **c)**
# Repeat the above steps in two dimensions for $N$ bosons or electrons. What is the optimal value of $\alpha$?
#
#
#
# <!-- --- end exercise --- -->
#
#
# ## Variance in the simple model
# We can also minimize the variance. In our simple model the variance is
# $$
# \sigma^2[\alpha]=\frac{1}{4}\left(1+(1-\alpha^4)^2\frac{3}{4\alpha^4}\right)-\overline{E}^2.
# $$
# which yields a second derivative which is always positive.
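#
# As a symbolic cross-check (an editor's addition, not from the original notes), we can let **sympy** differentiate this variance twice and verify that the second derivative is positive for all $\alpha>0$.
# In[ ]:
import sympy as sp
# Editor's sketch: second derivative of the variance in the simple 1d model
a = sp.symbols('alpha', positive=True)
Ebar = sp.Rational(1, 4)*(a**2 + 1/a**2)
variance = sp.Rational(1, 4)*(1 + (1 - a**4)**2*sp.Rational(3, 4)/a**4) - Ebar**2
second_derivative = sp.simplify(sp.diff(variance, a, 2))
print(second_derivative)
# a few numerical spot checks; the expression is positive for any alpha > 0
for val in (0.5, 1.0, 2.0):
    print(val, float(second_derivative.subs(a, val)))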
#
#
#
#
#
# ## Computing the derivatives
#
# In general we end up computing the expectation value of the energy in terms
# of some parameters $\alpha_0,\alpha_1,\dots,\alpha_n$
# and we search for a minimum in this multi-variable parameter space.
# This leads to an energy minimization problem *where we need the derivative of the energy as a function of the variational parameters*.
#
# In the above example this was easy and we were able to find the expression for the derivative by simple derivations.
# However, in our actual calculations the energy is represented by a multi-dimensional integral with several variational parameters.
# How can we can then obtain the derivatives of the energy with respect to the variational parameters without having
# to resort to expensive numerical derivations?
#
#
#
#
#
# ## Expressions for finding the derivatives of the local energy
#
# To find the derivatives of the local energy expectation value as function of the variational parameters, we can use the chain rule and the hermiticity of the Hamiltonian.
#
# Let us define
# $$
# \bar{E}_{\alpha}=\frac{d\langle E_L[\alpha]\rangle}{d\alpha}.
# $$
# as the derivative of the energy with respect to the variational parameter $\alpha$ (we limit ourselves to one parameter only).
# In the above example this was easy and we obtain a simple expression for the derivative.
# We define also the derivative of the trial function (skipping the subindex $T$) as
# $$
# \bar{\psi}_{\alpha}=\frac{d\psi[\alpha]}{d\alpha}.
# $$
# ## Derivatives of the local energy
# The elements of the gradient of the local energy are then (using the chain rule and the hermiticity of the Hamiltonian)
# $$
# \bar{E}_{\alpha} = 2\left( \langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}E_L[\alpha]\rangle -\langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}\rangle\langle E_L[\alpha] \rangle\right).
# $$
# From a computational point of view it means that you need to compute the expectation values of
# $$
# \langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}E_L[\alpha]\rangle,
# $$
# and
# $$
# \langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}\rangle\langle E_L[\alpha]\rangle
# $$
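#
# In a Monte Carlo calculation these expectation values become sample averages over the sampled configurations. The sketch below is an editor's illustration only; the two arrays are random placeholders, not data from the notes.
# In[ ]:
import numpy as np
# Editor's sketch: given samples of the local energy E_L and of the ratio
# (d psi/d alpha)/psi, the derivative of the energy expectation value is
# 2*( <ratio*E_L> - <ratio>*<E_L> ).  The arrays below are placeholders.
np.random.seed(1)
local_energy = np.random.normal(0.5, 0.1, size=10000)       # placeholder E_L samples
psi_deriv_ratio = np.random.normal(-0.3, 0.05, size=10000)  # placeholder ratio samples
energy_gradient = 2.0*(np.mean(psi_deriv_ratio*local_energy)
                       - np.mean(psi_deriv_ratio)*np.mean(local_energy))
print(energy_gradient)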
# <!-- --- begin exercise --- -->
#
# ## Exercise 2: General expression for the derivative of the energy
#
#
# **a)**
# Show that
# $$
# \bar{E}_{\alpha} = 2\left( \langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}E_L[\alpha]\rangle -\langle \frac{\bar{\psi}_{\alpha}}{\psi[\alpha]}\rangle\langle E_L[\alpha] \rangle\right).
# $$
# **b)**
# Find the corresponding expression for the variance.
#
#
#
#
#
# <!-- --- end exercise --- -->
#
#
# ## Python program for 2-electrons in 2 dimensions
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization with gradient descent using fixed step size
# To do: replace with optimization codes from scipy and/or use stochastic gradient descent
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
qforce[0,:] = -2*r[0,:]*alpha*(r[0,:]-r[1,:])*deno*deno/r12
qforce[1,:] = -2*r[1,:]*alpha*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyMinimization(alpha, beta):
NumberMCcycles= 10000
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
energy = 0.0
DeltaE = 0.0
EnergyDer = np.zeros((2), np.double)
DeltaPsi = np.zeros((2), np.double)
DerivativePsiE = np.zeros((2), np.double)
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+ QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])* (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])- PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
alpha = 0.9
beta = 0.2
# Set up iteration using gradient descent method
Energy = 0
EDerivative = np.zeros((2), np.double)
eta = 0.01
Niterations = 50
#
for iter in range(Niterations):
Energy, EDerivative = EnergyMinimization(alpha,beta)
alphagradient = EDerivative[0]
betagradient = EDerivative[1]
alpha -= eta*alphagradient
beta -= eta*betagradient
print(alpha, beta)
print(Energy, EDerivative[0], EDerivative[1])
# ## Using Broyden's algorithm in scipy
# The following code uses the BFGS algorithm as implemented in **scipy**. Here we have defined a function which calculates the energy and a function which computes the first derivative.
# In[ ]:
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization using the BFGS algorithm, see p. 136 of https://www.springer.com/it/book/9780387303031
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
qforce[0,:] = -2*r[0,:]*alpha*(r[0,:]-r[1,:])*deno*deno/r12
qforce[1,:] = -2*r[1,:]*alpha*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
NumberMCcycles= 10000
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+ QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])* (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])- PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
NumberMCcycles= 10000
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+ QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])* (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])- PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
# We calculate mean values
energy /= NumberMCcycles
return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using Broydens method
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
print(res.x)
# Note that the **minimize** function returns the final values for the variables $\alpha=x0[0]$ and $\beta=x0[1]$ in the array $x$.
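#
# To see the same interface on a function that is cheap to evaluate (an editor's illustration, not part of the original notes), one can minimize the closed-form one-dimensional energy from the harmonic-oscillator example; the function names below are ours.
# In[ ]:
from scipy.optimize import minimize
import numpy as np
# Editor's illustration: the same minimize/BFGS interface applied to the
# closed-form energy E(alpha) = (alpha^2 + 1/alpha^2)/4, with the analytical
# derivative supplied through jac.
def exact_energy(x):
    alpha = x[0]
    return 0.25*(alpha**2 + 1.0/alpha**2)
def exact_energy_derivative(x):
    alpha = x[0]
    return np.array([0.5*alpha - 0.5/alpha**3])
res_exact = minimize(exact_energy, np.array([0.5]), method='BFGS',
                     jac=exact_energy_derivative, options={'gtol': 1e-8})
print(res_exact.x)   # should be close to alpha = 1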
#
#
#
# ## Brief reminder on Newton-Raphson's method
#
# Let us quickly remind ourselves how we derive the above method.
#
# Perhaps the most celebrated of all one-dimensional root-finding
# routines is Newton's method, also called the Newton-Raphson
# method. This method requires the evaluation of both the
# function $f$ and its derivative $f'$ at arbitrary points.
# If you can only calculate the derivative
# numerically and/or your function is not of the smooth type, we
# normally discourage the use of this method.
#
#
# ## The equations
#
# The Newton-Raphson formula consists geometrically of extending the
# tangent line at a current point until it crosses zero, then setting
# the next guess to the abscissa of that zero-crossing. The mathematics
# behind this method is rather simple. Employing a Taylor expansion for
# $x$ sufficiently close to the solution $s$, we have
# <!-- Equation labels as ordinary links -->
# <div id="eq:taylornr"></div>
#
# $$
# f(s)=0=f(x)+(s-x)f'(x)+\frac{(s-x)^2}{2}f''(x) +\dots.
# \label{eq:taylornr} \tag{1}
# $$
# For small enough values of the function and for well-behaved
# functions, the terms beyond linear are unimportant, hence we obtain
# $$
# f(x)+(s-x)f'(x)\approx 0,
# $$
# yielding
# $$
# s\approx x-\frac{f(x)}{f'(x)}.
# $$
# Having in mind an iterative procedure, it is natural to start iterating with
# $$
# x_{n+1}=x_n-\frac{f(x_n)}{f'(x_n)}.
# $$
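#
# A bare-bones sketch of this iteration (an editor's addition; the test function $f(x)=x^2-2$ and the name newton_raphson are ours):
# In[ ]:
# Editor's sketch of the Newton-Raphson iteration, applied to f(x) = x^2 - 2,
# whose positive root is sqrt(2).
def newton_raphson(f, fprime, x0, tol=1e-12, maxiter=50):
    x = x0
    for _ in range(maxiter):
        step = f(x)/fprime(x)
        x = x - step
        if abs(step) < tol:
            break
    return x
print(newton_raphson(lambda x: x*x - 2.0, lambda x: 2.0*x, x0=1.0))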
# ## Simple geometric interpretation
#
# The above is Newton-Raphson's method. It has a simple geometric
# interpretation, namely $x_{n+1}$ is the point where the tangent from
# $(x_n,f(x_n))$ crosses the $x$-axis. Close to the solution,
# Newton-Raphson converges fast to the desired result. However, if we
# are far from a root, where the higher-order terms in the series are
# important, the Newton-Raphson formula can give grossly inaccurate
# results. For instance, the initial guess for the root might be so far
# from the true root as to let the search interval include a local
# maximum or minimum of the function. If an iteration places a trial
# guess near such a local extremum, so that the first derivative nearly
# vanishes, then Newton-Raphson may fail totally.
#
#
#
# ## Extending to more than one variable
#
# Newton's method can be generalized to systems of several non-linear equations
# and variables. Consider the case with two equations
# $$
# \begin{array}{cc} f_1(x_1,x_2) &=0\\
# f_2(x_1,x_2) &=0,\end{array}
# $$
# which we Taylor expand to obtain
# $$
# \begin{array}{cc} 0=f_1(x_1+h_1,x_2+h_2)=&f_1(x_1,x_2)+h_1
# \partial f_1/\partial x_1+h_2
# \partial f_1/\partial x_2+\dots\\
# 0=f_2(x_1+h_1,x_2+h_2)=&f_2(x_1,x_2)+h_1
# \partial f_2/\partial x_1+h_2
# \partial f_2/\partial x_2+\dots
# \end{array}.
# $$
# Defining the Jacobian matrix $\hat{J}$ we have
# $$
# \hat{J}=\left( \begin{array}{cc}
# \partial f_1/\partial x_1 & \partial f_1/\partial x_2 \\
# \partial f_2/\partial x_1 &\partial f_2/\partial x_2
# \end{array} \right),
# $$
# we can rephrase Newton's method as
# $$
# \left(\begin{array}{c} x_1^{n+1} \\ x_2^{n+1} \end{array} \right)=
# \left(\begin{array}{c} x_1^{n} \\ x_2^{n} \end{array} \right)+
# \left(\begin{array}{c} h_1^{n} \\ h_2^{n} \end{array} \right),
# $$
# where we have defined
# $$
# \left(\begin{array}{c} h_1^{n} \\ h_2^{n} \end{array} \right)=
# -{\bf \hat{J}}^{-1}
# \left(\begin{array}{c} f_1(x_1^{n},x_2^{n}) \\ f_2(x_1^{n},x_2^{n}) \end{array} \right).
# $$
# We thus need to compute the inverse of the Jacobian matrix, and it
# is easy to understand that difficulties may
# arise in case $\hat{J}$ is nearly singular.
#
# It is rather straightforward to extend the above scheme to systems of
# more than two non-linear equations. In our case, the Jacobian matrix is given by the Hessian, which represents the second derivative of the cost function.
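#
# In practice one rarely forms $\hat{J}^{-1}$ explicitly; instead one solves the linear system $\hat{J}\,\mathbf{h}=-\mathbf{f}$ for the update. A small sketch (an editor's addition; the two-equation example system is ours):
# In[ ]:
import numpy as np
# Editor's sketch of a multivariate Newton iteration: solve J h = -f rather
# than inverting the Jacobian.  Example system: f1 = x1^2 + x2 - 2, f2 = x1 - x2,
# whose solution with positive x1 is (1, 1).
def f_vec(x):
    return np.array([x[0]**2 + x[1] - 2.0, x[0] - x[1]])
def jacobian(x):
    return np.array([[2.0*x[0], 1.0],
                     [1.0, -1.0]])
xvec = np.array([2.0, 0.5])
for _ in range(20):
    h = np.linalg.solve(jacobian(xvec), -f_vec(xvec))
    xvec = xvec + h
print(xvec)   # both components approach 1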
#
#
#
#
# ## Steepest descent
#
# The basic idea of gradient descent is
# that a function $F(\mathbf{x})$,
# $\mathbf{x} \equiv (x_1,\cdots,x_n)$, decreases fastest if one goes from $\bf {x}$ in the
# direction of the negative gradient $-\nabla F(\mathbf{x})$.
#
# It can be shown that if we take the step
# $$
# \mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k),
# $$
# with $\gamma_k > 0$ small enough, then $F(\mathbf{x}_{k+1}) \leq
# F(\mathbf{x}_k)$. This means that for a sufficiently small $\gamma_k$
# we are always moving towards smaller function values, i.e. a minimum.
#
#
# ## More on Steepest descent
#
# The previous observation is the basis of the method of steepest
# descent, which is also referred to as just gradient descent (GD). One
# starts with an initial guess $\mathbf{x}_0$ for a minimum of $F$ and
# computes new approximations according to
# $$
# \mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k), \ \ k \geq 0.
# $$
# The parameter $\gamma_k$ is often referred to as the step length or
# the learning rate within the context of Machine Learning.
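#
# A bare-bones sketch of this update rule (an editor's addition), applied with a fixed learning rate to the closed-form energy from the harmonic-oscillator example:
# In[ ]:
# Editor's sketch: plain gradient descent on E(alpha) = (alpha^2 + 1/alpha^2)/4
# with a fixed learning rate.
alpha_gd = 0.5            # initial guess
learning_rate = 0.1
for _ in range(200):
    gradient = 0.5*alpha_gd - 0.5/alpha_gd**3
    alpha_gd = alpha_gd - learning_rate*gradient
print(alpha_gd)           # ends up close to the optimal alpha = 1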
#
#
# ## The ideal
#
# Ideally the sequence $\{\mathbf{x}_k \}_{k=0}$ converges to a global
# minimum of the function $F$. In general we do not know if we are in a
# global or local minimum. In the special case when $F$ is a convex
# function, all local minima are also global minima, so in this case
# gradient descent can converge to the global solution. The advantage of
# this scheme is that it is conceptually simple and straightforward to
# implement. However the method in this form has some severe
# limitations:
#
# In machine learning we are often faced with non-convex, high-dimensional
# cost functions with many local minima. Since GD is deterministic we
# will get stuck in a local minimum, if the method converges, unless we
# have a very good initial guess. This also implies that the scheme is
# sensitive to the chosen initial condition.
#
# Note that the gradient is a function of $\mathbf{x} =
# (x_1,\cdots,x_n)$ which makes it expensive to compute numerically.
#
#
#
# ## The sensitiveness of the gradient descent
#
# The gradient descent method
# is sensitive to the choice of learning rate $\gamma_k$. This is due
# to the fact that we are only guaranteed that $F(\mathbf{x}_{k+1}) \leq
# F(\mathbf{x}_k)$ for sufficiently small $\gamma_k$. The problem is to
# determine an optimal learning rate. If the learning rate is chosen too
# small the method will take a long time to converge and if it is too
# large we can experience erratic behavior.
#
# Many of these shortcomings can be alleviated by introducing
# randomness. One such method is that of Stochastic Gradient Descent
# (SGD), see below.
#
#
#
# ## Convex functions
#
# Ideally we want our cost/loss function to be convex (or concave).
#
# First we give the definition of a convex set: A set $C$ in
# $\mathbb{R}^n$ is said to be convex if, for all $x$ and $y$ in $C$ and
# all $t \in (0,1)$, the point $(1-t)x + ty$ also belongs to
# $C$. Geometrically this means that every point on the line segment
# connecting $x$ and $y$ is in $C$, as discussed below.
#
# The convex subsets of $\mathbb{R}$ are the intervals of
# $\mathbb{R}$. Examples of convex sets of $\mathbb{R}^2$ are the
# regular polygons (triangles, rectangles, pentagons, etc...).
#
#
# ## Convex function
#
# **Convex function**: Let $X \subset \mathbb{R}^n$ be a convex set. Assume that the function $f: X \rightarrow \mathbb{R}$ is continuous, then $f$ is said to be convex if $$f(tx_1 + (1-t)x_2) \leq tf(x_1) + (1-t)f(x_2) $$ for all $x_1, x_2 \in X$ and for all $t \in [0,1]$. If $\leq$ is replaced with a strict inequality in the definition, and we demand $x_1 \neq x_2$ and $t\in(0,1)$, then $f$ is said to be strictly convex. For a single variable function, convexity means that if you draw a straight line connecting $f(x_1)$ and $f(x_2)$, the value of the function on the interval $[x_1,x_2]$ is always below the line as illustrated below.
#
#
# ## Conditions on convex functions
#
# In the following we state the first and second-order conditions which
# ensure convexity of a function $f$. We write $D_f$ to denote the
# domain of $f$, i.e. the subset of $\mathbb{R}^n$ where $f$ is defined. For more
# details and proofs we refer to [S. Boyd and L. Vandenberghe, *Convex Optimization*, Cambridge University Press, 2004](http://stanford.edu/boyd/cvxbook/).
#
# **First order condition.**
#
# Suppose $f$ is differentiable (i.e. $\nabla f(x)$ is well defined for
# all $x$ in the domain of $f$). Then $f$ is convex if and only if $D_f$
# is a convex set and $$f(y) \geq f(x) + \nabla f(x)^T (y-x) $$ holds
# for all $x,y \in D_f$. This condition means that for a convex function
# the first order Taylor expansion (right hand side above) at any point
# is a global underestimator of the function. To convince yourself you can
# make a drawing of $f(x) = x^2+1$, draw the tangent line to $f(x)$, and
# note that it is always below the graph.
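#
# As a small numerical illustration (a sketch added here, not part of
# the original text), we can check the first order condition for
# $f(x)=x^2+1$ on a grid of points: the tangent-line value
# $f(x)+f'(x)(y-x)$ should never exceed $f(y)$.
# In[ ]:
import numpy as np
def f(x):
    return x**2 + 1.0
def df(x):
    return 2.0*x
xs = np.linspace(-3, 3, 61)
# For every pair (x, y) on the grid, check f(y) >= f(x) + f'(x)(y - x)
ok = all(f(y) >= f(x) + df(x)*(y - x) - 1e-12 for x in xs for y in xs)
print("First order condition satisfied on the grid:", ok)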
#
#
#
# **Second order condition.**
#
# Assume that $f$ is twice
# differentiable, i.e. the Hessian matrix exists at each point in
# $D_f$. Then $f$ is convex if and only if $D_f$ is a convex set and its
# Hessian is positive semi-definite for all $x\in D_f$. For a
# single-variable function this reduces to $f''(x) \geq 0$. Geometrically this means that $f$ has nonnegative curvature
# everywhere.
#
#
#
# This condition is particularly useful since it gives us a procedure for determining whether the function under consideration is convex, apart from using the definition directly.
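#
# As a sketch of such a procedure (added here for illustration), we can
# check the second order condition numerically: compute the Hessian of a
# candidate function and test whether its eigenvalues are nonnegative.
# The example function below is an assumption chosen only for
# illustration.
# In[ ]:
import numpy as np
# f(x1, x2) = x1**2 + 2*x2**2 + x1*x2 has the constant Hessian below
H = np.array([[2.0, 1.0],
              [1.0, 4.0]])
eigvals = np.linalg.eigvalsh(H)
print("Hessian eigenvalues:", eigvals)
print("Convex (positive semi-definite Hessian):", np.all(eigvals >= 0))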
#
#
# ## More on convex functions
#
# The next result is of great importance to us and the reason why we are
# going on about convex functions. In machine learning we frequently
# have to minimize a loss/cost function in order to find the best
# parameters for the model we are considering.
#
# Ideally we want the
# global minimum (for high-dimensional models it is hard to know
# if we have a local or a global minimum). However, if the cost/loss function
# is convex the following result provides invaluable information:
#
# **Any minimum is global for convex functions.**
#
# Consider the problem of finding $x \in \mathbb{R}^n$ such that $f(x)$
# is minimal, where $f$ is convex and differentiable. Then, any point
# $x^*$ that satisfies $\nabla f(x^*) = 0$ is a global minimum.
#
#
#
# This result means that if we know that the cost/loss function is convex and we are able to find a minimum, we are guaranteed that it is a global minimum.
#
#
# ## Some simple problems
#
# 1. Show that $f(x)=x^2$ is convex for $x \in \mathbb{R}$ using the definition of convexity. Hint: If you re-write the definition, $f$ is convex if the following holds for all $x,y \in D_f$ and any $\lambda \in [0,1]$ $\lambda f(x)+(1-\lambda)f(y)-f(\lambda x + (1-\lambda) y ) \geq 0$.
#
# 2. Using the second order condition show that the following functions are convex on the specified domain.
#
# * $f(x) = e^x$ is convex for $x \in \mathbb{R}$.
#
# * $g(x) = -\ln(x)$ is convex for $x \in (0,\infty)$.
#
#
# 3. Let $f(x) = x^2$ and $g(x) = e^x$. Show that $f(g(x))$ and $g(f(x))$ are convex for $x \in \mathbb{R}$. Also show that if $f(x)$ is any convex function then $h(x) = e^{f(x)}$ is convex.
#
# 4. A norm is any function that satisfies the following properties
#
# * $f(\alpha x) = |\alpha| f(x)$ for all $\alpha \in \mathbb{R}$.
#
# * $f(x+y) \leq f(x) + f(y)$ for all $x, y \in \mathbb{R}^n$ (the triangle inequality)
#
# * $f(x) \geq 0$ for all $x \in \mathbb{R}^n$ with equality if and only if $x = 0$
#
#
# Using the definition of convexity, try to show that a function satisfying the properties above is convex (the third condition is not needed to show this).
#
#
#
# ## Standard steepest descent
#
#
# Before we proceed, we would like to discuss the approach called the
# **standard Steepest descent**, which again requires us to be able
# to compute a matrix. It is closely related to the class of Conjugate Gradient (CG) methods.
#
# [The success of the CG method](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)
# for finding solutions of non-linear problems is based on the theory
# of conjugate gradients for linear systems of equations. It belongs to
# the class of iterative methods for solving problems from linear
# algebra of the type
# $$
# \hat{A}\hat{x} = \hat{b}.
# $$
# In the iterative process we end up with a problem like
# $$
# \hat{r}= \hat{b}-\hat{A}\hat{x},
# $$
# where $\hat{r}$ is the so-called residual or error in the iterative process.
#
# When we have found the exact solution, $\hat{r}=0$.
#
#
# ## Gradient method
#
# The residual is zero when we reach the minimum of the quadratic form
# $$
# P(\hat{x})=\frac{1}{2}\hat{x}^T\hat{A}\hat{x} - \hat{x}^T\hat{b},
# $$
# with the constraint that the matrix $\hat{A}$ is positive definite and
# symmetric. The matrix $\hat{A}$ is also the Hessian of $P$, which is why we want it to be positive definite.
#
#
#
# ## Steepest descent method
#
# We denote the initial guess for $\hat{x}$ as $\hat{x}_0$.
# We can assume without loss of generality that
# $$
# \hat{x}_0=0,
# $$
# or consider the system
# $$
# \hat{A}\hat{z} = \hat{b}-\hat{A}\hat{x}_0,
# $$
# instead.
#
#
#
# ## Steepest descent method
# One can show that the solution $\hat{x}$ is also the unique minimizer of the quadratic form
# $$
# f(\hat{x}) = \frac{1}{2}\hat{x}^T\hat{A}\hat{x} - \hat{x}^T \hat{b} , \quad \hat{x}\in\mathbf{R}^n.
# $$
# This suggests taking the first basis vector $\hat{r}_1$ (see below for definition)
# to be the gradient of $f$ at $\hat{x}=\hat{x}_0$,
# which equals
# $$
# \hat{A}\hat{x}_0-\hat{b},
# $$
# and since
# $\hat{x}_0=0$ it is equal to $-\hat{b}$.
#
#
#
#
# ## Final expressions
# We can compute the residual iteratively as
# $$
# \hat{r}_{k+1}=\hat{b}-\hat{A}\hat{x}_{k+1},
# $$
# which equals
# $$
# \hat{b}-\hat{A}(\hat{x}_k+\alpha_k\hat{r}_k),
# $$
# or
# $$
# (\hat{b}-\hat{A}\hat{x}_k)-\alpha_k\hat{A}\hat{r}_k.
# $$
# Performing an exact line search, that is choosing $\alpha_k$ such that the new
# residual is orthogonal to the previous one, $\hat{r}_{k+1}^T\hat{r}_k=0$, gives
# $$
# \alpha_k = \frac{\hat{r}_k^T\hat{r}_k}{\hat{r}_k^T\hat{A}\hat{r}_k},
# $$
# leading to the iterative scheme
# $$
# \hat{x}_{k+1}=\hat{x}_k+\alpha_k\hat{r}_{k}.
# $$
# ## Code examples for steepest descent
#
#
# ## Simple codes for steepest descent and conjugate gradient using a $2\times 2$ matrix, in C++; a Python sketch follows below
# #include <cmath>
# #include <iostream>
# #include <fstream>
# #include <iomanip>
# #include "vectormatrixclass.h"
# using namespace std;
# // Main function begins here
# int main(int argc, char * argv[]){
# int dim = 2;
# Vector x(dim),xsd(dim), b(dim),x0(dim);
# Matrix A(dim,dim);
#
# // Set our initial guess
# x0(0) = x0(1) = 0;
# // Set the matrix
# A(0,0) = 3; A(1,0) = 2; A(0,1) = 2; A(1,1) = 6;
# b(0) = 2; b(1) = -8;
# cout << "The Matrix A that we are using: " << endl;
# A.Print();
# cout << endl;
# xsd = SteepestDescent(A,b,x0);
# cout << "The approximate solution using Steepest Descent is: " << endl;
# xsd.Print();
# cout << endl;
# }
#
# ## The routine for the steepest descent method
# Vector SteepestDescent(Matrix A, Vector b, Vector x0){
# int IterMax, i;
# int dim = x0.Dimension();
# const double tolerance = 1.0e-14;
#   Vector x(dim),r(dim),z(dim);
# double c,alpha,d;
# IterMax = 30;
# x = x0;
# r = A*x-b;
# i = 0;
# while (i <= IterMax){
# z = A*r;
# c = dot(r,r);
# alpha = c/dot(r,z);
# x = x - alpha*r;
# r = A*x-b;
# if(sqrt(dot(r,r)) < tolerance) break;
# i++;
# }
# return x;
# }
#
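#
# As announced in the heading above, here is a small Python sketch of
# the same steepest descent iteration for the $2\times 2$ system used in
# the C++ example. The matrix, right-hand side and tolerance mirror the
# C++ code; the function name itself is just a choice made here.
# In[ ]:
import numpy as np
def steepest_descent(A, b, x0, tol=1.0e-14, max_iter=30):
    """Solve Ax = b for symmetric positive-definite A by steepest descent."""
    x = x0.copy()
    for _ in range(max_iter):
        r = b - A @ x                    # residual, equal to minus the gradient
        if np.sqrt(r @ r) < tol:
            break
        alpha = (r @ r) / (r @ (A @ r))  # exact line search
        x = x + alpha*r
    return x
A = np.array([[3.0, 2.0],
              [2.0, 6.0]])
b = np.array([2.0, -8.0])
x0 = np.zeros(2)
print("Steepest descent solution:", steepest_descent(A, b, x0))
print("Exact solution           :", np.linalg.solve(A, b))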
# ## Steepest descent example
# In[ ]:
import numpy as np
import numpy.linalg as la
import scipy.optimize as sopt
import matplotlib.pyplot as pt
from mpl_toolkits.mplot3d import axes3d
def f(x):
return 0.5*x[0]**2 + 2.5*x[1]**2
def df(x):
return np.array([x[0], 5*x[1]])
fig = pt.figure()
ax = fig.add_subplot(projection="3d")
xmesh, ymesh = np.mgrid[-2:2:50j,-2:2:50j]
fmesh = f(np.array([xmesh, ymesh]))
ax.plot_surface(xmesh, ymesh, fmesh)
# And then as contour plot
# In[ ]:
pt.axis("equal")
pt.contour(xmesh, ymesh, fmesh)
guesses = [np.array([2, 2./5])]
# Find guesses
# In[ ]:
x = guesses[-1]
s = -df(x)
# Run it!
# In[ ]:
def f1d(alpha):
return f(x + alpha*s)
alpha_opt = sopt.golden(f1d)
next_guess = x + alpha_opt * s
guesses.append(next_guess)
print(next_guess)
# What happened?
# In[ ]:
pt.axis("equal")
pt.contour(xmesh, ymesh, fmesh, 50)
it_array = np.array(guesses)
pt.plot(it_array.T[0], it_array.T[1], "x-")
# ## Conjugate gradient method
# In the CG method we define so-called conjugate directions and two vectors
# $\hat{s}$ and $\hat{t}$
# are said to be
# conjugate if
# $$
# \hat{s}^T\hat{A}\hat{t}= 0.
# $$
# The philosophy of the CG method is to perform searches along a set of mutually
# conjugate search directions (defined below), which obey the above criterion, namely
# $$
# \hat{p}_i^T\hat{A}\hat{p}_j= 0 \quad \mathrm{for}\ i \neq j.
# $$
# Two vectors are conjugate if they are orthogonal with respect to
# this inner product. Being conjugate is a symmetric relation: if $\hat{s}$ is conjugate to $\hat{t}$, then $\hat{t}$ is conjugate to $\hat{s}$.
#
#
#
#
# ## Conjugate gradient method
# An example is given by the eigenvectors $\hat{v}_i$ of a symmetric matrix $\hat{A}$, for which
# $$
# \hat{v}_i^T\hat{A}\hat{v}_j= \lambda_j\hat{v}_i^T\hat{v}_j,
# $$
# which is zero unless $i=j$.
#
#
#
#
#
# ## Conjugate gradient method
# Assume now that we have a symmetric positive-definite matrix $\hat{A}$ of size
# $n\times n$. At each iteration $i+1$ we update the solution along a conjugate direction $\hat{p}_i$,
# $$
# \hat{x}_{i+1}=\hat{x}_{i}+\alpha_i\hat{p}_{i}.
# $$
# We assume that $\hat{p}_{i}$ is a sequence of $n$ mutually conjugate directions.
# Then the $\hat{p}_{i}$ form a basis of $R^n$ and we can expand the solution
# $ \hat{A}\hat{x} = \hat{b}$ in this basis, namely
# $$
# \hat{x} = \sum^{n}_{i=1} \alpha_i \hat{p}_i.
# $$
# ## Conjugate gradient method
# The coefficients are given by
# $$
# \mathbf{A}\mathbf{x} = \sum^{n}_{i=1} \alpha_i \mathbf{A} \mathbf{p}_i = \mathbf{b}.
# $$
# Multiplying with $\hat{p}_k^T$ from the left gives
# $$
# \hat{p}_k^T \hat{A}\hat{x} = \sum^{n}_{i=1} \alpha_i\hat{p}_k^T \hat{A}\hat{p}_i= \hat{p}_k^T \hat{b},
# $$
# and we can define the coefficients $\alpha_k$ as
# $$
# \alpha_k = \frac{\hat{p}_k^T \hat{b}}{\hat{p}_k^T \hat{A} \hat{p}_k}
# $$
# ## Conjugate gradient method and iterations
#
# If we choose the conjugate vectors $\hat{p}_k$ carefully,
# then we may not need all of them to obtain a good approximation to the solution
# $\hat{x}$.
# We want to regard the conjugate gradient method as an iterative method.
# This will allow us to solve systems where $n$ is so large that the direct
# method would take too much time.
#
# We denote the initial guess for $\hat{x}$ as $\hat{x}_0$.
# We can assume without loss of generality that
# $$
# \hat{x}_0=0,
# $$
# or consider the system
# $$
# \hat{A}\hat{z} = \hat{b}-\hat{A}\hat{x}_0,
# $$
# instead.
#
#
#
#
#
# ## Conjugate gradient method
# One can show that the solution $\hat{x}$ is also the unique minimizer of the quadratic form
# $$
# f(\hat{x}) = \frac{1}{2}\hat{x}^T\hat{A}\hat{x} - \hat{x}^T \hat{b} , \quad \hat{x}\in\mathbf{R}^n.
# $$
# This suggests taking the first basis vector $\hat{p}_1$
# to be the gradient of $f$ at $\hat{x}=\hat{x}_0$,
# which equals
# $$
# \hat{A}\hat{x}_0-\hat{b},
# $$
# and since
# $\hat{x}_0=0$ it is equal to $-\hat{b}$.
# The other vectors in the basis will be conjugate to the gradient,
# hence the name conjugate gradient method.
#
#
#
#
#
# ## Conjugate gradient method
# Let $\hat{r}_k$ be the residual at the $k$-th step:
# $$
# \hat{r}_k=\hat{b}-\hat{A}\hat{x}_k.
# $$
# Note that $\hat{r}_k$ is the negative gradient of $f$ at
# $\hat{x}=\hat{x}_k$,
# so the gradient descent method would be to move in the direction $\hat{r}_k$.
# Here, we insist that the directions $\hat{p}_k$ are conjugate to each other,
# so we take the direction closest to the gradient $\hat{r}_k$
# under the conjugacy constraint.
# This gives the following expression
# $$
# \hat{p}_{k+1}=\hat{r}_k-\frac{\hat{p}_k^T \hat{A}\hat{r}_k}{\hat{p}_k^T\hat{A}\hat{p}_k} \hat{p}_k.
# $$
# ## Conjugate gradient method
# We can also compute the residual iteratively as
# $$
# \hat{r}_{k+1}=\hat{b}-\hat{A}\hat{x}_{k+1},
# $$
# which equals
# $$
# \hat{b}-\hat{A}(\hat{x}_k+\alpha_k\hat{p}_k),
# $$
# or
# $$
# (\hat{b}-\hat{A}\hat{x}_k)-\alpha_k\hat{A}\hat{p}_k,
# $$
# which gives
# $$
# \hat{r}_{k+1}=\hat{r}_k-\alpha_k\hat{A}\hat{p}_{k}.
# $$
# ## Simple implementation of the Conjugate gradient algorithm
# Vector ConjugateGradient(Matrix A, Vector b, Vector x0){
# int dim = x0.Dimension();
# const double tolerance = 1.0e-14;
# Vector x(dim),r(dim),v(dim),z(dim);
# double c,t,d;
#
# x = x0;
# r = b - A*x;
# v = r;
# c = dot(r,r);
#    int i = 0; int IterMax = dim;
# while(i <= IterMax){
# z = A*v;
# t = c/dot(v,z);
# x = x + t*v;
# r = r - t*z;
# d = dot(r,r);
# if(sqrt(d) < tolerance)
# break;
# v = r + (d/c)*v;
# c = d; i++;
# }
# return x;
# }
#
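#
# For completeness, here is a short Python sketch of the same conjugate
# gradient algorithm (an illustration added here, mirroring the C++
# routine above), applied to the $2\times 2$ system from the steepest
# descent example.
# In[ ]:
import numpy as np
def conjugate_gradient(A, b, x0, tol=1.0e-14):
    """Solve Ax = b for symmetric positive-definite A with the CG method."""
    x = x0.copy()
    r = b - A @ x
    p = r.copy()
    c = r @ r
    for _ in range(len(b)):
        z = A @ p
        alpha = c / (p @ z)
        x = x + alpha*p
        r = r - alpha*z
        d = r @ r
        if np.sqrt(d) < tol:
            break
        p = r + (d/c)*p
        c = d
    return x
A = np.array([[3.0, 2.0],
              [2.0, 6.0]])
b = np.array([2.0, -8.0])
print("CG solution   :", conjugate_gradient(A, b, np.zeros(2)))
print("Exact solution:", np.linalg.solve(A, b))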
# ## Broyden–Fletcher–Goldfarb–Shanno algorithm
# The optimization problem is to minimize $f(\mathbf {x} )$ where $\mathbf {x}$ is a vector in $R^{n}$, and $f$ is a differentiable scalar function. There are no constraints on the values that $\mathbf {x}$ can take.
#
# The algorithm begins at an initial estimate for the optimal value $\mathbf {x}_{0}$ and proceeds iteratively to get a better estimate at each stage.
#
# The search direction $p_k$ at stage $k$ is given by the solution of the analogue of the Newton equation
# $$
# B_{k}\mathbf {p} _{k}=-\nabla f(\mathbf {x}_{k}),
# $$
# where $B_{k}$ is an approximation to the Hessian matrix, which is
# updated iteratively at each stage, and $\nabla f(\mathbf {x} _{k})$
# is the gradient of the function
# evaluated at $x_k$.
# A line search in the direction $p_k$ is then used to
# find the next point $x_{k+1}$ by minimising
# $$
# f(\mathbf {x}_{k}+\alpha \mathbf {p}_{k}),
# $$
# over the scalar $\alpha > 0$.
#
#
#
#
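#
# A convenient way to experiment with this algorithm is through
# **scipy.optimize.minimize** with `method='BFGS'`, which only needs the
# function and (optionally) its gradient. The quadratic test function
# below reuses the $2\times 2$ matrix from the steepest descent example;
# this is just an illustrative sketch, not part of the original notes.
# In[ ]:
import numpy as np
from scipy.optimize import minimize
A = np.array([[3.0, 2.0],
              [2.0, 6.0]])
b = np.array([2.0, -8.0])
def f(x):
    # quadratic form whose minimizer solves Ax = b
    return 0.5*x @ A @ x - b @ x
def grad_f(x):
    return A @ x - b
res = minimize(f, x0=np.zeros(2), jac=grad_f, method='BFGS')
print("BFGS minimizer:", res.x)
print("Exact solution:", np.linalg.solve(A, b))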
#
# ## Stochastic Gradient Descent
#
# Stochastic gradient descent (SGD) and variants thereof address some of
# the shortcomings of the Gradient descent method discussed above.
#
# The underlying idea of SGD comes from the observation that a given
# function, which we want to minimize, can almost always be written as a
# sum over $n$ data points $\{\mathbf{x}_i\}_{i=1}^n$,
# $$
# C(\mathbf{\beta}) = \sum_{i=1}^n c_i(\mathbf{x}_i,
# \mathbf{\beta}).
# $$
# ## Computation of gradients
#
# This in turn means that the gradient can be
# computed as a sum over $i$-gradients
# $$
# \nabla_\beta C(\mathbf{\beta}) = \sum_i^n \nabla_\beta c_i(\mathbf{x}_i,
# \mathbf{\beta}).
# $$
# Stochasticity/randomness is introduced by only taking the
# gradient on a subset of the data called minibatches. If there are $n$
# data points and the size of each minibatch is $M$, there will be $n/M$
# minibatches. We denote these minibatches by $B_k$ where
# $k=1,\cdots,n/M$.
#
#
# ## SGD example
# As an example, suppose we have $10$ data points $(\mathbf{x}_1,\cdots, \mathbf{x}_{10})$
# and we choose minibatches of size $M=2$; then we have $n/M=5$ minibatches,
# each containing two data points. In particular we have
# $B_1 = (\mathbf{x}_1,\mathbf{x}_2), \cdots, B_5 =
# (\mathbf{x}_9,\mathbf{x}_{10})$. Note that if you choose $M=n$ you
# have only a single batch with all data points and on the other extreme,
# you may choose $M=1$, resulting in a minibatch for each datapoint, i.e.
# $B_k = \mathbf{x}_k$.
#
# The idea is now to approximate the gradient by replacing the sum over
# all data points with a sum over the data points in one of the minibatches,
# picked at random in each gradient descent step
# $$
# \nabla_{\beta}
# C(\mathbf{\beta}) = \sum_{i=1}^n \nabla_\beta c_i(\mathbf{x}_i,
# \mathbf{\beta}) \rightarrow \sum_{i \in B_k} \nabla_\beta
# c_i(\mathbf{x}_i, \mathbf{\beta}).
# $$
# ## The gradient step
#
# Thus a gradient descent step now looks like
# $$
# \beta_{j+1} = \beta_j - \gamma_j \sum_{i \in B_k} \nabla_\beta c_i(\mathbf{x}_i,
# \mathbf{\beta}),
# $$
# where $k$ is picked at random with equal
# probability from $[1,n/M]$. An iteration over the number of
# minibatches ($n/M$) is commonly referred to as an epoch. Thus it is
# typical to choose a number of epochs and for each epoch iterate over
# the number of minibatches, as exemplified in the code below.
#
#
# ## Simple example code
# In[ ]:
import numpy as np
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 10 #number of epochs
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
        #Compute new suggestion for beta
j += 1
# Taking the gradient only on a subset of the data has two important
# benefits. First, it introduces randomness, which decreases the chance
# that our optimization scheme gets stuck in a local minimum. Second, if
# the size of the minibatches is small relative to the number of
# datapoints ($M < n$), the computation of the gradient is much
# cheaper since we sum over the datapoints in the $k$-th minibatch and not
# all $n$ datapoints.
#
#
# ## When do we stop?
#
# A natural question is when do we stop the search for a new minimum?
# One possibility is to compute the full gradient after a given number
# of epochs and check if the norm of the gradient is smaller than some
# threshold and stop if true. However, the condition that the gradient
# is zero is valid also for local minima, so this would only tell us
# that we are close to a local/global minimum. However, we could also
# evaluate the cost function at this point, store the result and
# continue the search. If the test kicks in at a later stage we can
# compare the values of the cost function and keep the $\beta$ that
# gave the lowest value.
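#
# A minimal sketch of such a stopping test (the toy data, numbers and
# helper names below are purely illustrative) could look as follows:
# every few epochs we evaluate the full gradient and the cost, stop if
# the gradient norm is below a threshold, and always keep the best
# $\beta$ seen so far.
# In[ ]:
import numpy as np
# Toy data for a linear model y = X beta + noise
np.random.seed(0)
n = 100
X = np.c_[np.ones(n), 2*np.random.rand(n)]
y = X @ np.array([4.0, 3.0]) + 0.1*np.random.randn(n)
def cost(beta):
    return np.mean((X @ beta - y)**2)
def full_gradient(beta):
    return 2.0/n * X.T @ (X @ beta - y)
beta = np.random.randn(2)
best_beta, best_cost = beta.copy(), cost(beta)
gamma, check_every, tol = 0.05, 10, 1e-6
for epoch in range(1, 501):
    beta -= gamma*full_gradient(beta)   # stand-in for one epoch of (stochastic) updates
    if epoch % check_every == 0:
        if cost(beta) < best_cost:
            best_beta, best_cost = beta.copy(), cost(beta)
        if np.linalg.norm(full_gradient(beta)) < tol:
            break
print("Best beta found:", best_beta)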
#
#
# ## Slightly different approach
#
# Another approach is to let the step length $\gamma_j$ depend on the
# number of epochs in such a way that it becomes very small after a
# reasonable time such that we do not move at all.
#
# As an example, let $e = 0,1,2,3,\cdots$ denote the current epoch and let $t_0, t_1 > 0$ be two fixed numbers. Furthermore, let $t = e \cdot m + i$ where $m$ is the number of minibatches and $i=0,\cdots,m-1$. Then the function $$\gamma_j(t; t_0, t_1) = \frac{t_0}{t+t_1} $$ goes to zero as the number of epochs gets large. I.e. we start with a step length $\gamma_j (0; t_0, t_1) = t_0/t_1$ which decays in *time* $t$.
#
# In this way we can fix the number of epochs, compute $\beta$ and
# evaluate the cost function at the end. Repeating the computation will
# give a different result since the scheme is random by design. Then we
# pick the final $\beta$ that gives the lowest value of the cost
# function.
# In[ ]:
import numpy as np
def step_length(t,t0,t1):
return t0/(t+t1)
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 500 #number of epochs
t0 = 1.0
t1 = 10
gamma_j = t0/t1
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
#Compute new suggestion for beta
t = epoch*m+i
gamma_j = step_length(t,t0,t1)
j += 1
print("gamma_j after %d epochs: %g" % (n_epochs,gamma_j))
# ## Program for stochastic gradient
# In[ ]:
# Importing various packages
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
x = 2*np.random.rand(100,1)
y = 4+3*x+np.random.randn(100,1)
xb = np.c_[np.ones((100,1)), x]
theta_linreg = np.linalg.inv(xb.T.dot(xb)).dot(xb.T).dot(y)
print("Own inversion")
print(theta_linreg)
sgdreg = SGDRegressor(max_iter = 50, penalty=None, eta0=0.1)
sgdreg.fit(x,y.ravel())
print("sgdreg from scikit")
print(sgdreg.intercept_, sgdreg.coef_)
theta = np.random.randn(2,1)
eta = 0.1
Niterations = 1000
m = 100
for iter in range(Niterations):
gradients = 2.0/m*xb.T.dot(xb.dot(theta)-y)
theta -= eta*gradients
print("theta frm own gd")
print(theta)
xnew = np.array([[0],[2]])
xbnew = np.c_[np.ones((2,1)), xnew]
ypredict = xbnew.dot(theta)
ypredict2 = xbnew.dot(theta_linreg)
n_epochs = 50
t0, t1 = 5, 50
m = 100
def learning_schedule(t):
return t0/(t+t1)
theta = np.random.randn(2,1)
for epoch in range(n_epochs):
for i in range(m):
random_index = np.random.randint(m)
xi = xb[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta)-yi)
eta = learning_schedule(epoch*m+i)
theta = theta - eta*gradients
print("theta from own sdg")
print(theta)
plt.plot(xnew, ypredict, "r-")
plt.plot(xnew, ypredict2, "b-")
plt.plot(x, y ,'ro')
plt.axis([0,2.0,0, 15.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Random numbers ')
plt.show()
# ## Using gradient descent methods, limitations
#
# * **Gradient descent (GD) finds local minima of our function**. Since the GD algorithm is deterministic, if it converges, it will converge to a local minimum of our energy function. Because in ML we are often dealing with extremely rugged landscapes with many local minima, this can lead to poor performance.
#
# * **GD is sensitive to initial conditions**. One consequence of the local nature of GD is that initial conditions matter. Depending on where one starts, one will end up at a different local minima. Therefore, it is very important to think about how one initializes the training process. This is true for GD as well as more complicated variants of GD.
#
# * **Gradients are computationally expensive to calculate for large datasets**. In many cases in statistics and ML, the energy function is a sum of terms, with one term for each data point. For example, in linear regression, $E \propto \sum_{i=1}^n (y_i - \mathbf{w}^T\cdot\mathbf{x}_i)^2$; for logistic regression, the square error is replaced by the cross entropy. To calculate the gradient we have to sum over *all* $n$ data points. Doing this at every GD step becomes extremely computationally expensive. An ingenious solution to this, is to calculate the gradients using small subsets of the data called "mini batches". This has the added benefit of introducing stochasticity into our algorithm.
#
# * **GD is very sensitive to choices of learning rates**. GD is extremely sensitive to the choice of learning rates. If the learning rate is very small, the training process take an extremely long time. For larger learning rates, GD can diverge and give poor results. Furthermore, depending on what the local landscape looks like, we have to modify the learning rates to ensure convergence. Ideally, we would *adaptively* choose the learning rates to match the landscape.
#
# * **GD treats all directions in parameter space uniformly.** Another major drawback of GD is that unlike Newton's method, the learning rate for GD is the same in all directions in parameter space. For this reason, the maximum learning rate is set by the behavior of the steepest direction and this can significantly slow down training. Ideally, we would like to take large steps in flat directions and small steps in steep directions. Since we are exploring rugged landscapes where curvatures change, this requires us to keep track of not only the gradient but second derivatives. The ideal scenario would be to calculate the Hessian but this proves to be too computationally expensive.
#
# * GD can take exponential time to escape saddle points, even with random initialization. As we mentioned, GD is extremely sensitive to initial condition since it determines the particular local minimum GD would eventually reach. However, even with a good initialization scheme, through the introduction of randomness, GD can still take exponential time to escape saddle points.
#
# ## Codes from numerical recipes
# You can however use codes we have adapted from the text [Numerical Recipes in C++](http://www.nr.com/), see chapter 10.7.
# Here we present a program, which you can also find at the webpage of the course, where we use the functions **dfpmin** and **lnsrch**. This is a variant of the Broyden et al. algorithm discussed in the previous slide.
#
# * The program uses the harmonic oscillator in one dimensions as example.
#
# * The program does not use armadillo to handle vectors and matrices, but rather employs my own vector-matrix class. These auxiliary functions, and the main program *model.cpp*, can all be found under the [program link here](https://github.com/CompPhysics/ComputationalPhysics2/tree/gh-pages/doc/pub/cg/programs/c%2B%2B).
#
# Below we show only excerpts from the main program. For the full program, see the above link.
#
#
#
#
#
# ## Finding the minimum of the harmonic oscillator model in one dimension
# // Main function begins here
# int main()
# {
# int n, iter;
# double gtol, fret;
# double alpha;
# n = 1;
# // reserve space in memory for vectors containing the variational
# // parameters
# Vector g(n), p(n);
# cout << "Read in guess for alpha" << endl;
# cin >> alpha;
# gtol = 1.0e-5;
# // now call dfmin and compute the minimum
# p(0) = alpha;
# dfpmin(p, n, gtol, &iter, &fret, Efunction, dEfunction);
# cout << "Value of energy minimum = " << fret << endl;
# cout << "Number of iterations = " << iter << endl;
# cout << "Value of alpha at minimum = " << p(0) << endl;
# return 0;
# } // end of main program
#
#
# ## Functions to observe
# The functions **Efunction** and **dEfunction** compute the expectation value of the energy and its derivative.
# They use the quasi-Newton method of [Broyden, Fletcher, Goldfarb, and Shanno (BFGS)](https://www.springer.com/it/book/9780387303031).
# It uses only the first derivatives. The BFGS algorithm has proven good performance even for non-smooth optimizations.
# These functions need to be changed when you want to compute your own derivatives.
# // this function defines the expectation value of the local energy
# double Efunction(Vector &x)
# {
# double value = x(0)*x(0)*0.5+1.0/(8*x(0)*x(0));
# return value;
# } // end of function to evaluate
#
# // this function defines the derivative of the energy
# void dEfunction(Vector &x, Vector &g)
# {
# g(0) = x(0)-1.0/(4*x(0)*x(0)*x(0));
# } // end of function to evaluate
#
# You need to change these functions in order to compute the local energy for your system. I used 1000
# cycles per call to get a new value of $\langle E_L[\alpha]\rangle$.
# When I compute the local energy I also compute its derivative.
# After roughly 10-20 iterations I got a converged result in terms of $\alpha$.
|
CompPhysics/ComputationalPhysics2
|
doc/LectureNotes/_build/jupyter_execute/gradientmethods.py
|
Python
|
cc0-1.0
| 54,902
|
[
"Gaussian"
] |
cababc6abfc5f5578e91caf4966f13bd58e023fca9ce2ad7f19ead65468f17c1
|
import numpy as np
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
from matplotlib.lines import Line2D
from scipy.stats import spearmanr,pearsonr,kendalltau
from scipy.optimize import minimize
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d,UnivariateSpline
from scipy.signal import argrelextrema
import pdb
import pandas as pd
import time
from scipy import signal
def air_to_vacuum(airwl,nouvconv=True):
"""
Returns vacuum wavelength of the provided air wavelength array or scalar.
Good to ~ .0005 angstroms.
If nouvconv is True, does nothing for air wavelength < 2000 angstroms.
Input must be in angstroms.
Adapted from idlutils airtovac.pro, based on the IAU standard
for conversion in Morton (1991 Ap.J. Suppl. 77, 119)
"""
airwl = np.array(airwl,copy=False,dtype=float,ndmin=1)
isscal = airwl.shape == tuple()
if isscal:
airwl = airwl.ravel()
#wavenumber squared
sig2 = (1e4/airwl)**2
convfact = 1. + 6.4328e-5 + 2.94981e-2/(146. - sig2) + 2.5540e-4/( 41. - sig2)
newwl = airwl.copy()
if nouvconv:
convmask = newwl>=2000
newwl[convmask] *= convfact[convmask]
else:
newwl[:] *= convfact
return newwl[0] if isscal else newwl
def gaussian_lines(line_x,line_a,xgrid,width=2.0):
'''
Creates ideal Xenon spectrum
'''
#print 'Creating ideal calibration spectrum'
temp = np.zeros(xgrid.size)
for i in range(line_a.size):
gauss = line_a[i]*np.exp(-(xgrid-line_x[i])**2/(2*width**2))
temp += gauss
return temp
def polyfour(x,a,b,c,d,e,f):
return a + b*x + c*x**2.0 + d*x**3.0 + e*x**4.0 + f*x**5.0
def wavecalibrate(px,fx,slit_x,stretch_est=0.0,shift_est=0.0,quad_est=0.0,cube_est=0.0,fourth_est=0.0,fifth_est=0.0):
#flip and normalize flux
fx = fx - np.min(fx)
fx = fx[::-1]
fx = fx/signal.medfilt(fx,201)
#prep calibration lines into 1d spectra
wm,fm = np.loadtxt('osmos_Xenon.dat',usecols=(0,2),unpack=True)
wm = air_to_vacuum(wm)
xgrid = np.arange(0.0,6800.0,0.01)
lines_gauss = gaussian_lines(wm,fm,xgrid)
interp = interp1d(xgrid,lines_gauss,bounds_error=False,fill_value=0)
#interp = UnivariateSpline(xgrid,lines_gauss)
wave_est = fifth_est*(px-slit_x)**5 + fourth_est*(px-slit_x)**4 + cube_est*(px-slit_x)**3 + quad_est*(px-slit_x)**2 + (px-slit_x)*stretch_est + shift_est #don't subtract the slit pos because interactive plot doesn't (easier)
wm_in = wm[np.where((wm<wave_est.max())&(wm>wave_est.min()))]
#wm_in = wm[np.where((wm<5000.0)&(wm>wave_est.min()))]
px_max = np.zeros(wm_in.size)
for i in range(wm_in.size):
px_in = px[np.where((wave_est<wm_in[i]+5.0)&(wave_est>wm_in[i]-5))]
px_max[i] = px_in[fx[np.where((wave_est<wm_in[i]+5.0)&(wave_est>wm_in[i]-5))].argmax()]
params,pcov = curve_fit(polyfour,(px_max-slit_x),wm_in,p0=[shift_est,stretch_est,quad_est,cube_est,fourth_est,fifth_est])
#return (wave_new,fx,max_fourth,max_cube,max_quad,max_stretch,max_shift)
return (params[0]+params[1]*(px-slit_x)+params[2]*(px-slit_x)**2+params[3]*(px-slit_x)**3.0+params[4]*(px-slit_x)**4.0+params[5]*(px-slit_x)**5.0,fx,params[5],params[4],params[3],params[2],params[1],params[0])
#return (param0+param1*(px-slit_x)+param2*(px-slit_x)**2+param3*(px-slit_x)**3.0+param4*(px-slit_x)**4.0+param5*(px-slit_x)**5.0,fx,params[4],params[3],params[2],params[1],params[0])
def interactive_plot(px,fx,stretch_0,shift_0,quad_0,cube_0,fourth_0,fifth_0,slit_x,wm,fm):
#flip and normalize flux
fx = fx - np.min(fx)
fx = fx[::-1]
'''
#prep calibration lines into 1d spectra
wm_Xe,fm_Xe = np.loadtxt('osmos_Xenon.dat',usecols=(0,2),unpack=True)
wm_Xe = air_to_vacuum(wm_Xe)
wm_Ar,fm_Ar = np.loadtxt('osmos_Argon.dat',usecols=(0,2),unpack=True)
wm_Ar = air_to_vacuum(wm_Ar)
wm_HgNe,fm_HgNe = np.loadtxt('osmos_HgNe.dat',usecols=(0,2),unpack=True)
wm_HgNe = air_to_vacuum(wm_HgNe)
wm_Ne,fm_Ne = np.loadtxt('osmos_Ne.dat',usecols=(0,2),unpack=True)
wm_Ne = air_to_vacuum(wm_Ne)
'''
cal_states = {'Xe':True,'Ar':False,'HgNe':False,'Ne':False}
fig,ax = plt.subplots()
plt.subplots_adjust(left=0.25,bottom=0.30)
l, = ax.plot(fifth_0*(px-slit_x)**5 + fourth_0*(px-slit_x)**4 + cube_0*(px-slit_x)**3 + quad_0*(px-slit_x)**2 + stretch_0*(px-slit_x) + shift_0,fx/10.0,'b')
plt.plot(wm,fm/2.0,'ro')
for i in range(wm.size): ax.axvline(wm[i],color='r')
ax.set_xlim(4000,6000)
ax.set_ylim(0,3500)
#stateax = plt.axes([0.1,0.25,0.15,0.1])
#states = CheckButtons(stateax,cal_states.keys(), cal_states.values())
axstretch = plt.axes([0.25,0.17,0.65,0.03])
axshift = plt.axes([0.25,0.22,0.65,0.03])
fn_quad_0 = 0.0
fn_stretch_0 = 0.0
fn_shift_0 = 0.0
fn_axquad = plt.axes([0.25,0.03,0.65,0.03])
fn_axstretch = plt.axes([0.25,0.07,0.65,0.03])
fn_axshift = plt.axes([0.25,0.12,0.65,0.03])
close_ax = plt.axes([0.05,0.5,0.13,0.1])
slide_stretch = Slider(axstretch, 'Stretch',0.4,1.3,valinit=stretch_0)
slide_shift = Slider(axshift,'Shift',-2000.0,6000.0,valinit=shift_0)
fn_slide_stretch = Slider(fn_axstretch, 'Fine Stretch',-0.05,0.05,valinit=fn_stretch_0)
fn_slide_shift = Slider(fn_axshift,'Fine Shift',-200.0,200.0,valinit=fn_shift_0)
fn_slide_quad = Slider(fn_axquad,'Fine Quad',-4e-5,4e-5,valinit=fn_quad_0)
close_button = Button(close_ax,'Close Plots', hovercolor='0.80')
def set_calib_lines(label):
cal_states[label] = not cal_states[label]
xl = ax.get_xlim()
yl = ax.get_ylim()
ax.cla()
wm = []
fm = []
if cal_states['Xe']:
wm.extend(wm_Xe)
fm.extend(fm_Xe)
if cal_states['Ar']:
wm.extend(wm_Ar)
fm.extend(fm_Ar)
if cal_states['HgNe']:
wm.extend(wm_HgNe)
fm.extend(fm_HgNe)
if cal_states['Ne']:
wm.extend(wm_Ne)
fm.extend(fm_Ne)
wm = np.array(wm)
fm = np.array(fm)
for j in range(wm.size):
ax.axvline(wm[j],color='r')
line, = ax.plot(np.array(wm),np.array(fm)/2.0,'ro',picker=5)# 5 points tolerance
l, = ax.plot(fifth_0*(px-slit_x)**5 + fourth_0*(px-slit_x)**4 + cube_0*(px-slit_x)**3 + (quad_0+fn_slide_quad.val)*(px-slit_x)**2 + (slide_stretch.val+fn_slide_stretch.val)*(px-slit_x) + (slide_shift.val+fn_slide_shift.val),fx/10.0,'b')
ax.set_xlim(xl)
ax.set_ylim(yl)
fig.canvas.draw()
def update(val):
l.set_xdata(fifth_0*(px-slit_x)**5 + fourth_0*(px-slit_x)**4 + cube_0*(px-slit_x)**3 + (quad_0+fn_slide_quad.val)*(px-slit_x)**2+(slide_stretch.val+fn_slide_stretch.val)*(px-slit_x)+(slide_shift.val+fn_slide_shift.val))
fig.canvas.draw_idle()
def fineupdate(val):
l.set_xdata(fifth_0*(px-slit_x)**5 + fourth_0*(px-slit_x)**4 + cube_0*(px-slit_x)**3 + (quad_0+fn_slide_quad.val)*(px-slit_x)**2+(slide_stretch.val+fn_slide_stretch.val)*(px-slit_x)+(slide_shift.val+fn_slide_shift.val))
#slide_stretch.val = slide_stretch.val + fn_slide_stretch.val
#slide_shift.val = slide_shift.val + fn_slide_shift.val
fig.canvas.draw_idle()
def close_plots(event):
plt.close()
slide_stretch.on_changed(update)
slide_shift.on_changed(update)
fn_slide_stretch.on_changed(fineupdate)
fn_slide_shift.on_changed(fineupdate)
fn_slide_quad.on_changed(fineupdate)
close_button.on_clicked(close_plots)
#states.on_clicked(set_calib_lines)
plt.show()
shift_est = slide_shift.val+fn_slide_shift.val
stretch_est = slide_stretch.val+fn_slide_stretch.val
quad_est = quad_0 + fn_slide_quad.val
print 'quad_est:',quad_est, 'stretch est:',stretch_est, 'shift est:',shift_est
return stretch_est,shift_est,quad_est
def interactive_plot_plus(px,fx,wm,fm,stretch_0,shift_0,quad_0):
#main plot
fig,ax = plt.subplots()
plt.subplots_adjust(left=0.25,bottom=0.30)
l, = plt.plot(quad_0*(px-2032.0)**2+stretch_0*px+shift_0,fx/10.0,'b')
plt.plot(wm,fm/2.0,'ro')
for i in range(wm.size): plt.axvline(wm[i],color='r')
plt.xlim(4000,6000)
plt.ylim(0,3500)
axstretch = plt.axes([0.25,0.17,0.65,0.03])
axshift = plt.axes([0.25,0.22,0.65,0.03])
fn_stretch_0 = 0.0
fn_shift_0 = 0.0
fn_axstretch = plt.axes([0.25,0.07,0.65,0.03])
fn_axshift = plt.axes([0.25,0.12,0.65,0.03])
close_ax = plt.axes([0.05,0.5,0.13,0.1])
slide_stretch = Slider(axstretch, 'Stretch',0.4,1.3,valinit=stretch_0)
slide_shift = Slider(axshift,'Shift',-4000.0,4000.0,valinit=shift_0)
fn_slide_stretch = Slider(fn_axstretch, 'Fine Stretch',-0.05,0.05,valinit=fn_stretch_0)
fn_slide_shift = Slider(fn_axshift,'Fine Shift',-200.0,200.0,valinit=fn_shift_0)
close_button = Button(close_ax,'Close Plots', hovercolor='0.80')
#secondary 'zoom' plots
s = plt.figure()
ax2 = s.add_subplot(211)
ax3 = s.add_subplot(212)
l2, = ax2.plot(quad_0*(px-2032.0)**2+stretch_0*px+shift_0,fx/10.0,'b')
ax2.plot(wm,fm/2.0,'ro')
for i in range(wm.size): ax2.axvline(wm[i],color='r')
ax2.set_xlim(4490,4600)
ax2.set_ylim(0,1000)
l3, = ax3.plot(quad_0*(px-2032.0)**2+stretch_0*px+shift_0,fx/10.0,'b')
ax3.plot(wm,fm/2.0,'ro')
for i in range(wm.size): ax3.axvline(wm[i],color='r')
ax3.set_xlim(4900,5100)
ax3.set_ylim(0,1500)
def update(val):
l.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
l2.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
l3.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
fig.canvas.draw_idle()
s.canvas.draw_idle()
def fineupdate(val):
l.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
l2.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
l3.set_xdata(quad_0*(px-2032.0)**2+(slide_stretch.val+fn_slide_stretch.val)*px+(slide_shift.val+fn_slide_shift.val))
#slide_stretch.val = slide_stretch.val + fn_slide_stretch.val
#slide_shift.val = slide_shift.val + fn_slide_shift.val
fig.canvas.draw_idle()
s.canvas.draw_idle()
def close_plots(event):
plt.close()
plt.close()
slide_stretch.on_changed(update)
slide_shift.on_changed(update)
fn_slide_stretch.on_changed(fineupdate)
fn_slide_shift.on_changed(fineupdate)
close_button.on_clicked(close_plots)
plt.show()
shift_est = slide_shift.val+fn_slide_shift.val
stretch_est = slide_stretch.val+fn_slide_stretch.val
print 'quad_0:',quad_0,'stretch_0:',stretch_est,'shift_0:',shift_est
return (quad_0*(px-2032.0)**2+px*stretch_est+shift_est,fx,stretch_est,shift_est)
class LineBrowser:
def __init__(self,fig,ax,est_f,wm,fm,px,xslit,vlines,fline,xspectra,yspectra,peaks,peaks_w,peaks_p,peaks_h,line_matches,cal_states):
#load calibration files
self.wm_Xe,self.fm_Xe = np.loadtxt('osmos_Xenon.dat',usecols=(0,2),unpack=True)
self.wm_Xe = air_to_vacuum(self.wm_Xe)
self.wm_Ar,self.fm_Ar = np.loadtxt('osmos_Argon.dat',usecols=(0,2),unpack=True)
self.wm_Ar = air_to_vacuum(self.wm_Ar)
self.wm_HgNe,self.fm_HgNe = np.loadtxt('osmos_HgNe.dat',usecols=(0,2),unpack=True)
self.wm_HgNe = air_to_vacuum(self.wm_HgNe)
self.wm_Ne,self.fm_Ne = np.loadtxt('osmos_Ne.dat',usecols=(0,2),unpack=True)
self.wm_Ne = air_to_vacuum(self.wm_Ne)
#slider objects
fn_axquad = plt.axes([0.25,0.03,0.65,0.03])
fn_axstretch = plt.axes([0.25,0.07,0.65,0.03])
fn_axshift = plt.axes([0.25,0.12,0.65,0.03])
self.fn_slide_stretch = Slider(fn_axstretch, 'Fine Stretch',-0.05,0.05,valinit=0.0)
self.fn_slide_shift = Slider(fn_axshift,'Fine Shift',-200.0,200.0,valinit=0.0)
self.fn_slide_quad = Slider(fn_axquad,'Fine Quad',-4e-5,4e-5,valinit=0.0)
self.fn_slide_stretch.on_changed(self.slider_update)
self.fn_slide_shift.on_changed(self.slider_update)
self.fn_slide_quad.on_changed(self.slider_update)
self.lastind = 0
self.j = 0
self.est_f = est_f
self.px = px
self.fig = fig
self.ax = ax
self.wm = wm
self.vlines = vlines
self.fline = fline
self.xspectra = xspectra
self.yspectra = yspectra
self.peaks = peaks
self.peaks_w = peaks_w
self.peaks_p = peaks_p
self.peaks_h = peaks_h
self.line_matches = line_matches
self.cal_states = cal_states
self.mindist_el, = np.where(self.peaks_w == self.line_matches['peaks_w'][self.j])
#self.text = ax.text(0.05, 0.95, 'Pick red reference line',transform=ax.transAxes, va='top')
#self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,color='yellow', visible=False)
self.selected = self.ax.axvline(self.line_matches['lines'][self.j],lw=3,alpha=0.5,color='red',ymin=0.5)
self.selected_peak, = self.ax.plot(self.line_matches['peaks_w'][self.j],self.line_matches['peaks_h'][self.j],'o',mec='orange',markersize=8,alpha=0.7,mfc='None',mew=3,visible=True)
self.selected_peak_line = self.ax.axvline(self.line_matches['lines'][self.j],color='cyan',lw=4,alpha=0.3,ymax=0.5,visible=True)
self.reset_lims()
def slider_update(self,val):
#update new wavelength spacing
self.xspectra = self.est_f[0]*(self.px)**5 + self.est_f[1]*(self.px)**4 + self.est_f[2]*(self.px)**3 + (self.est_f[3]+self.fn_slide_quad.val)*(self.px)**2+(self.est_f[4]+self.fn_slide_stretch.val)*(self.px)+(self.est_f[5]+self.fn_slide_shift.val)
self.fline.set_xdata(self.xspectra)
self.peaks_w = self.xspectra[self.peaks]
for k in range(self.wm[self.j:].size):
kj = k + self.j
self.line_matches['peaks_p'][kj] = self.peaks_p[np.argsort(np.abs(self.wm[kj]-self.peaks_w))][0] #closest peak (in pixels)
self.line_matches['peaks_w'][kj] = self.peaks_w[np.argsort(np.abs(self.wm[kj]-self.peaks_w))][0] #closest peak (in wavelength)
self.line_matches['peaks_h'][kj] = self.peaks_h[np.argsort(np.abs(self.wm[kj]-self.peaks_w))][0] #closest peak (height)
self.mindist_el, = np.where(self.peaks_w == self.line_matches['peaks_w'][self.j])
self.mindist_el = self.mindist_el[0]
self.update_circle()
#self.fig.canvas.draw()
self.update_current()
def update_current(self):
if self.j >= len(self.line_matches['peaks_w']):
print 'done with plot'
plt.close()
return
self.selected_peak.set_xdata(self.line_matches['peaks_w'][self.j])
self.selected_peak.set_ydata(self.line_matches['peaks_h'][self.j])
self.selected.set_xdata(self.line_matches['lines'][self.j])
self.selected_peak_line.set_xdata(self.line_matches['lines'][self.j])
self.mindist_el, = np.where(self.peaks_w == self.line_matches['peaks_w'][self.j])
self.mindist_el = self.mindist_el[0]
xlim = self.ax.xaxis.get_view_interval()
ylim = self.ax.yaxis.get_view_interval()
if self.line_matches['lines'][self.j] > xlim[1]:
self.reset_lims()
self.fig.canvas.draw()
def reset_lims(self):
self.ax.set_xlim(self.line_matches['peaks_w'][self.j] - 100, self.line_matches['peaks_w'][self.j] + 500.0)
xlims = self.ax.xaxis.get_view_interval()
y_in = self.yspectra[np.where((self.xspectra>xlims[0])&(self.xspectra<xlims[1]))]
self.ax.set_ylim(top=np.max(y_in)*1.1)
def onpress(self, event):
if event.key not in ('n','m','j','b'): return
if event.key=='n':
self.replace()
if event.key=='m':
self.replace()
if event.key=='j':
self.delete()
if event.key=='b':
self.back_line()
return
def onclick(self, event):
if event.inaxes == self.ax:
if event.button == 1:
# the click locations
x = event.xdata
y = event.ydata
self.mindist_el = np.argsort(np.abs(self.peaks_w-x))[0]
self.update_circle()
def update_circle(self):
self.selected_peak.set_xdata([self.peaks_w[self.mindist_el]])
self.selected_peak.set_ydata([self.peaks_h[self.mindist_el]])
self.fig.canvas.draw()
def replace(self):
self.line_matches['peaks_p'][self.j] = self.peaks_p[self.mindist_el]
self.line_matches['peaks_w'][self.j] = self.peaks_w[self.mindist_el]
self.line_matches['peaks_h'][self.j] = self.peaks_h[self.mindist_el]
self.next_line()
return
def back_line(self):
if self.j >= 1:
self.j -= 1
self.slider_update(1.0)
#self.update_current()
else: return
def next_go(self,event):
self.next_line()
def next_line(self):
self.j += 1
self.update_current()
def finish(self,event):
self.line_matches['peaks_p'] = self.line_matches['peaks_p'][:self.j]
self.line_matches['peaks_w'] = self.line_matches['peaks_w'][:self.j]
self.line_matches['peaks_h'] = self.line_matches['peaks_h'][:self.j]
self.line_matches['lines'] = self.line_matches['lines'][:self.j]
print 'FINISHED GALAXY CALIBRATION'
plt.close()
return
def set_calib_lines(self,label):
self.cal_states[label] = not self.cal_states[label]
xl = self.ax.get_xlim()
yl = self.ax.get_ylim()
#self.ax.cla()
self.wm = []
self.fm = []
for vl in self.vlines:
self.ax.lines.remove(vl)
self.vlines = []
if self.cal_states['Xe']:
self.wm.extend(self.wm_Xe)
self.fm.extend(self.fm_Xe)
if self.cal_states['Ar']:
self.wm.extend(self.wm_Ar)
self.fm.extend(self.fm_Ar)
if self.cal_states['HgNe']:
self.wm.extend(self.wm_HgNe)
self.fm.extend(self.fm_HgNe)
if self.cal_states['Ne']:
self.wm.extend(self.wm_Ne)
self.fm.extend(self.fm_Ne)
self.wm = np.array(self.wm)
self.fm = np.array(self.fm)
for j in range(self.wm.size):
self.vlines.append(self.ax.axvline(self.wm[j],color='r'))
#self.line, = self.ax.plot(np.array(self.wm),np.array(self.fm)/2.0,'ro',picker=5)# 5 points tolerance
#self.selected = self.ax.axvline(self.wm[0],lw=2,alpha=0.7,color='red', visible=False)
#self.selected_peak, = self.ax.plot(np.zeros(1),np.zeros(1),'bo',markersize=4,alpha=0.6,visible=False)
#self.fline, = self.ax.plot(self.xspectra,self.yspectra,'b',picker=5)
self.ax.set_xlim(xl)
self.ax.set_ylim(yl)
self.fig.canvas.draw()
def delete_b(self,event):
self.delete()
def replace_b(self,event):
self.replace()
def delete(self):
self.line_matches['lines'].pop(self.j)
self.line_matches['peaks_p'].pop(self.j)
self.line_matches['peaks_w'].pop(self.j)
self.line_matches['peaks_h'].pop(self.j)
self.wm = np.delete(self.wm,self.j)
self.update_current()
return
if __name__ == '__main__':
from astropy.io import fits as pyfits
wm,fm = np.loadtxt('osmos_Xenon.dat',usecols=(0,2),unpack=True)
wm = air_to_vacuum(wm)
arcfits = pyfits.open('C4_0199/arcs/arc590813.0001b.fits')
data = arcfits[0].data
xpos = 500.0
xpos2 = 1500.0
p_x = np.arange(0,4064,1)
f_x = np.sum(data[1670:1705,:],axis=0)
stretch_est,shift_est,quad_est = interactive_plot(p_x,f_x,0.70,0.0,0.0,0.0,0.0,0.0,2000)
line_matches = {'lines':[],'peaks':[]}
fig,ax = plt.subplots(1)
plt.subplots_adjust(right=0.8)
for j in range(wm.size):
ax.axvline(wm[j],color='r')
line, = ax.plot(wm,fm/2.0,'ro',picker=5)# 5 points tolerance
fline, = plt.plot(quad_est*(p_x-2000)**2 + stretch_est*(p_x-2000) + shift_est,(f_x[::-1]-f_x.min())/10.0,'b',picker=5)
closeax = plt.axes([0.83, 0.3, 0.15, 0.1])
button = Button(closeax, 'Add Line', hovercolor='0.975')
#rax = plt.axes([0.85, 0.5, 0.1, 0.2])
#radio = RadioButtons(rax, ('Select Line', 'Select Peak'))
browser = LineBrowser(fig,ax,line,wm,p_x,fline,line_matches)
fig.canvas.mpl_connect('pick_event', browser.onpick)
fig.canvas.mpl_connect('key_press_event',browser.onpress)
button.on_clicked(browser.add_line)
#radio.on_clicked(browser.radioset)
plt.show()
params,pcov = curve_fit(polyfour,np.sort(browser.line_matches['peaks']),np.sort(browser.line_matches['lines']),p0=[shift_est,stretch_est,quad_est,1e-8,1e-12,1e-12])
print params
wave,Flux,fifth,fourth,cube,quad,stretch,shift = wavecalibrate(p_x,f_x,1679.1503,0.7122818,2778.431)
#p_x2 = np.arange(0,4064,1) + 1000.0
#wave2,Flux2,cube2,quad2,stretch2,shift2 = wavecalibrate(p_x2,f_x,stretch,shift-(xpos2*stretch-xpos*stretch),quad)
|
giffordw/OSMOSreduce
|
testopt.py
|
Python
|
bsd-3-clause
| 21,628
|
[
"Galaxy"
] |
b28a3dd6afcfdef96f9cacd2125aac0c812e4fafe7b4d98355af8af772ab4d86
|
import numpy as np
import netCDF4 as nc
from .base_grid import BaseGrid
class CiceGrid(BaseGrid):
def __init__(self, **kwargs):
self.type = 'Arakawa B'
self.full_name = 'CICE'
super(CiceGrid, self).__init__(**kwargs)
@classmethod
def fromfile(cls, h_grid_def, mask_file=None,
description='CICE tripolar'):
"""
Read in grid definition from file(s).
"""
with nc.Dataset(h_grid_def) as f:
x_t = np.rad2deg(f.variables['tlat'][:])
y_t = np.rad2deg(f.variables['tlon'][:])
x_u = np.rad2deg(f.variables['ulat'][:])
y_u = np.rad2deg(f.variables['ulon'][:])
dx_tn = f.variables['htn'][:] * 100.0
dy_te = f.variables['hte'][:] * 100.0
area_t = f.variables['tarea'][:]
area_u = f.variables['uarea'][:]
angle_t = np.rad2deg(f.variables['angleT'][:])
angle_u = np.rad2deg(f.variables['angle'][:])
if 'clon_t' in f.variables:
clon_t = f.variables['clon_t'][:]
clat_t = f.variables['clat_t'][:]
clon_u = f.variables['clon_u'][:]
clat_u = f.variables['clat_u'][:]
else:
clon_t = clat_t = clon_u = clat_u = None
if mask_file is not None:
with nc.Dataset(mask_file) as f:
mask_t = f.variables['kmt'][:]
return cls(x_t=x_t, y_t=y_t, x_u=x_u, y_u=y_u,
dx_t=dx_tn, dy_t=dy_te,
dx_tn=dx_tn, dy_te=dy_te,
area_t=area_t, area_u=area_u,
clat_t=clat_t, clon_t=clon_t, clat_u=clat_u, clon_u=clon_u,
mask_t=mask_t, description=description)
def write(self, grid_filename, mask_filename):
"""
Write out CICE grid to netcdf.
"""
f = nc.Dataset(grid_filename, 'w')
# Create dimensions.
f.createDimension('nx', self.num_lon_points)
f.createDimension('ny', self.num_lat_points)
f.createDimension('nc', 4)
# Make all CICE grid variables.
ulat = f.createVariable('ulat', 'f8', dimensions=('ny', 'nx'))
ulat.units = "radians"
ulat.title = "Latitude of U points"
ulon = f.createVariable('ulon', 'f8', dimensions=('ny', 'nx'))
ulon.units = "radians"
ulon.title = "Longitude of U points"
tlat = f.createVariable('tlat', 'f8', dimensions=('ny', 'nx'))
tlat.units = "radians"
tlat.title = "Latitude of T points"
tlon = f.createVariable('tlon', 'f8', dimensions=('ny', 'nx'))
tlon.units = "radians"
tlon.title = "Longitude of T points"
if self.clon_t is not None:
clon_t = f.createVariable('clon_t', 'f8',
dimensions=('nc', 'ny', 'nx'))
clon_t.units = "radians"
clon_t.title = "Longitude of T cell corners"
clat_t = f.createVariable('clat_t', 'f8',
dimensions=('nc', 'ny', 'nx'))
clat_t.units = "radians"
clat_t.title = "Latitude of T cell corners"
clon_u = f.createVariable('clon_u', 'f8',
dimensions=('nc', 'ny', 'nx'))
clon_u.units = "radians"
clon_u.title = "Longitude of U cell corners"
clat_u = f.createVariable('clat_u', 'f8',
dimensions=('nc', 'ny', 'nx'))
clat_u.units = "radians"
clat_u.title = "Latitude of U cell corners"
htn = f.createVariable('htn', 'f8', dimensions=('ny', 'nx'))
htn.units = "cm"
htn.title = "Width of T cells on North side."
hte = f.createVariable('hte', 'f8', dimensions=('ny', 'nx'))
hte.units = "cm"
hte.title = "Width of T cells on East side."
angle = f.createVariable('angle', 'f8', dimensions=('ny', 'nx'))
angle.units = "radians"
angle.title = "Rotation angle of U cells."
angleT = f.createVariable('angleT', 'f8', dimensions=('ny', 'nx'))
angleT.units = "radians"
angleT.title = "Rotation angle of T cells."
area_t = f.createVariable('tarea', 'f8', dimensions=('ny', 'nx'))
area_t.units = "m^2"
area_t.title = "Area of T cells."
area_u = f.createVariable('uarea', 'f8', dimensions=('ny', 'nx'))
area_u.units = "m^2"
area_u.title = "Area of U cells."
area_t[:] = self.area_t[:]
area_u[:] = self.area_u[:]
# Convert units: degrees -> radians.
tlat[:] = np.deg2rad(self.y_t)
tlon[:] = np.deg2rad(self.x_t)
ulat[:] = np.deg2rad(self.y_u)
ulon[:] = np.deg2rad(self.x_u)
if self.clon_t is not None:
clon_t[:] = np.deg2rad(self.clon_t)
clat_t[:] = np.deg2rad(self.clat_t)
clon_u[:] = np.deg2rad(self.clon_u)
clat_u[:] = np.deg2rad(self.clat_u)
# Convert from m to cm.
htn[:] = self.dx_tn[:] * 100.
hte[:] = self.dy_te[:] * 100.
angle[:] = np.deg2rad(self.angle_u[:])
angleT[:] = np.deg2rad(self.angle_t[:])
f.close()
with nc.Dataset(mask_filename, 'w') as f:
f.createDimension('nx', self.num_lon_points)
f.createDimension('ny', self.num_lat_points)
mask = f.createVariable('kmt', 'f8', dimensions=('ny', 'nx'))
# CICE uses 0 as masked, whereas internally we use 1 as masked.
mask[:] = (1 - self.mask_t)
|
DoublePrecision/esmgrids
|
esmgrids/cice_grid.py
|
Python
|
apache-2.0
| 5,621
|
[
"NetCDF"
] |
efd038d03dd46f3e84a72193680c358123e98bb2dda8fba88b2b90ceb4c1b5e8
|
from google.appengine.ext import ndb
#
# NDB Models
#
# This class contains the NDB models for my TriviaGame project.
#
# Author: Brian Doherty
#
#
# Category Class
#
#class Category(ndb.Model):
#
# Question Class
#
#class Question(ndb.Model):
#
# User Properties Class
#
class UserProperty(ndb.Model):
userName = ndb.StringProperty()
userId = ndb.StringProperty()
userStatus = ndb.StringProperty()
#
# User Access Request Class
#
class AccessRequest(ndb.Model):
userName = ndb.StringProperty()
userId = ndb.StringProperty()
requestedAccess = ndb.StringProperty()
requestReason = ndb.StringProperty()
|
bedoherty/TriviaGame
|
Web Backend/TriviaServer/ndbmodels.py
|
Python
|
mit
| 612
|
[
"Brian"
] |
5502964643f1c4bc4cd5db089b0474d80f339a4c53e75beb9cbf5dbe59f4cc5c
|
"""fusioncatcher subcommand tests"""
# (c) 2015-2021 Wibowo Arindrarto <contact@arindrarto.dev>
import json
import pytest
from click.testing import CliRunner
from crimson.cli import main
from .utils import get_test_path, getattr_nested
@pytest.fixture(scope="module")
def fusioncatcher_fail():
runner = CliRunner()
in_file = get_test_path("fusioncatcher_nope.txt")
result = runner.invoke(main, ["fusioncatcher", in_file])
return result
@pytest.fixture(scope="module")
def fusioncatcher_v0995a():
runner = CliRunner()
in_file = get_test_path("fusioncatcher_v0995a.txt")
result = runner.invoke(main, ["fusioncatcher", in_file])
result.json = json.loads(result.output)
return result
@pytest.fixture(scope="module")
def fusioncatcher_v100():
runner = CliRunner()
in_file = get_test_path("fusioncatcher_v100.txt")
result = runner.invoke(main, ["fusioncatcher", in_file])
result.json = json.loads(result.output)
return result
@pytest.fixture(scope="module")
def fusioncatcher_v120_empty():
runner = CliRunner()
in_file = get_test_path("fusioncatcher_v120_empty.txt")
result = runner.invoke(main, ["fusioncatcher", in_file])
result.json = json.loads(result.output)
return result
def test_fusioncatcher_fail_exit_code(fusioncatcher_fail):
assert fusioncatcher_fail.exit_code != 0
def test_fusioncatcher_fail_output(fusioncatcher_fail):
err_msg = "Unexpected column names:"
assert err_msg in fusioncatcher_fail.output
@pytest.mark.parametrize(
"attrs, exp_v0955a",
[
([0, "5end", "geneSymbol"], "RUNX1"),
([0, "5end", "geneID"], "ENSG00000159216"),
([0, "5end", "exonID"], "ENSE00003512550"),
([0, "5end", "chromosome"], "21"),
([0, "5end", "position"], 34859474),
([0, "5end", "strand"], "-"),
([0, "3end", "geneSymbol"], "RUNX1T1"),
([0, "3end", "geneID"], "ENSG00000079102"),
([0, "3end", "exonID"], "ENSE00003614817"),
([0, "3end", "chromosome"], "8"),
([0, "3end", "position"], 92017363),
([0, "3end", "strand"], "-"),
(
[0, "fusionDescription"],
["known", "chimerdb2", "ticdb", "tcga", "cell_lines"],
),
([0, "nCommonMappingReads"], 0),
([0, "nSpanningPairs"], 41),
([0, "nSpanningUniqueReads"], 16),
([0, "longestAnchorLength"], 62),
([0, "fusionFindingMethod"], ["BOWTIE", "BOWTIE+BLAT", "BOWTIE+STAR"]),
(
[0, "fusionSequence"],
"CTACCACAGAGCCATCAAAATCACAGTGGATGGGCCCCGAGAACCTCGAA*ATCGTACTGAGAAGCA"
"CTCCACAATGCCAGACTCACCTGTGGATGTGAAG",
),
([0, "predictedEffect"], "in-frame"),
(
[0, "predictedFusedTranscripts", 0],
"ENST00000437180:803/ENST00000522467:311",
),
(
[0, "predictedFusedTranscripts", -1],
"ENST00000344691:2110/ENST00000518992:377",
),
(
[0, "predictedFusedProteins", 0],
"MASDSIFESFPSYPQCFMRECILGMNPSRDVHDASTSRRFTPPSTALSPGKMSEALPLGAPDAGAAL"
"AGKLRSGDRSMVEVLADHPGELVRTDSPNFLCSVLPTHWRCNKTLPIAFKVVALGDVPDGTLVTVMA"
"GNDENYSAELRNATAAMKNQVARFNDLRFVGRSGRGKSFTLTITVFTNPPQVATYHRAIKITVDGPR"
"EPRNRTEKHSTMPDSPVDVKTQSRLTPPTMPPPPTTQGAPRTSSFTPTTLTNGTSHSPTALNGAPSP"
"PNGFSNGPSSSSSSSLANQQLPPACGARQLSKLKRFLTTLQQFGNDISPEIGERVRTLVLGLV",
),
(
[0, "predictedFusedProteins", -1],
"MRIPVDASTSRRFTPPSTALSPGKMSEALPLGAPDAGAALAGKLRSGDRSMVEVLADHPGELVRTDS"
"PNFLCSVLPTHWRCNKTLPIAFKVVALGDVPDGTLVTVMAGNDENYSAELRNATAAMKNQVARFNDL"
"RFVGRSGRGKSFTLTITVFTNPPQVATYHRAIKITVDGPREPRNRTEKHSTMPDSPVDVKTQSRLTP"
"PTMPPPPTTQGAPRTSSFTPTTLTNGTSHSPTALNGAPSPPNGFSNGPSSSSSSSLANQQLPPACGA"
"RQLSKLKRFLTTLQQFGNDISPEIGERVRTLVLGLVNSTLTIEEFHSKLQEATNFPLRPFVIPFLKA"
"NLPLLQRELLHCARLAKQNPAQYLAQHEQLLLDASTTSPVDS",
),
([-1, "5end", "geneSymbol"], "VPS45"),
([-1, "5end", "geneID"], "ENSG00000136631"),
([-1, "5end", "exonID"], "ENSE00003679462"),
([-1, "5end", "chromosome"], "1"),
([-1, "5end", "position"], 150110627),
([-1, "5end", "strand"], "+"),
([-1, "3end", "geneSymbol"], "PLEKHO1"),
([-1, "3end", "geneID"], "ENSG00000023902"),
([-1, "3end", "exonID"], "ENSE00003616995"),
([-1, "3end", "chromosome"], "1"),
([-1, "3end", "position"], 150150912),
([-1, "3end", "strand"], "+"),
(
[-1, "fusionDescription"],
[
"adjacent",
"known",
"healthy",
"hpa",
"banned",
"1K<gap<10K",
"readthrough",
],
),
([-1, "nCommonMappingReads"], 0),
([-1, "nSpanningPairs"], 1),
([-1, "nSpanningUniqueReads"], 3),
([-1, "longestAnchorLength"], 29),
([-1, "fusionFindingMethod"], ["BOWTIE"]),
(
[-1, "fusionSequence"],
"TGAGGATTGTCCTGGGAGGCACCACAGTGCACAACACGAAAAG*"
"GGACCTCAGGATGGAAACCAGCAGCCTGCACCGCCCGAGAAGG",
),
([-1, "predictedEffect"], "out-of-frame"),
(
[-1, "predictedFusedTranscripts", 0],
"ENST00000369128:1550/ENST00000369124:309",
),
(
[-1, "predictedFusedTranscripts", -1],
"ENST00000369130:2171/ENST00000369124:309",
),
(
[-1, "predictedFusedProteins", 0],
"MVYTQSEILQKEVYLFERIDSQNREIMKHLKAICFLRPTKENVDYIIQELRRPKYTIYFIYFSNVISK"
"SDVKSLAEADEQEVVAEVQQVITKEYELFEFRRTEVPPLLLILDRCDDAITPLLNQWTYQAMVHELLG"
"INNNRIDLSRVPGISKDLREVVLSAENDEFYANNMYLNFAEIGSNIKNLMEDFQKKKPKEQQKLESIA"
"DMKAFVENYPQFKKMSGTVSKHVTVVGELSRLVSERNLLEVSEVEQELACQNDHSSALQNIKRLLQNP"
"KVTEFDAARLVMLYALHYERHSSNSLPGLMMDLRNKGVSEKYRKLVSAVVEYGGKRVRGSDLFSPKDA"
"VAITKQFLKGLKGVENVYTQHQPFLHETLDHLIKGRLKENLYPYLGPSTLRDRPQDIIVFVIGGATYE"
"EALTVYNLNRTTPGVRIVLGGTTVHNTKRDLRMETSSLHRPRRSAGSGNSAGKGFSGRFGKTAMWC",
),
(
[-1, "predictedFusedProteins", -1],
"MNVVFAVKQYISKMIEDSGPGMKVLLMDKETTGIVSMVYTQSEILQKEVYLFERIDSQNREIMKHLKA"
"ICFLRPTKENVDYIIQELRRPKYTIYFIYFSNVISKSDVKSLAEADEQEVVAEVQEFYGDYIAVNPHL"
"FSLNILGCCQGRNWDPAQLSRTTQGLTALLLSLKKCPMIRYQLSSEAAKRLAECVKQVITKEYELFEF"
"RRTEVPPLLLILDRCDDAITPLLNQWTYQAMVHELLGINNNRIDLSRVPGISKDLREVVLSAENDEFY"
"ANNMYLNFAEIGSNIKNLMEDFQKKKPKEQQKLESIADMKAFVENYPQFKKMSGTVSKHVTVVGELSR"
"LVSERNLLEVSEVEQELACQNDHSSALQNIKRLLQNPKVTEFDAARLVMLYALHYERHSSNSLPGLMM"
"DLRNKGVSEKYRKLVSAVVEYGGKRVRGSDLFSPKDAVAITKQFLKGLKGVENVYTQHQPFLHETLDH"
"LIKGRLKENLYPYLGPSTLRDRPQDIIVFVIGGATYEEALTVYNLNRTTPGVRIVLGGTTVHNTKRDL"
"RMETSSLHRPRRSAGSGNSAGKGFSGRFGKTAMWC",
),
],
)
def test_fusioncatcher_v0995a(fusioncatcher_v0995a, attrs, exp_v0955a):
assert getattr_nested(fusioncatcher_v0995a.json, attrs) == exp_v0955a, ", ".join(
[repr(x) for x in attrs]
)
@pytest.mark.parametrize(
"attrs, exp_v100",
[
([0, "5end", "geneSymbol"], "ADGRE2"),
([0, "3end", "geneSymbol"], "ADGRE5"),
([-1, "5end", "geneSymbol"], "NSF"),
([-1, "3end", "geneSymbol"], "LRRC37A3"),
],
)
def test_fusioncatcher_v100(fusioncatcher_v100, attrs, exp_v100):
assert getattr_nested(fusioncatcher_v100.json, attrs) == exp_v100, ", ".join(
[repr(x) for x in attrs]
)
def test_fusioncatcher_v120_empty(fusioncatcher_v120_empty):
err_msg = "Unexpected column names:"
assert err_msg not in fusioncatcher_v120_empty.output
|
bow/crimson
|
tests/test_fusioncatcher.py
|
Python
|
bsd-3-clause
| 7,773
|
[
"Bowtie"
] |
a163375e9cad7d9465cd612d3ea8a06fb3ba161cb06c82b3e5f543a8f22b7144
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Determines the declaration, r/w status, and last use of each variable"""
import ast
import sys
from .runtime import HYBRID_GLOBALS
from .util import _internal_assert
class PyVariableUsage(ast.NodeVisitor):
"""The vistor class to determine the declaration, r/w status, and last use of each variable"""
#pylint: disable=invalid-name
#pylint: disable=missing-docstring
def __init__(self, args, symbols, closure_vars):
self.status = {}
self.scope_level = []
self._args = {}
self.args = args
self.aug_assign_ = False
self.symbols = symbols
self.closure_vars = closure_vars
def visit_FunctionDef(self, node):
self.scope_level.append(node)
_internal_assert(len(node.args.args) == len(self.args), \
'#arguments passed should be the same as #arguments defined')
for idx, arg in enumerate(node.args.args):
_attr = 'id' if sys.version_info[0] < 3 else 'arg' # To make py2 and 3 compatible
self._args[getattr(arg, _attr)] = self.args[idx]
for i in node.body:
self.visit(i)
def visit_For(self, node):
_internal_assert(isinstance(node.target, ast.Name), \
"For's iterator should be an id")
self.visit(node.iter)
self.scope_level.append(node)
for i in node.body:
self.visit(i)
self.scope_level.pop()
def visit_Call(self, node):
#No function pointer supported so far
_internal_assert(isinstance(node.func, ast.Name), "Function call should be an id")
func_id = node.func.id
_internal_assert(func_id in list(HYBRID_GLOBALS.keys()) + \
['range', 'max', 'min', 'len'] + \
list(self.symbols.keys()), \
"Function call id not in intrinsics' list")
for elem in node.args:
self.visit(elem)
def visit_AugAssign(self, node):
self.aug_assign_ = True
self.generic_visit(node)
self.aug_assign_ = False
def visit_Name(self, node):
# If it is True or False, we do not worry about it!
if sys.version_info[0] == 2 and node.id in ['True', 'False']:
return
# If it is from the argument list or loop variable, we do not worry about it!
if node.id in self._args.keys():
return
fors = [loop.target.id for loop in self.scope_level if isinstance(loop, ast.For)]
if node.id in fors:
return
        # The loop variable cannot be overwritten during iteration
_internal_assert(not isinstance(node.ctx, ast.Store) or node.id not in fors, \
"Iter var cannot be overwritten")
if node.id not in self.status.keys():
# It is a captured value in closure
if node.id in self.closure_vars:
try:
ast.literal_eval(str(self.closure_vars[node.id]))
except ValueError:
raise ValueError("Only support capturing constant values in closure")
return
_internal_assert(isinstance(node.ctx, ast.Store), \
'Undeclared variable %s' % node.id)
if self.aug_assign_:
raise ValueError('"First store" cannot be an AugAssign')
self.status[node.id] = (node, self.scope_level[-1], set())
else:
decl, loop, usage = self.status[node.id]
usage.add(type(node.ctx))
_internal_assert(loop in self.scope_level,
"%s is used out of the scope it is defined!" % node.id)
self.status[node.id] = (decl, loop, usage)
def determine_variable_usage(root, args, symbols, closure_vars):
"""The helper function for calling the dedicated visitor."""
visitor = PyVariableUsage(args, symbols, closure_vars)
visitor.visit(root)
return visitor.status
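# A minimal, hypothetical smoke test appended here for illustration only (not part of the
# upstream module): it parses a tiny Python function with `ast` and runs the visitor over
# it. The argument list, symbol table, and closure dict below are placeholders standing in
# for the real TVM objects; the visitor only checks their names, never their values.
if __name__ == "__main__":
    _demo_src = "def kernel(a):\n    b = a\n    return b"
    _demo_tree = ast.parse(_demo_src).body[0]
    _usage = determine_variable_usage(_demo_tree, args=[0], symbols={}, closure_vars={})
    # Each entry maps a variable name to (declaration node, enclosing scope, set of ctx types).
    print(sorted(_usage.keys()))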
|
Huyuwei/tvm
|
python/tvm/hybrid/preprocessor.py
|
Python
|
apache-2.0
| 4,765
|
[
"VisIt"
] |
c990c494b08c5c7bfde86f3eadfbaef294caa25de13faebf732f786931e28d64
|
import os
from intermol.gromacs.gromacs_parser import load_gromacs
def load_top_opls(top_path, mol_name=None):
"""Load a gromacs .top file parameterized with OPLS types. """
if mol_name is not None:
with open(top_path) as top_file:
if mol_name not in top_file.read():
return None, None, None
split_path = os.path.split(top_path)
filename = split_path[-1]
gro_file = '{}-gas.gro'.format(filename[:-4])
gro_path = os.path.join(split_path[0], gro_file)
system = load_gromacs(top_path, gro_path)
opls_types = [atom.atomtype[0] for atom in system.atoms]
mol_name = [name for name in system.molecule_types][0]
return system, opls_types, mol_name
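# A hypothetical usage sketch (not in the original file). The topology path below is a
# placeholder: it must point at an OPLS-typed Gromacs .top file with a matching
# "<name>-gas.gro" coordinate file next to it, as load_top_opls() assumes.
if __name__ == '__main__':
    system, opls_types, mol_name = load_top_opls('ethanol.top', mol_name='ethanol')
    if system is None:
        print('molecule name not found in topology')
    else:
        print(mol_name, 'first OPLS types:', opls_types[:5])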
|
Jonestj1/foyer
|
foyer/utils/io.py
|
Python
|
mit
| 722
|
[
"Gromacs"
] |
ffbce06c010b1cc39f4ae5ed592ec42e009757f75b79360ae13eb7d0e877e2a4
|
#!/usr/bin/python
"""Test of sayAll."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"1. KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Back to the Gnome Bugzilla home page'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'New bug'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Browse'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Reports'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Account'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Admin'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Help'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Logged In joanmarie.diggs@gmail.com'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Log Out'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Enter Bug: orca \u2013 This page lets you enter a new bug into Bugzilla.'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Before reporting a bug, please read the'",
"SPEECH OUTPUT: 'bug writing guidelines'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', please look at the list of'",
"SPEECH OUTPUT: 'most frequently reported bugs'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', and please'",
"SPEECH OUTPUT: 'search'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'or'",
"SPEECH OUTPUT: 'browse'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for the bug.'",
"SPEECH OUTPUT: 'Reporter:'",
"SPEECH OUTPUT: 'joanmarie.diggs@gmail.com'",
"SPEECH OUTPUT: 'Product:'",
"SPEECH OUTPUT: 'orca'",
"SPEECH OUTPUT: 'Version:'",
"SPEECH OUTPUT: '2.21.x '",
"SPEECH OUTPUT: 'List with 9 items'",
"SPEECH OUTPUT: 'Component'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'braille '",
"SPEECH OUTPUT: 'List with 5 items'",
"SPEECH OUTPUT: 'GNOME version'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'Unspecified'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'OS'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'Linux'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'Severity'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'normal'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'Summary:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Description:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Optional Fields'",
"SPEECH OUTPUT: 'Cc:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Keywords'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Depends on:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Blocks:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Commit'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'Remember values as bookmarkable template'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'We've made a guess at your operating system.'",
"SPEECH OUTPUT: 'Please check it and, if we got it wrong, email bugmaster@gnome.org.'",
"SPEECH OUTPUT: 'Saved Searches:'",
"SPEECH OUTPUT: 'All Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Firefox'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'open orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Open RFEs'",
"SPEECH OUTPUT: 'link'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/firefox/say_all_enter_bug.py
|
Python
|
lgpl-2.1
| 4,023
|
[
"ORCA"
] |
e7a31529b6052ee257871dd85a53496231a4d479bcdf17092dcfddbca49aa529
|
tests = [("python", "testConstraints.py", {}), ]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
rvianello/rdkit
|
Code/ForceField/Wrap/test_list.py
|
Python
|
bsd-3-clause
| 222
|
[
"RDKit"
] |
ffe68cd9711c73585d93c625763bc7cee89ad3a61bd0d4323abae283974b1c53
|
#!/usr/bin/env python
# This Python file uses the following encoding: utf-8
#
# Copyright (C) 2010-2018 Davide Andreoli <dave@gurumeditation.it>
#
# This file is part of EpyMC, an EFL based Media Center written in Python.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import epymc.utils as utils
import epymc.gui as gui
def DBG(*args):
print('YTDL:', *args)
pass
class YoutubeDL(object):
""" Helper class to interact with the youtube-dl executable """
def __init__(self):
self.exe = os.path.join(utils.user_cache_dir, 'youtube-dl')
self._update_dialog = None
self._done_cb = None
self._done_cb_kargs = None
self._url_dialog = None
@property
def installed(self):
""" True if youtube-dl is already installed """
return os.path.exists(self.exe)
def get_real_video_url(self, url, done_cb, **kargs):
""" Scrape the given url using youtube-dl
Args:
url: any url supported by youtube-dl
done_cb: function to call when the process is completed
signature: func(real_url, **kargs)
**kargs: any other keyword arguments will be passed back in done_cb
"""
DBG('Getting real url for:', url)
self._done_cb = done_cb
self._done_cb_kargs = kargs
txt = _('Please wait while searching the video...') + '<br><br>' + \
_('For info and credits please visit:') + '<br>' + \
'<info>rg3.github.io/youtube-dl</>'
self._url_dialog = gui.EmcDialog(style='minimal', title=_('Youtube-DL'),
text=txt, spinner=True)
utils.EmcExec('{} --get-url --format best "{}"'.format(self.exe, url),
grab_output=True, done_cb=self._get_real_video_url_cb)
def _get_real_video_url_cb(self, cmd_output):
self._url_dialog.delete()
self._done_cb(cmd_output, **self._done_cb_kargs)
def check_update(self, verbose=True, quiet=False, done_cb=None, **kargs):
""" Check if a newer version is available and download if needed
Args:
verbose (bool): Show the progress dialog also while checking versions
quiet (bool): Do not show any dialog at all
done_cb: function to call when the process is completed
signature: func(success, dialog, **kargs)
**kargs: any other keyword arguments will be passed back in done_cb
"""
self._local_version = None
self._remote_version = None
self._update_dialog = None
self._update_text = ''
self._quiet_update = quiet
self._done_cb = done_cb
self._done_cb_kargs = kargs
txt = _('Checking for updates, please wait.') + '<br>'
if verbose:
self._update_dialog = gui.EmcDialog(style='progress', text=txt,
title=_('Youtube-DL'))
else:
self._update_text += txt
# check local version...
utils.EmcExec('{} --version'.format(self.exe), grab_output=True,
done_cb=self._local_vers_cb)
# check remote version...
utils.EmcUrl('http://youtube-dl.org/latest/version',
done_cb=self._remote_vers_cb)
def _local_vers_cb(self, version):
self._local_version = version.strip() if version else _('Unknown')
DBG('Local version:', self._local_version)
txt = '<name>{}:</name> {}<br>'.format(_('Local version'),
self._local_version)
if self._update_dialog:
self._update_dialog.text_append(txt)
else:
self._update_text += txt
self._local_or_remote_done()
def _remote_vers_cb(self, url, status, version):
self._remote_version = version.strip() if status == 200 else _('Unknown')
DBG('Upstream version:', self._remote_version)
txt = '<name>{}:</name> {}<br>'.format(_('Upstream version'),
self._remote_version)
if self._update_dialog:
self._update_dialog.text_append(txt)
else:
self._update_text += txt
self._local_or_remote_done()
def _local_or_remote_done(self):
if self._local_version is None or self._remote_version is None:
return
if self._local_version == _('Unknown') or \
self._remote_version == _('Unknown') or \
self._remote_version != self._local_version:
self._download_latest()
return
txt = '<success>{}</success>'.format(_('Already updated'))
if self._update_dialog:
self._update_dialog.text_append(txt)
self._update_dialog.progress_set(1.0)
else:
self._update_text += txt
if callable(self._done_cb):
self._done_cb(True, self._update_dialog, **self._done_cb_kargs)
elif self._update_dialog:
self._update_dialog.delete()
def _download_latest(self):
txt = '<info>{}</info><br>'.format(_('Updating to latest release...'))
if self._update_dialog:
self._update_dialog.text_append(txt)
elif self._quiet_update is False:
self._update_text += txt
self._update_dialog = gui.EmcDialog(style='progress',
title=_('Youtube-DL'),
text=self._update_text)
utils.download_url_async('http://youtube-dl.org/latest/youtube-dl',
dest=self.exe + '.temp',
progress_cb=self._dwn_progress_cb,
complete_cb=self._dwn_complete_cb)
def _dwn_progress_cb(self, dest, dltotal, dlnow):
if self._update_dialog:
self._update_dialog.progress_set((dlnow / dltotal) if dltotal else 0)
def _dwn_complete_cb(self, dest, status):
if status == 200:
os.chmod(dest, 0o0744) # (make it executable)
os.rename(dest, self.exe) # (atomically remove ".temp")
txt = '<success>{}</success>'.format(_('Download completed'))
if self._update_dialog:
self._update_dialog.text_append(txt)
else:
self._update_text += txt
else:
DBG("ERROR: download failed")
txt = '<failure>{}</failure>'.format(_('Download failed'))
if self._update_dialog:
self._update_dialog.text_append(txt)
else:
self._update_text += txt
if callable(self._done_cb):
self._done_cb(True if status == 200 else False, self._update_dialog,
**self._done_cb_kargs)
elif self._update_dialog:
self._update_dialog.delete()
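# A hypothetical usage sketch (not part of the original module). Only the constructor and
# the `installed` property are exercised here: the download and scrape methods need a
# running EpyMC/EFL main loop to drive their dialogs and asynchronous callbacks.
if __name__ == '__main__':
    ytdl = YoutubeDL()
    print('youtube-dl executable expected at:', ytdl.exe)
    print('already installed:', ytdl.installed)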
|
DaveMDS/epymc
|
epymc/youtubedl.py
|
Python
|
gpl-3.0
| 7,519
|
[
"VisIt"
] |
92a3174708f34b0b347ada6181cb6dadc476e7584a7b8481c2f468c783861e77
|
from __future__ import (absolute_import, division, print_function)
from mantid.api import (DataProcessorAlgorithm, mtd, AlgorithmFactory,
FileProperty, FileAction,
MultipleFileProperty, WorkspaceProperty,
PropertyMode, Progress)
from mantid.simpleapi import (LoadIsawUB, LoadInstrument,
SetGoniometer, ConvertToMD, Load,
LoadIsawDetCal, LoadMask,
DeleteWorkspace, MaskDetectors,
ConvertToMDMinMaxGlobal)
from mantid.kernel import VisibleWhenProperty, PropertyCriterion, Direction
from mantid import logger
class ConvertMultipleRunsToSingleCrystalMD(DataProcessorAlgorithm):
def category(self):
return "MDAlgorithms\\Creation"
def name(self):
return "ConvertMultipleRunsToSingleCrystalMD"
def summary(self):
return "Convert multiple runs to one Single Crystal MDEventWorkspace"
def PyInit(self):
# files to reduce
self.declareProperty(MultipleFileProperty(name="Filename",
extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
"Files to combine in reduction")
# Filter by time
self.copyProperties('LoadEventNexus', ['FilterByTofMin', 'FilterByTofMax', 'FilterByTimeStop'])
# UBMatrix
self.declareProperty(FileProperty(name="UBMatrix",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".mat", ".ub", ".txt"]),
doc="Path to an ISAW-style UB matrix text file. See :ref:`LoadIsawUB <algm-LoadIsawUB>`")
# Goniometer
self.declareProperty('SetGoniometer', False, "Set which Goniometer to use. See :ref:`SetGoniometer <algm-SetGoniometer>`")
condition = VisibleWhenProperty("SetGoniometer", PropertyCriterion.IsNotDefault)
self.copyProperties('SetGoniometer', ['Goniometers', 'Axis0', 'Axis1', 'Axis2'])
self.setPropertySettings("Goniometers", condition)
self.setPropertySettings('Axis0', condition)
self.setPropertySettings('Axis1', condition)
self.setPropertySettings('Axis2', condition)
# Corrections
self.declareProperty(FileProperty(name="LoadInstrument",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml"]),
"Load a different instrument IDF onto the data from a file. See :ref:`LoadInstrument <algm-LoadInstrument>`")
self.declareProperty(FileProperty(name="DetCal",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".detcal"]),
"Load an ISAW DetCal calibration onto the data from a file. See :ref:`LoadIsawDetCal <algm-LoadIsawDetCal>`")
self.declareProperty(FileProperty(name="MaskFile",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml",".msk"]),
"Masking file for masking. Supported file format is XML and ISIS ASCII. See :ref:`LoadMask <algm-LoadMask>`")
self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
optional=PropertyMode.Mandatory,
direction=Direction.Output),
"Output Workspace")
# Convert Settings
self.copyProperties('ConvertToMD', ['Uproj', 'Vproj', 'Wproj',
'MinValues', 'MaxValues', 'SplitInto', 'SplitThreshold',
'MaxRecursionDepth', 'OverwriteExisting'])
self.setPropertyGroup('FilterByTofMin', 'Loading')
self.setPropertyGroup('FilterByTofMax', 'Loading')
self.setPropertyGroup('FilterByTimeStop', 'Loading')
# Goniometer
self.setPropertyGroup("SetGoniometer","Goniometer")
self.setPropertyGroup("Goniometers","Goniometer")
self.setPropertyGroup("Axis0","Goniometer")
self.setPropertyGroup("Axis1","Goniometer")
self.setPropertyGroup("Axis2","Goniometer")
# Corrections
self.setPropertyGroup("LoadInstrument","Corrections")
self.setPropertyGroup("DetCal","Corrections")
self.setPropertyGroup("MaskFile","Corrections")
# ConvertToMD
self.setPropertyGroup('Uproj', 'ConvertToMD')
self.setPropertyGroup('Vproj', 'ConvertToMD')
self.setPropertyGroup('Wproj', 'ConvertToMD')
self.setPropertyGroup('MinValues', 'ConvertToMD')
self.setPropertyGroup('MaxValues', 'ConvertToMD')
self.setPropertyGroup('SplitInto', 'ConvertToMD')
self.setPropertyGroup('SplitThreshold', 'ConvertToMD')
self.setPropertyGroup('MaxRecursionDepth', 'ConvertToMD')
def PyExec(self):
_load_inst = bool(self.getProperty("LoadInstrument").value)
_detcal = bool(self.getProperty("DetCal").value)
_masking = bool(self.getProperty("MaskFile").value)
_outWS_name = self.getPropertyValue("OutputWorkspace")
_UB = bool(self.getProperty("UBMatrix").value)
MinValues = self.getProperty("MinValues").value
MaxValues = self.getProperty("MaxValues").value
if self.getProperty("OverwriteExisting").value:
if mtd.doesExist(_outWS_name):
DeleteWorkspace(_outWS_name)
progress = Progress(self, 0.0, 1.0, len(self.getProperty("Filename").value))
for run in self.getProperty("Filename").value:
logger.notice("Working on " + run)
Load(Filename=run,
OutputWorkspace='__run',
FilterByTofMin=self.getProperty("FilterByTofMin").value,
FilterByTofMax=self.getProperty("FilterByTofMax").value,
FilterByTimeStop=self.getProperty("FilterByTimeStop").value)
if _load_inst:
LoadInstrument(Workspace='__run', Filename=self.getProperty("LoadInstrument").value, RewriteSpectraMap=False)
if _detcal:
LoadIsawDetCal(InputWorkspace='__run', Filename=self.getProperty("DetCal").value)
if _masking:
if not mtd.doesExist('__mask'):
LoadMask(Instrument=mtd['__run'].getInstrument().getName(),
InputFile=self.getProperty("MaskFile").value,
OutputWorkspace='__mask')
MaskDetectors(Workspace='__run',MaskedWorkspace='__mask')
if self.getProperty('SetGoniometer').value:
SetGoniometer(Workspace='__run',
Goniometers=self.getProperty('Goniometers').value,
Axis0=self.getProperty('Axis0').value,
Axis1=self.getProperty('Axis1').value,
Axis2=self.getProperty('Axis2').value)
if _UB:
LoadIsawUB(InputWorkspace='__run', Filename=self.getProperty("UBMatrix").value)
if len(MinValues) == 0 or len(MaxValues) == 0:
MinValues, MaxValues = ConvertToMDMinMaxGlobal('__run', dEAnalysisMode='Elastic',Q3DFrames='HKL',QDimensions='Q3D')
ConvertToMD(InputWorkspace='__run',
OutputWorkspace=_outWS_name,
QDimensions='Q3D',
dEAnalysisMode='Elastic',
Q3DFrames='HKL',
QConversionScales='HKL',
Uproj=self.getProperty('Uproj').value,
Vproj=self.getProperty('Vproj').value,
Wproj=self.getProperty('Wproj').value,
MinValues=MinValues,
MaxValues=MaxValues,
SplitInto=self.getProperty('SplitInto').value,
SplitThreshold=self.getProperty('SplitThreshold').value,
MaxRecursionDepth=self.getProperty('MaxRecursionDepth').value,
OverwriteExisting=False)
else:
if len(MinValues) == 0 or len(MaxValues) == 0:
MinValues, MaxValues = ConvertToMDMinMaxGlobal('__run', dEAnalysisMode='Elastic',Q3DFrames='Q',QDimensions='Q3D')
ConvertToMD(InputWorkspace='__run',
OutputWorkspace=_outWS_name,
QDimensions='Q3D',
dEAnalysisMode='Elastic',
Q3DFrames='Q_sample',
Uproj=self.getProperty('Uproj').value,
Vproj=self.getProperty('Vproj').value,
Wproj=self.getProperty('Wproj').value,
MinValues=MinValues,
MaxValues=MaxValues,
SplitInto=self.getProperty('SplitInto').value,
SplitThreshold=self.getProperty('SplitThreshold').value,
MaxRecursionDepth=self.getProperty('MaxRecursionDepth').value,
OverwriteExisting=False)
DeleteWorkspace('__run')
progress.report()
if mtd.doesExist('__mask'):
DeleteWorkspace('__mask')
self.setProperty("OutputWorkspace", mtd[_outWS_name])
AlgorithmFactory.subscribe(ConvertMultipleRunsToSingleCrystalMD)
|
wdzhou/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ConvertMultipleRunsToSingleCrystalMD.py
|
Python
|
gpl-3.0
| 9,652
|
[
"CRYSTAL"
] |
9f59b76c6cd74660cb2f6952c918320cd35a637b477a1a6148230b52514d878a
|
"""Using urllib3 ProxyManager and tor example."""
# author: James Campbell
# date: 2015 11 19
# Date Updated: 2 July 2019
import urllib3 # use with python 3 only
import argparse
from bs4 import BeautifulSoup
# terminal arguments parser globals - do not change
parser = argparse.ArgumentParser()
parser.add_argument('-o', action='store', dest='onion',
help='put in onion site to load (with http & quotes)')
results = parser.parse_args()
# Global Vars
# set the default onion site to visit to test, in this case DuckDuckGo
onionsite = 'http://3g2upl4pq6kufc4m.onion'
if results.onion is not None: # if search terms set in terminal then change from default to that
onionsite = results.onion # set from argparse above in globals section
# TOR SETUP GLOBAL Vars
# TOR proxy port that is default from torrc, change to whatever torrc is configured to
SOCKS_PORT = 9050
header = {'User-Agent': 'JAMES CAMPBELL jamescampbell.us SEARCH BOT! I FOUND YOU!!!!'}
# using this with privoxy and forwarding to tor
proxy = urllib3.ProxyManager('http://127.0.0.1:8119/')
r1 = proxy.request('GET', onionsite, headers=header)
print(r1.status) # status code
print(r1.headers) # header data
print(r1.data.decode('utf8')) # html raw output
souper = BeautifulSoup(r1.data, "html.parser")
soupera = souper.find_all('a') # get all a href's
for eachone in soupera:
print('This is a link: \n', eachone.text)
exit()
# test connect to DuckDuckGo .onion site
|
jamesacampbell/python-examples
|
urllib3_proxymanager-example.py
|
Python
|
mit
| 1,467
|
[
"VisIt"
] |
a58ebf940a0dbe64a4100a9760b31833f3ca71d52df7d67fe84b1855ab287661
|
from collections import deque
from rdkit import Chem
import sys
import tensorflow as tf
import pickle
import os
import fnmatch
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas as pd
from deepchem.feat.base_classes import Featurizer
from deepchem.feat.graph_features import atom_features
from scipy.sparse import csr_matrix
def get_atom_type(atom):
elem = atom.GetAtomicNum()
  hyb = str(atom.GetHybridization()).lower()  # call the method so "sp2"/"sp3" can appear in the string
if elem == 1:
return (0)
if elem == 4:
return (1)
if elem == 5:
return (2)
if elem == 6:
if "sp2" in hyb:
return (3)
elif "sp3" in hyb:
return (4)
else:
return (5)
if elem == 7:
if "sp2" in hyb:
return (6)
elif "sp3" in hyb:
return (7)
else:
return (8)
if elem == 8:
if "sp2" in hyb:
return (9)
elif "sp3" in hyb:
return (10)
else:
return (11)
if elem == 9:
return (12)
if elem == 15:
if "sp2" in hyb:
return (13)
elif "sp3" in hyb:
return (14)
else:
return (15)
if elem == 16:
if "sp2" in hyb:
return (16)
elif "sp3" in hyb:
return (17)
else:
return (18)
if elem == 17:
return (19)
if elem == 35:
return (20)
if elem == 53:
return (21)
return (22)
def get_atom_adj_matrices(mol,
n_atom_types,
max_n_atoms=200,
max_valence=4,
graph_conv_features=True,
nxn=True):
if not graph_conv_features:
bond_matrix = np.zeros((max_n_atoms, 4 * max_valence)).astype(np.uint8)
if nxn:
adj_matrix = np.zeros((max_n_atoms, max_n_atoms)).astype(np.uint8)
else:
adj_matrix = np.zeros((max_n_atoms, max_valence)).astype(np.uint8)
adj_matrix += (adj_matrix.shape[0] - 1)
if not graph_conv_features:
atom_matrix = np.zeros((max_n_atoms, n_atom_types + 3)).astype(np.uint8)
atom_matrix[:, atom_matrix.shape[1] - 1] = 1
atom_arrays = []
for a_idx in range(0, mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(a_idx)
if graph_conv_features:
atom_arrays.append(atom_features(atom))
else:
atom_type = get_atom_type(atom)
atom_matrix[a_idx][-1] = 0
atom_matrix[a_idx][atom_type] = 1
for n_idx, neighbor in enumerate(atom.GetNeighbors()):
if nxn:
adj_matrix[a_idx][neighbor.GetIdx()] = 1
adj_matrix[a_idx][a_idx] = 1
else:
adj_matrix[a_idx][n_idx] = neighbor.GetIdx()
if not graph_conv_features:
bond = mol.GetBondBetweenAtoms(a_idx, neighbor.GetIdx())
bond_type = str(bond.GetBondType()).lower()
if "single" in bond_type:
bond_order = 0
elif "double" in bond_type:
bond_order = 1
elif "triple" in bond_type:
bond_order = 2
elif "aromatic" in bond_type:
bond_order = 3
bond_matrix[a_idx][(4 * n_idx) + bond_order] = 1
if graph_conv_features:
n_feat = len(atom_arrays[0])
atom_matrix = np.zeros((max_n_atoms, n_feat)).astype(np.uint8)
for idx, atom_array in enumerate(atom_arrays):
atom_matrix[idx, :] = atom_array
else:
atom_matrix = np.concatenate(
[atom_matrix, bond_matrix], axis=1).astype(np.uint8)
return (adj_matrix.astype(np.uint8), atom_matrix.astype(np.uint8))
def featurize_mol(mol, n_atom_types, max_n_atoms, max_valence,
num_atoms_feature):
adj_matrix, atom_matrix = get_atom_adj_matrices(mol, n_atom_types,
max_n_atoms, max_valence)
if num_atoms_feature:
return ((adj_matrix, atom_matrix, mol.GetNumAtoms()))
return ((adj_matrix, atom_matrix))
class AdjacencyFingerprint(Featurizer):
def __init__(self,
n_atom_types=23,
max_n_atoms=200,
add_hydrogens=False,
max_valence=4,
num_atoms_feature=False):
self.n_atom_types = n_atom_types
self.max_n_atoms = max_n_atoms
self.add_hydrogens = add_hydrogens
self.max_valence = max_valence
self.num_atoms_feature = num_atoms_feature
def featurize(self, rdkit_mols):
featurized_mols = np.empty((len(rdkit_mols)), dtype=object)
for idx, mol in enumerate(rdkit_mols):
if self.add_hydrogens:
mol = Chem.AddHs(mol)
featurized_mol = featurize_mol(mol, self.n_atom_types, self.max_n_atoms,
self.max_valence, self.num_atoms_feature)
featurized_mols[idx] = featurized_mol
return (featurized_mols)
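# A hypothetical usage sketch (not in the original file), assuming RDKit is importable:
# featurize two small molecules from SMILES and print the shapes of the adjacency and
# atom-feature matrices produced for each one.
if __name__ == "__main__":
  demo_mols = [Chem.MolFromSmiles(smi) for smi in ("CCO", "c1ccccc1")]
  featurizer = AdjacencyFingerprint(max_n_atoms=50, num_atoms_feature=True)
  for adj, atom_feats, n_atoms in featurizer.featurize(demo_mols):
    print(n_atoms, adj.shape, atom_feats.shape)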
|
Agent007/deepchem
|
deepchem/feat/adjacency_fingerprints.py
|
Python
|
mit
| 4,624
|
[
"RDKit"
] |
d8064a76a780dbedba2bc76100cac83f643b69dbd3c215f82f130b96e68d4c47
|
#!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
""" Notes about the diverses class of the restriction enzyme implementation.
RestrictionType is the type of all restriction enzymes.
----------------------------------------------------------------------------
AbstractCut implements some methods that are common to all enzymes.
----------------------------------------------------------------------------
NoCut, OneCut,TwoCuts represent the number of double strand cuts
produced by the enzyme.
they correspond to the 4th field of the rebase
record emboss_e.NNN.
0->NoCut : the enzyme is not characterised.
2->OneCut : the enzyme produce one double strand cut.
4->TwoCuts : two double strand cuts.
----------------------------------------------------------------------------
Meth_Dep, Meth_Undep represent the methylation susceptibility to
the enzyme.
Not implemented yet.
----------------------------------------------------------------------------
Palindromic, if the site is palindromic or not.
NonPalindromic allows some optimisations of the code.
No need to check the reverse strand
with palindromic sites.
----------------------------------------------------------------------------
Unknown, Blunt, represent the overhang.
Ov5, Ov3 Unknown is here for symmetry reasons and
correspond to enzymes that are not characterised
in rebase.
----------------------------------------------------------------------------
Defined, Ambiguous, represent the sequence of the overhang.
NotDefined
NotDefined is for enzymes not characterised in
rebase.
Defined correspond to enzymes that display a
constant overhang whatever the sequence.
ex : EcoRI. G^AATTC -> overhang :AATT
CTTAA^G
Ambiguous : the overhang varies with the
sequence restricted.
Typically enzymes which cut outside their
restriction site or (but not always)
inside an ambiguous site.
ex:
AcuI CTGAAG(22/20) -> overhang : NN
AasI GACNNN^NNNGTC -> overhang : NN
CTGN^NNNNNCAG
note : these 3 classes refers to the overhang not the site.
So the enzyme ApoI (RAATTY) is defined even if its restriction
site is ambiguous.
ApoI R^AATTY -> overhang : AATT -> Defined
YTTAA^R
Accordingly, blunt enzymes are always Defined even
when they cut outside their restriction site.
----------------------------------------------------------------------------
Not_available, as found in rebase file emboss_r.NNN files.
Commercially_available
allow the selection of the enzymes according to
their suppliers to reduce the quantity
of results.
Also will allow the implementation of buffer
compatibility tables. Not implemented yet.
the list of suppliers is extracted from
emboss_s.NNN
----------------------------------------------------------------------------
"""
import re
import itertools
from Bio.Seq import Seq, MutableSeq
from Bio.Alphabet import IUPAC
from Bio.Restriction.Restriction_Dictionary import rest_dict as enzymedict
from Bio.Restriction.Restriction_Dictionary import typedict
from Bio.Restriction.Restriction_Dictionary import suppliers as suppliers_dict
from Bio.Restriction.RanaConfig import *
from Bio.Restriction.PrintFormat import PrintFormat
#Used to use Bio.Restriction.DNAUtils.check_bases (and expose it under this
#namespace), but have deprecated that module.
def _check_bases(seq_string):
"""Check characters in a string (PRIVATE).
Remove digits and white space present in string. Allows any valid ambiguous
    IUPAC DNA single letter codes (ABCDGHKMNRSTVWY, lower case are converted).
Other characters (e.g. symbols) trigger a TypeError.
Returns the string WITH A LEADING SPACE (!). This is for backwards
compatibility, and may in part be explained by the fact that
Bio.Restriction doesn't use zero based counting.
"""
#Remove white space and make upper case:
seq_string = "".join(seq_string.split()).upper()
#Remove digits
for c in "0123456789" : seq_string = seq_string.replace(c,"")
#Check only allowed IUPAC letters
if not set(seq_string).issubset(set("ABCDGHKMNRSTVWY")) :
raise TypeError("Invalid character found in %s" % repr(seq_string))
return " " + seq_string
matching = {'A' : 'ARWMHVDN', 'C' : 'CYSMHBVN', 'G' : 'GRSKBVDN',
'T' : 'TYWKHBDN', 'R' : 'ABDGHKMNSRWV', 'Y' : 'CBDHKMNSTWVY',
'W' : 'ABDHKMNRTWVY', 'S' : 'CBDGHKMNSRVY', 'M' : 'ACBDHMNSRWVY',
'K' : 'BDGHKNSRTWVY', 'H' : 'ACBDHKMNSRTWVY',
'B' : 'CBDGHKMNSRTWVY', 'V' : 'ACBDGHKMNSRWVY',
'D' : 'ABDGHKMNSRTWVY', 'N' : 'ACBDGHKMNSRTWVY'}
DNA = Seq
class FormattedSeq(object):
"""FormattedSeq(seq, [linear=True])-> new FormattedSeq.
Translate a Bio.Seq into a formatted sequence to be used with Restriction.
Roughly:
remove anything which is not IUPAC alphabet and then add a space
in front of the sequence to get a biological index instead of a
python index (i.e. index of the first base is 1 not 0).
Retains information about the shape of the molecule linear (default)
    or circular. Restriction sites are searched over the edges of a circular
sequence."""
def __init__(self, seq, linear = True):
"""FormattedSeq(seq, [linear=True])-> new FormattedSeq.
seq is either a Bio.Seq, Bio.MutableSeq or a FormattedSeq.
if seq is a FormattedSeq, linear will have no effect on the
shape of the sequence."""
if isinstance(seq, Seq) or isinstance(seq, MutableSeq):
stringy = seq.tostring()
self.lower = stringy.islower()
#Note this adds a leading space to the sequence (!)
self.data = _check_bases(stringy)
self.linear = linear
self.klass = seq.__class__
self.alphabet = seq.alphabet
elif isinstance(seq, FormattedSeq):
self.lower = seq.lower
self.data = seq.data
self.linear = seq.linear
self.alphabet = seq.alphabet
self.klass = seq.klass
else:
raise TypeError('expected Seq or MutableSeq, got %s' % type(seq))
def __len__(self):
return len(self.data) - 1
def __repr__(self):
return 'FormattedSeq(%s, linear=%s)' %(repr(self[1:]), repr(self.linear))
def __eq__(self, other):
if isinstance(other, FormattedSeq):
if repr(self) == repr(other):
return True
else:
return False
return False
def circularise(self):
"""FS.circularise() -> circularise FS"""
self.linear = False
return
def linearise(self):
"""FS.linearise() -> linearise FS"""
self.linear = True
return
def to_linear(self):
"""FS.to_linear() -> new linear FS instance"""
new = self.__class__(self)
new.linear = True
return new
def to_circular(self):
"""FS.to_circular() -> new circular FS instance"""
new = self.__class__(self)
new.linear = False
return new
def is_linear(self):
"""FS.is_linear() -> bool.
        True if the sequence will be analysed as a linear sequence."""
return self.linear
def finditer(self, pattern, size):
"""FS.finditer(pattern, size) -> list.
        return a list of the pattern matches in the sequence.
        the list is made of tuples (location, pattern.group).
        the latter is used with non palindromic sites.
        pattern is the regular expression pattern corresponding to the
        enzyme restriction site.
        size is the size of the restriction enzyme recognition site."""
if self.is_linear():
data = self.data
else:
data = self.data + self.data[1:size]
return [(i.start(), i.group) for i in re.finditer(pattern, data)]
def __getitem__(self, i):
if self.lower:
return self.klass((self.data[i]).lower(), self.alphabet)
return self.klass(self.data[i], self.alphabet)
class RestrictionType(type):
"""RestrictionType. Type from which derives all enzyme classes.
Implement the operator methods."""
def __init__(cls, name='', bases=(), dct={}):
"""RE(name, bases, dct) -> RestrictionType instance.
Not intended to be used in normal operation. The enzymes are
instantiated when importing the module.
see below."""
if "-" in name :
raise ValueError("Problem with hyphen in %s as enzyme name" \
% repr(name))
super(RestrictionType, cls).__init__(cls, name, bases, dct)
try :
cls.compsite = re.compile(cls.compsite)
except Exception, err :
raise ValueError("Problem with regular expression, re.compiled(%s)" \
% repr(cls.compsite))
def __add__(cls, other):
"""RE.__add__(other) -> RestrictionBatch().
if other is an enzyme returns a batch of the two enzymes.
if other is already a RestrictionBatch add enzyme to it."""
if isinstance(other, RestrictionType):
return RestrictionBatch([cls, other])
elif isinstance(other, RestrictionBatch):
return other.add_nocheck(cls)
else:
raise TypeError
def __div__(cls, other):
"""RE.__div__(other) -> list.
RE/other
returns RE.search(other)."""
return cls.search(other)
def __rdiv__(cls, other):
"""RE.__rdiv__(other) -> list.
other/RE
returns RE.search(other)."""
return cls.search(other)
def __truediv__(cls, other):
"""RE.__truediv__(other) -> list.
RE/other
returns RE.search(other)."""
return cls.search(other)
def __rtruediv__(cls, other):
"""RE.__rtruediv__(other) -> list.
other/RE
returns RE.search(other)."""
return cls.search(other)
def __floordiv__(cls, other):
"""RE.__floordiv__(other) -> list.
RE//other
returns RE.catalyse(other)."""
return cls.catalyse(other)
def __rfloordiv__(cls, other):
"""RE.__rfloordiv__(other) -> list.
other//RE
returns RE.catalyse(other)."""
return cls.catalyse(other)
def __str__(cls):
"""RE.__str__() -> str.
return the name of the enzyme."""
return cls.__name__
def __repr__(cls):
"""RE.__repr__() -> str.
used with eval or exec will instantiate the enzyme."""
return "%s" % cls.__name__
def __len__(cls):
"""RE.__len__() -> int.
length of the recognition site."""
return cls.size
def __hash__(cls):
#Python default is to use id(...)
#This is consistent with the __eq__ implementation
return id(cls)
def __eq__(cls, other):
"""RE == other -> bool
True if RE and other are the same enzyme.
Specifically this checks they are the same Python object.
"""
#assert (id(cls)==id(other)) == (other is cls) == (cls is other)
return id(cls)==id(other)
def __ne__(cls, other):
"""RE != other -> bool.
isoschizomer strict, same recognition site, same restriction -> False
all the other-> True
WARNING - This is not the inverse of the __eq__ method.
"""
if not isinstance(other, RestrictionType):
return True
elif cls.charac == other.charac:
return False
else:
return True
def __rshift__(cls, other):
"""RE >> other -> bool.
neoschizomer : same recognition site, different restriction. -> True
all the others : -> False"""
if not isinstance(other, RestrictionType):
return False
elif cls.site == other.site and cls.charac != other.charac:
return True
else:
return False
def __mod__(cls, other):
"""a % b -> bool.
Test compatibility of the overhang of a and b.
True if a and b have compatible overhang."""
if not isinstance(other, RestrictionType):
raise TypeError( \
'expected RestrictionType, got %s instead' % type(other))
return cls._mod1(other)
def __ge__(cls, other):
"""a >= b -> bool.
a is greater or equal than b if the a site is longer than b site.
if their site have the same length sort by alphabetical order of their
names."""
if not isinstance(other, RestrictionType):
raise NotImplementedError
if len(cls) > len(other):
return True
elif cls.size == len(other) and cls.__name__ >= other.__name__:
return True
else:
return False
def __gt__(cls, other):
"""a > b -> bool.
sorting order:
1. size of the recognition site.
2. if equal size, alphabetical order of the names."""
if not isinstance(other, RestrictionType):
raise NotImplementedError
if len(cls) > len(other):
return True
elif cls.size == len(other) and cls.__name__ > other.__name__:
return True
else:
return False
def __le__(cls, other):
"""a <= b -> bool.
sorting order:
1. size of the recognition site.
2. if equal size, alphabetical order of the names."""
if not isinstance(other, RestrictionType):
raise NotImplementedError
elif len(cls) < len(other):
return True
elif len(cls) == len(other) and cls.__name__ <= other.__name__:
return True
else:
return False
def __lt__(cls, other):
"""a < b -> bool.
sorting order:
1. size of the recognition site.
2. if equal size, alphabetical order of the names."""
if not isinstance(other, RestrictionType):
raise NotImplementedError
elif len(cls) < len(other):
return True
elif len(cls) == len(other) and cls.__name__ < other.__name__:
return True
else:
return False
class AbstractCut(RestrictionType):
"""Implement the methods that are common to all restriction enzymes.
All the methods are classmethod.
For internal use only. Not meant to be instantiate."""
def search(cls, dna, linear=True):
"""RE.search(dna, linear=True) -> list.
return a list of all the site of RE in dna. Compensate for circular
sequences and so on.
dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance.
        if linear is False, the restriction sites that span over the boundaries
will be included.
The positions are the first base of the 3' fragment,
i.e. the first base after the position the enzyme will cut. """
#
# Separating search from _search allow a (very limited) optimisation
# of the search when using a batch of restriction enzymes.
# in this case the DNA is tested once by the class which implements
# the batch instead of being tested by each enzyme single.
# see RestrictionBatch.search() for example.
#
if isinstance(dna, FormattedSeq):
cls.dna = dna
return cls._search()
else :
cls.dna = FormattedSeq(dna, linear)
return cls._search()
search = classmethod(search)
def all_suppliers(self):
"""RE.all_suppliers -> print all the suppliers of R"""
supply = [x[0] for x in suppliers_dict.itervalues()]
supply.sort()
print ",\n".join(supply)
return
all_suppliers = classmethod(all_suppliers)
def is_equischizomer(self, other):
"""RE.is_equischizomers(other) -> bool.
True if other is an isoschizomer of RE.
False else.
equischizomer <=> same site, same position of restriction."""
return not self != other
is_equischizomer = classmethod(is_equischizomer)
def is_neoschizomer(self, other):
"""RE.is_neoschizomers(other) -> bool.
True if other is an isoschizomer of RE.
False else.
neoschizomer <=> same site, different position of restriction."""
return self >> other
is_neoschizomer = classmethod(is_neoschizomer)
def is_isoschizomer(self, other):
"""RE.is_isoschizomers(other) -> bool.
True if other is an isoschizomer of RE.
False else.
isoschizomer <=> same site."""
return (not self != other) or self >> other
is_isoschizomer = classmethod(is_isoschizomer)
def equischizomers(self, batch=None):
"""RE.equischizomers([batch]) -> list.
return a tuple of all the isoschizomers of RE.
if batch is supplied it is used instead of the default AllEnzymes.
equischizomer <=> same site, same position of restriction."""
if not batch : batch = AllEnzymes
r = [x for x in batch if not self != x]
i = r.index(self)
del r[i]
r.sort()
return r
equischizomers = classmethod(equischizomers)
def neoschizomers(self, batch=None):
"""RE.neoschizomers([batch]) -> list.
return a tuple of all the neoschizomers of RE.
if batch is supplied it is used instead of the default AllEnzymes.
neoschizomer <=> same site, different position of restriction."""
if not batch : batch = AllEnzymes
r = [x for x in batch if self >> x]
r.sort()
return r
neoschizomers = classmethod(neoschizomers)
def isoschizomers(self, batch=None):
"""RE.isoschizomers([batch]) -> list.
return a tuple of all the equischizomers and neoschizomers of RE.
if batch is supplied it is used instead of the default AllEnzymes."""
if not batch : batch = AllEnzymes
r = [x for x in batch if (self >> x) or (not self != x)]
i = r.index(self)
del r[i]
r.sort()
return r
isoschizomers = classmethod(isoschizomers)
def frequency(self):
"""RE.frequency() -> int.
frequency of the site."""
return self.freq
frequency = classmethod(frequency)
class NoCut(AbstractCut):
"""Implement the methods specific to the enzymes that do not cut.
These enzymes are generally enzymes that have been only partially
    characterised and the way they cut the DNA is unknown or enzymes for
    which the pattern of cut is too complex to be recorded in Rebase
(ncuts values of 0 in emboss_e.###).
When using search() with these enzymes the values returned are at the start of
the restriction site.
Their catalyse() method returns a TypeError.
Unknown and NotDefined are also part of the base classes of these enzymes.
Internal use only. Not meant to be instantiated."""
def cut_once(self):
"""RE.cut_once() -> bool.
True if the enzyme cut the sequence one time on each strand."""
return False
cut_once = classmethod(cut_once)
def cut_twice(self):
"""RE.cut_twice() -> bool.
True if the enzyme cut the sequence twice on each strand."""
return False
cut_twice = classmethod(cut_twice)
def _modify(self, location):
"""RE._modify(location) -> int.
for internal use only.
location is an integer corresponding to the location of the match for
the enzyme pattern in the sequence.
_modify returns the real place where the enzyme will cut.
example:
EcoRI pattern : GAATTC
EcoRI will cut after the G.
so in the sequence:
______
GAATACACGGAATTCGA
|
10
dna.finditer(GAATTC, 6) will return 10 as G is the 10th base
EcoRI cut after the G so:
EcoRI._modify(10) -> 11.
if the enzyme cut twice _modify will returns two integer corresponding
to each cutting site.
"""
yield location
_modify = classmethod(_modify)
def _rev_modify(self, location):
"""RE._rev_modify(location) -> generator of int.
for internal use only.
as _modify for site situated on the antiparallel strand when the
enzyme is not palindromic
"""
yield location
_rev_modify = classmethod(_rev_modify)
def characteristic(self):
"""RE.characteristic() -> tuple.
the tuple contains the attributes:
            fst5 -> first 5' cut (current strand) or None
            fst3 -> first 3' cut (complementary strand) or None
            scd5 -> second 5' cut (current strand) or None
            scd3 -> second 3' cut (complementary strand) or None
site -> recognition site."""
return None, None, None, None, self.site
characteristic = classmethod(characteristic)
class OneCut(AbstractCut):
"""Implement the methods specific to the enzymes that cut the DNA only once
Correspond to ncuts values of 2 in emboss_e.###
Internal use only. Not meant to be instantiated."""
def cut_once(self):
"""RE.cut_once() -> bool.
True if the enzyme cut the sequence one time on each strand."""
return True
cut_once = classmethod(cut_once)
def cut_twice(self):
"""RE.cut_twice() -> bool.
True if the enzyme cut the sequence twice on each strand."""
return False
cut_twice = classmethod(cut_twice)
def _modify(self, location):
"""RE._modify(location) -> int.
for internal use only.
location is an integer corresponding to the location of the match for
the enzyme pattern in the sequence.
_modify returns the real place where the enzyme will cut.
example:
EcoRI pattern : GAATTC
EcoRI will cut after the G.
so in the sequence:
______
GAATACACGGAATTCGA
|
10
dna.finditer(GAATTC, 6) will return 10 as G is the 10th base
EcoRI cut after the G so:
EcoRI._modify(10) -> 11.
if the enzyme cut twice _modify will returns two integer corresponding
to each cutting site.
"""
yield location + self.fst5
_modify = classmethod(_modify)
def _rev_modify(self, location):
"""RE._rev_modify(location) -> generator of int.
for internal use only.
as _modify for site situated on the antiparallel strand when the
enzyme is not palindromic
"""
yield location - self.fst3
_rev_modify = classmethod(_rev_modify)
def characteristic(self):
"""RE.characteristic() -> tuple.
the tuple contains the attributes:
            fst5 -> first 5' cut (current strand) or None
            fst3 -> first 3' cut (complementary strand) or None
            scd5 -> second 5' cut (current strand) or None
            scd3 -> second 3' cut (complementary strand) or None
site -> recognition site."""
return self.fst5, self.fst3, None, None, self.site
characteristic = classmethod(characteristic)
class TwoCuts(AbstractCut):
"""Implement the methods specific to the enzymes that cut the DNA twice
Correspond to ncuts values of 4 in emboss_e.###
Internal use only. Not meant to be instantiated."""
def cut_once(self):
"""RE.cut_once() -> bool.
True if the enzyme cut the sequence one time on each strand."""
return False
cut_once = classmethod(cut_once)
def cut_twice(self):
"""RE.cut_twice() -> bool.
True if the enzyme cut the sequence twice on each strand."""
return True
cut_twice = classmethod(cut_twice)
def _modify(self, location):
"""RE._modify(location) -> int.
for internal use only.
location is an integer corresponding to the location of the match for
the enzyme pattern in the sequence.
_modify returns the real place where the enzyme will cut.
example:
EcoRI pattern : GAATTC
EcoRI will cut after the G.
so in the sequence:
______
GAATACACGGAATTCGA
|
10
dna.finditer(GAATTC, 6) will return 10 as G is the 10th base
EcoRI cut after the G so:
EcoRI._modify(10) -> 11.
if the enzyme cut twice _modify will returns two integer corresponding
to each cutting site.
"""
yield location + self.fst5
yield location + self.scd5
_modify = classmethod(_modify)
def _rev_modify(self, location):
"""RE._rev_modify(location) -> generator of int.
for internal use only.
as _modify for site situated on the antiparallel strand when the
enzyme is not palindromic
"""
yield location - self.fst3
yield location - self.scd3
_rev_modify = classmethod(_rev_modify)
def characteristic(self):
"""RE.characteristic() -> tuple.
the tuple contains the attributes:
            fst5 -> first 5' cut (current strand) or None
            fst3 -> first 3' cut (complementary strand) or None
            scd5 -> second 5' cut (current strand) or None
            scd3 -> second 3' cut (complementary strand) or None
site -> recognition site."""
return self.fst5, self.fst3, self.scd5, self.scd3, self.site
characteristic = classmethod(characteristic)
class Meth_Dep(AbstractCut):
"""Implement the information about methylation.
Enzymes of this class possess a site which is methylable."""
def is_methylable(self):
"""RE.is_methylable() -> bool.
        True if the recognition site is methylable."""
return True
is_methylable = classmethod(is_methylable)
class Meth_Undep(AbstractCut):
"""Implement informations about methylation sensitibility.
Enzymes of this class are not sensible to methylation."""
def is_methylable(self):
"""RE.is_methylable() -> bool.
        True if the recognition site is methylable."""
return False
is_methylable = classmethod(is_methylable)
class Palindromic(AbstractCut):
"""Implement the methods specific to the enzymes which are palindromic
palindromic means : the recognition site and its reverse complement are
identical.
Remarks : an enzyme with a site CGNNCG is palindromic even if some
of the sites that it will recognise are not.
for example here : CGAACG
Internal use only. Not meant to be instantiated."""
def _search(self):
"""RE._search() -> list.
for internal use only.
implement the search method for palindromic and non palindromic enzyme.
"""
siteloc = self.dna.finditer(self.compsite,self.size)
self.results = [r for s,g in siteloc for r in self._modify(s)]
if self.results : self._drop()
return self.results
_search = classmethod(_search)
def is_palindromic(self):
"""RE.is_palindromic() -> bool.
        True if the recognition site is a palindrome."""
return True
is_palindromic = classmethod(is_palindromic)
class NonPalindromic(AbstractCut):
"""Implement the methods specific to the enzymes which are not palindromic
palindromic means : the recognition site and its reverse complement are
identical.
Internal use only. Not meant to be instantiated."""
def _search(self):
"""RE._search() -> list.
for internal use only.
implement the search method for palindromic and non palindromic enzyme.
"""
iterator = self.dna.finditer(self.compsite, self.size)
self.results = []
modif = self._modify
revmodif = self._rev_modify
s = str(self)
self.on_minus = []
for start, group in iterator:
if group(s):
self.results += [r for r in modif(start)]
else:
self.on_minus += [r for r in revmodif(start)]
self.results += self.on_minus
if self.results:
self.results.sort()
self._drop()
return self.results
_search = classmethod(_search)
def is_palindromic(self):
"""RE.is_palindromic() -> bool.
        True if the recognition site is a palindrome."""
return False
is_palindromic = classmethod(is_palindromic)
class Unknown(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is unknown.
These enzymes are also NotDefined and NoCut.
Internal use only. Not meant to be instantiated."""
def catalyse(self, dna, linear=True):
"""RE.catalyse(dna, linear=True) -> tuple of DNA.
RE.catalyze(dna, linear=True) -> tuple of DNA.
return a tuple of dna as will be produced by using RE to restrict the
dna.
dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance.
if linear is False, the sequence is considered to be circular and the
output will be modified accordingly."""
raise NotImplementedError('%s restriction is unknown.' \
% self.__name__)
catalyze = catalyse = classmethod(catalyse)
def is_blunt(self):
"""RE.is_blunt() -> bool.
True if the enzyme produces blunt end.
see also:
RE.is_3overhang()
RE.is_5overhang()
RE.is_unknown()"""
return False
is_blunt = classmethod(is_blunt)
def is_5overhang(self):
"""RE.is_5overhang() -> bool.
True if the enzyme produces 5' overhang sticky end.
see also:
RE.is_3overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_5overhang = classmethod(is_5overhang)
def is_3overhang(self):
"""RE.is_3overhang() -> bool.
True if the enzyme produces 3' overhang sticky end.
see also:
RE.is_5overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_3overhang = classmethod(is_3overhang)
def overhang(self):
"""RE.overhang() -> str. type of overhang of the enzyme.,
can be "3' overhang", "5' overhang", "blunt", "unknown" """
return 'unknown'
overhang = classmethod(overhang)
def compatible_end(self):
"""RE.compatible_end() -> list.
list of all the enzymes that share compatible end with RE."""
return []
compatible_end = classmethod(compatible_end)
def _mod1(self, other):
"""RE._mod1(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
return False
_mod1 = classmethod(_mod1)
class Blunt(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is blunt.
The enzyme cuts the + strand and the - strand of the DNA at the same
place.
Internal use only. Not meant to be instantiated."""
def catalyse(self, dna, linear=True):
"""RE.catalyse(dna, linear=True) -> tuple of DNA.
RE.catalyze(dna, linear=True) -> tuple of DNA.
return a tuple of dna as will be produced by using RE to restrict the
dna.
dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance.
if linear is False, the sequence is considered to be circular and the
output will be modified accordingly."""
r = self.search(dna, linear)
d = self.dna
if not r : return d[1:],
fragments = []
length = len(r)-1
if d.is_linear():
#
# START of the sequence to FIRST site.
#
fragments.append(d[1:r[0]])
if length:
#
# if more than one site add them.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
#
# LAST site to END of the sequence.
#
fragments.append(d[r[-1]:])
else:
#
# circular : bridge LAST site to FIRST site.
#
fragments.append(d[r[-1]:]+d[1:r[0]])
if not length:
#
# one site we finish here.
#
return tuple(fragments)
#
# add the others.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
return tuple(fragments)
catalyze = catalyse = classmethod(catalyse)
def is_blunt(self):
"""RE.is_blunt() -> bool.
True if the enzyme produces blunt end.
see also:
RE.is_3overhang()
RE.is_5overhang()
RE.is_unknown()"""
return True
is_blunt = classmethod(is_blunt)
def is_5overhang(self):
"""RE.is_5overhang() -> bool.
True if the enzyme produces 5' overhang sticky end.
see also:
RE.is_3overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_5overhang = classmethod(is_5overhang)
def is_3overhang(self):
"""RE.is_3overhang() -> bool.
True if the enzyme produces 3' overhang sticky end.
see also:
RE.is_5overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_3overhang = classmethod(is_3overhang)
def overhang(self):
"""RE.overhang() -> str. type of overhang of the enzyme.,
can be "3' overhang", "5' overhang", "blunt", "unknown" """
return 'blunt'
overhang = classmethod(overhang)
def compatible_end(self, batch=None):
"""RE.compatible_end() -> list.
list of all the enzymes that share compatible end with RE."""
if not batch : batch = AllEnzymes
        r = [x for x in iter(batch) if x.is_blunt()]
r.sort()
return r
compatible_end = classmethod(compatible_end)
def _mod1(other):
"""RE._mod1(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
if issubclass(other, Blunt) : return True
else : return False
_mod1 = staticmethod(_mod1)
class Ov5(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is recessed in 3'.
The enzyme cuts the + strand after the - strand of the DNA.
Internal use only. Not meant to be instantiated."""
def catalyse(self, dna, linear=True):
"""RE.catalyse(dna, linear=True) -> tuple of DNA.
RE.catalyze(dna, linear=True) -> tuple of DNA.
return a tuple of dna as will be produced by using RE to restrict the
dna.
dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance.
if linear is False, the sequence is considered to be circular and the
output will be modified accordingly."""
r = self.search(dna, linear)
d = self.dna
if not r : return d[1:],
length = len(r)-1
fragments = []
if d.is_linear():
#
# START of the sequence to FIRST site.
#
fragments.append(d[1:r[0]])
if length:
#
# if more than one site add them.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
#
# LAST site to END of the sequence.
#
fragments.append(d[r[-1]:])
else:
#
# circular : bridge LAST site to FIRST site.
#
fragments.append(d[r[-1]:]+d[1:r[0]])
if not length:
#
# one site we finish here.
#
return tuple(fragments)
#
# add the others.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
return tuple(fragments)
catalyze = catalyse = classmethod(catalyse)
def is_blunt(self):
"""RE.is_blunt() -> bool.
True if the enzyme produces blunt end.
see also:
RE.is_3overhang()
RE.is_5overhang()
RE.is_unknown()"""
return False
is_blunt = classmethod(is_blunt)
def is_5overhang(self):
"""RE.is_5overhang() -> bool.
True if the enzyme produces 5' overhang sticky end.
see also:
RE.is_3overhang()
RE.is_blunt()
RE.is_unknown()"""
return True
is_5overhang = classmethod(is_5overhang)
def is_3overhang(self):
"""RE.is_3overhang() -> bool.
True if the enzyme produces 3' overhang sticky end.
see also:
RE.is_5overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_3overhang = classmethod(is_3overhang)
def overhang(self):
"""RE.overhang() -> str. type of overhang of the enzyme.,
can be "3' overhang", "5' overhang", "blunt", "unknown" """
return "5' overhang"
overhang = classmethod(overhang)
def compatible_end(self, batch=None):
"""RE.compatible_end() -> list.
list of all the enzymes that share compatible end with RE."""
        if not batch : batch = AllEnzymes
        r = [x for x in iter(batch) if x.is_5overhang() and x % self]
r.sort()
return r
compatible_end = classmethod(compatible_end)
def _mod1(self, other):
"""RE._mod1(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
if issubclass(other, Ov5) : return self._mod2(other)
else : return False
_mod1 = classmethod(_mod1)
class Ov3(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is recessed in 5'.
The enzyme cuts the - strand after the + strand of the DNA.
Internal use only. Not meant to be instantiated."""
def catalyse(self, dna, linear=True):
"""RE.catalyse(dna, linear=True) -> tuple of DNA.
RE.catalyze(dna, linear=True) -> tuple of DNA.
return a tuple of dna as will be produced by using RE to restrict the
dna.
dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance.
if linear is False, the sequence is considered to be circular and the
output will be modified accordingly."""
r = self.search(dna, linear)
d = self.dna
if not r : return d[1:],
fragments = []
length = len(r)-1
if d.is_linear():
#
# START of the sequence to FIRST site.
#
fragments.append(d[1:r[0]])
if length:
#
# if more than one site add them.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
#
# LAST site to END of the sequence.
#
fragments.append(d[r[-1]:])
else:
#
# circular : bridge LAST site to FIRST site.
#
fragments.append(d[r[-1]:]+d[1:r[0]])
if not length:
#
# one site we finish here.
#
return tuple(fragments)
#
# add the others.
#
fragments += [d[r[x]:r[x+1]] for x in xrange(length)]
return tuple(fragments)
catalyze = catalyse = classmethod(catalyse)
def is_blunt(self):
"""RE.is_blunt() -> bool.
True if the enzyme produces blunt end.
see also:
RE.is_3overhang()
RE.is_5overhang()
RE.is_unknown()"""
return False
is_blunt = classmethod(is_blunt)
def is_5overhang(self):
"""RE.is_5overhang() -> bool.
True if the enzyme produces 5' overhang sticky end.
see also:
RE.is_3overhang()
RE.is_blunt()
RE.is_unknown()"""
return False
is_5overhang = classmethod(is_5overhang)
def is_3overhang(self):
"""RE.is_3overhang() -> bool.
True if the enzyme produces 3' overhang sticky end.
see also:
RE.is_5overhang()
RE.is_blunt()
RE.is_unknown()"""
return True
is_3overhang = classmethod(is_3overhang)
def overhang(self):
"""RE.overhang() -> str. type of overhang of the enzyme.,
can be "3' overhang", "5' overhang", "blunt", "unknown" """
return "3' overhang"
overhang = classmethod(overhang)
def compatible_end(self, batch=None):
"""RE.compatible_end() -> list.
list of all the enzymes that share compatible end with RE."""
        if not batch : batch = AllEnzymes
        r = [x for x in iter(batch) if x.is_3overhang() and x % self]
r.sort()
return r
compatible_end = classmethod(compatible_end)
def _mod1(self, other):
"""RE._mod1(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
#
# called by RE._mod1(other) when the one of the enzyme is ambiguous
#
if issubclass(other, Ov3) : return self._mod2(other)
else : return False
_mod1 = classmethod(_mod1)
class Defined(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
and the cut are not variable.
Typical example : EcoRI -> G^AATT_C
The overhang will always be AATT
Notes:
        Blunt enzymes are always defined, even if their site is GGATCCNNN^_N:
        their overhang is always the same: blunt!
Internal use only. Not meant to be instantiated."""
def _drop(self):
"""RE._drop() -> list.
for internal use only.
        drop the sites that are situated outside the sequence in a linear sequence.
        modify the index for sites in circular sequences."""
#
# remove or modify the results that are outside the sequence.
# This is necessary since after finding the site we add the distance
# from the site to the cut with the _modify and _rev_modify methods.
# For linear we will remove these sites altogether.
# For circular sequence, we modify the result rather than _drop it
# since the site is in the sequence.
#
length = len(self.dna)
drop = itertools.dropwhile
take = itertools.takewhile
if self.dna.is_linear():
self.results = [x for x in drop(lambda x:x<1, self.results)]
self.results = [x for x in take(lambda x:x<length, self.results)]
else:
for index, location in enumerate(self.results):
if location < 1:
self.results[index] += length
else:
break
for index, location in enumerate(self.results[::-1]):
if location > length:
self.results[-(index+1)] -= length
else:
break
return
_drop = classmethod(_drop)
def is_defined(self):
"""RE.is_defined() -> bool.
True if the sequence recognised and cut is constant,
i.e. the recognition site is not degenerated AND the enzyme cut inside
the site.
see also:
RE.is_ambiguous()
RE.is_unknown()"""
return True
is_defined = classmethod(is_defined)
def is_ambiguous(self):
"""RE.is_ambiguous() -> bool.
True if the sequence recognised and cut is ambiguous,
i.e. the recognition site is degenerated AND/OR the enzyme cut outside
the site.
see also:
RE.is_defined()
RE.is_unknown()"""
return False
is_ambiguous = classmethod(is_ambiguous)
def is_unknown(self):
"""RE.is_unknown() -> bool.
True if the sequence is unknown,
i.e. the recognition site has not been characterised yet.
see also:
RE.is_defined()
RE.is_ambiguous()"""
return False
is_unknown = classmethod(is_unknown)
def elucidate(self):
"""RE.elucidate() -> str
return a representation of the site with the cut on the (+) strand
represented as '^' and the cut on the (-) strand as '_'.
ie:
>>> EcoRI.elucidate() # 5' overhang
'G^AATT_C'
>>> KpnI.elucidate() # 3' overhang
'G_GTAC^C'
>>> EcoRV.elucidate() # blunt
'GAT^_ATC'
>>> SnaI.elucidate() # NotDefined, cut profile unknown.
'? GTATAC ?'
>>>
"""
f5 = self.fst5
f3 = self.fst3
site = self.site
if self.cut_twice() : re = 'cut twice, not yet implemented sorry.'
elif self.is_5overhang():
if f5 == f3 == 0 : re = 'N^'+ self.site + '_N'
elif f3 == 0 : re = site[:f5] + '^' + site[f5:] + '_N'
else : re = site[:f5] + '^' + site[f5:f3] + '_' + site[f3:]
elif self.is_blunt():
re = site[:f5] + '^_' + site[f5:]
else:
if f5 == f3 == 0 : re = 'N_'+ site + '^N'
else : re = site[:f3] + '_' + site[f3:f5] +'^'+ site[f5:]
return re
elucidate = classmethod(elucidate)
def _mod2(self, other):
"""RE._mod2(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
#
# called by RE._mod1(other) when the one of the enzyme is ambiguous
#
if other.ovhgseq == self.ovhgseq:
return True
elif issubclass(other, Ambiguous):
return other._mod2(self)
else:
return False
_mod2 = classmethod(_mod2)
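# A minimal usage sketch (illustrative): _mod1/_mod2 back the public '%'
# operator, which reports whether two enzymes leave ligatable (compatible)
# ends, e.g.
#     from Bio.Restriction import BamHI, BglII, EcoRI
#     BamHI % BglII   # both leave a GATC 5' overhang -> True
#     BamHI % EcoRI   # GATC vs AATT overhangs        -> False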
class Ambiguous(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is variable.
Typical example : BstXI -> CCAN_NNNN^NTGG
The overhang can be any sequence of 4 bases.
Notes:
        Blunt enzymes are always defined, even if their site is GGATCCNNN^_N:
        their overhang is always the same: blunt!
Internal use only. Not meant to be instantiated."""
def _drop(self):
"""RE._drop() -> list.
for internal use only.
        drop the sites that are situated outside the sequence in a linear sequence.
        modify the index for sites in circular sequences."""
length = len(self.dna)
drop = itertools.dropwhile
take = itertools.takewhile
if self.dna.is_linear():
self.results = [x for x in drop(lambda x : x < 1, self.results)]
self.results = [x for x in take(lambda x : x <length, self.results)]
else:
for index, location in enumerate(self.results):
if location < 1:
self.results[index] += length
else:
break
for index, location in enumerate(self.results[::-1]):
if location > length:
self.results[-(index+1)] -= length
else:
break
return
_drop = classmethod(_drop)
def is_defined(self):
"""RE.is_defined() -> bool.
True if the sequence recognised and cut is constant,
i.e. the recognition site is not degenerated AND the enzyme cut inside
the site.
see also:
RE.is_ambiguous()
RE.is_unknown()"""
return False
is_defined = classmethod(is_defined)
def is_ambiguous(self):
"""RE.is_ambiguous() -> bool.
True if the sequence recognised and cut is ambiguous,
i.e. the recognition site is degenerated AND/OR the enzyme cut outside
the site.
see also:
RE.is_defined()
RE.is_unknown()"""
return True
is_ambiguous = classmethod(is_ambiguous)
def is_unknown(self):
"""RE.is_unknown() -> bool.
True if the sequence is unknown,
i.e. the recognition site has not been characterised yet.
see also:
RE.is_defined()
RE.is_ambiguous()"""
return False
is_unknown = classmethod(is_unknown)
def _mod2(self, other):
"""RE._mod2(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
#
# called by RE._mod1(other) when the one of the enzyme is ambiguous
#
if len(self.ovhgseq) != len(other.ovhgseq):
return False
else:
se = self.ovhgseq
for base in se:
if base in 'ATCG':
pass
if base in 'N':
se = '.'.join(se.split('N'))
if base in 'RYWMSKHDBV':
expand = '['+ matching[base] + ']'
se = expand.join(se.split(base))
if re.match(se, other.ovhgseq):
return True
else:
return False
_mod2 = classmethod(_mod2)
def elucidate(self):
"""RE.elucidate() -> str
return a representation of the site with the cut on the (+) strand
represented as '^' and the cut on the (-) strand as '_'.
ie:
>>> EcoRI.elucidate() # 5' overhang
'G^AATT_C'
>>> KpnI.elucidate() # 3' overhang
'G_GTAC^C'
>>> EcoRV.elucidate() # blunt
'GAT^_ATC'
>>> SnaI.elucidate() # NotDefined, cut profile unknown.
'? GTATAC ?'
>>>
"""
f5 = self.fst5
f3 = self.fst3
length = len(self)
site = self.site
if self.cut_twice() : re = 'cut twice, not yet implemented sorry.'
elif self.is_5overhang():
if f3 == f5 == 0:
re = 'N^' + site +'_N'
elif 0 <= f5 <= length and 0 <= f3+length <= length:
re = site[:f5] + '^' + site[f5:f3] + '_' + site[f3:]
elif 0 <= f5 <= length:
re = site[:f5] + '^' + site[f5:] + f3*'N' + '_N'
elif 0 <= f3+length <= length:
re = 'N^' + abs(f5) * 'N' + site[:f3] + '_' + site[f3:]
elif f3+length < 0:
                re = 'N^' + abs(f5)*'N' + '_' + abs(length+f3)*'N' + site
elif f5 > length:
re = site + (f5-length)*'N'+'^'+(length+f3-f5)*'N'+'_N'
else:
re = 'N^' + abs(f5) * 'N' + site + f3*'N' + '_N'
elif self.is_blunt():
if f5 < 0:
re = 'N^_' + abs(f5)*'N' + site
elif f5 > length:
re = site + (f5-length)*'N' + '^_N'
else:
raise ValueError('%s.easyrepr() : error f5=%i' \
% (self.name,f5))
else:
if f3 == 0:
if f5 == 0 : re = 'N_' + site + '^N'
else : re = site + '_' + (f5-length)*'N' + '^N'
elif 0 < f3+length <= length and 0 <= f5 <= length:
re = site[:f3] + '_' + site[f3:f5] + '^' + site[f5:]
elif 0 < f3+length <= length:
re = site[:f3] + '_' + site[f3:] + (f5-length)*'N' + '^N'
elif 0 <= f5 <= length:
re = 'N_' +'N'*(f3+length) + site[:f5] + '^' + site[f5:]
elif f3 > 0:
re = site + f3*'N' + '_' + (f5-f3-length)*'N' + '^N'
elif f5 < 0:
re = 'N_' + abs(f3-f5+length)*'N' + '^' + abs(f5)*'N' + site
else:
re = 'N_' + abs(f3+length)*'N' + site + (f5-length)*'N' + '^N'
return re
elucidate = classmethod(elucidate)
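# A minimal usage sketch (illustrative): for an ambiguous cutter the variable
# part of the site is spelled out with N's, e.g.
#     from Bio.Restriction import BstXI
#     BstXI.elucidate()     # expected 'CCAN_NNNN^NTGG' (cf. the class docstring)
#     BstXI.is_ambiguous()  # True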
class NotDefined(AbstractCut):
"""Implement the methods specific to the enzymes for which the overhang
is not characterised.
Correspond to NoCut and Unknown.
Internal use only. Not meant to be instantiated."""
def _drop(self):
"""RE._drop() -> list.
for internal use only.
        drop the sites that are situated outside the sequence in a linear sequence.
        modify the index for sites in circular sequences."""
if self.dna.is_linear():
return
else:
length = len(self.dna)
for index, location in enumerate(self.results):
if location < 1:
self.results[index] += length
else:
break
for index, location in enumerate(self.results[:-1]):
if location > length:
self.results[-(index+1)] -= length
else:
break
return
_drop = classmethod(_drop)
def is_defined(self):
"""RE.is_defined() -> bool.
True if the sequence recognised and cut is constant,
i.e. the recognition site is not degenerated AND the enzyme cut inside
the site.
see also:
RE.is_ambiguous()
RE.is_unknown()"""
return False
is_defined = classmethod(is_defined)
def is_ambiguous(self):
"""RE.is_ambiguous() -> bool.
True if the sequence recognised and cut is ambiguous,
i.e. the recognition site is degenerated AND/OR the enzyme cut outside
the site.
see also:
RE.is_defined()
RE.is_unknown()"""
return False
is_ambiguous = classmethod(is_ambiguous)
def is_unknown(self):
"""RE.is_unknown() -> bool.
True if the sequence is unknown,
i.e. the recognition site has not been characterised yet.
see also:
RE.is_defined()
RE.is_ambiguous()"""
return True
is_unknown = classmethod(is_unknown)
def _mod2(self, other):
"""RE._mod2(other) -> bool.
for internal use only
test for the compatibility of restriction ending of RE and other."""
#
# Normally we should not arrive here. But well better safe than sorry.
# the overhang is not defined we are compatible with nobody.
# could raise an Error may be rather than return quietly.
#
#return False
raise ValueError("%s.mod2(%s), %s : NotDefined. pas glop pas glop!" \
% (str(self), str(other), str(self)))
_mod2 = classmethod(_mod2)
def elucidate(self):
"""RE.elucidate() -> str
return a representation of the site with the cut on the (+) strand
represented as '^' and the cut on the (-) strand as '_'.
ie:
>>> EcoRI.elucidate() # 5' overhang
'G^AATT_C'
>>> KpnI.elucidate() # 3' overhang
'G_GTAC^C'
>>> EcoRV.elucidate() # blunt
'GAT^_ATC'
>>> SnaI.elucidate() # NotDefined, cut profile unknown.
'? GTATAC ?'
>>>
"""
return '? %s ?' % self.site
elucidate = classmethod(elucidate)
class Commercially_available(AbstractCut):
#
# Recent addition to Rebase make this naming convention uncertain.
# May be better to says enzymes which have a supplier.
#
"""Implement the methods specific to the enzymes which are commercially
available.
Internal use only. Not meant to be instantiated."""
def suppliers(self):
"""RE.suppliers() -> print the suppliers of RE."""
supply = suppliers_dict.items()
for k,v in supply:
if k in self.suppl:
print v[0]+','
return
suppliers = classmethod(suppliers)
def supplier_list(self):
"""RE.supplier_list() -> list.
list of the supplier names for RE."""
return [v[0] for k,v in suppliers_dict.items() if k in self.suppl]
supplier_list = classmethod(supplier_list)
def buffers(self, supplier):
"""RE.buffers(supplier) -> string.
not implemented yet."""
return
buffers = classmethod(buffers)
def is_comm(self):
"""RE.iscomm() -> bool.
True if RE has suppliers."""
return True
is_comm = classmethod(is_comm)
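# A minimal usage sketch (illustrative): commercially available enzymes expose
# their vendors, e.g.
#     from Bio.Restriction import EcoRI
#     EcoRI.is_comm()         # True for an enzyme with suppliers
#     EcoRI.supplier_list()   # supplier names taken from suppliers_dict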
class Not_available(AbstractCut):
"""Implement the methods specific to the enzymes which are not commercially
available.
Internal use only. Not meant to be instantiated."""
def suppliers():
"""RE.suppliers() -> print the suppliers of RE."""
return None
suppliers = staticmethod(suppliers)
def supplier_list(self):
"""RE.supplier_list() -> list.
list of the supplier names for RE."""
return []
supplier_list = classmethod(supplier_list)
def buffers(self, supplier):
"""RE.buffers(supplier) -> string.
not implemented yet."""
raise TypeError("Enzyme not commercially available.")
buffers = classmethod(buffers)
def is_comm(self):
"""RE.iscomm() -> bool.
True if RE has suppliers."""
return False
is_comm = classmethod(is_comm)
###############################################################################
# #
# Restriction Batch #
# #
###############################################################################
class RestrictionBatch(set):
def __init__(self, first=[], suppliers=[]):
"""RestrictionBatch([sequence]) -> new RestrictionBatch."""
first = [self.format(x) for x in first]
first += [eval(x) for n in suppliers for x in suppliers_dict[n][1]]
set.__init__(self, first)
self.mapping = dict.fromkeys(self)
self.already_mapped = None
def __str__(self):
if len(self) < 5:
return '+'.join(self.elements())
else:
return '...'.join(('+'.join(self.elements()[:2]),\
'+'.join(self.elements()[-2:])))
def __repr__(self):
return 'RestrictionBatch(%s)' % self.elements()
def __contains__(self, other):
try:
other = self.format(other)
except ValueError : # other is not a restriction enzyme
return False
return set.__contains__(self, other)
def __div__(self, other):
return self.search(other)
def __rdiv__(self, other):
return self.search(other)
def get(self, enzyme, add=False):
"""B.get(enzyme[, add]) -> enzyme class.
if add is True and enzyme is not in B add enzyme to B.
if add is False (which is the default) only return enzyme.
if enzyme is not a RestrictionType or can not be evaluated to
a RestrictionType, raise a ValueError."""
e = self.format(enzyme)
if e in self:
return e
elif add:
self.add(e)
return e
else:
raise ValueError('enzyme %s is not in RestrictionBatch' \
% e.__name__)
def lambdasplit(self, func):
"""B.lambdasplit(func) -> RestrictionBatch .
the new batch will contains only the enzymes for which
func return True."""
d = [x for x in itertools.ifilter(func, self)]
new = RestrictionBatch()
new._data = dict(zip(d, [True]*len(d)))
return new
def add_supplier(self, letter):
"""B.add_supplier(letter) -> add a new set of enzyme to B.
letter represents the suppliers as defined in the dictionary
RestrictionDictionary.suppliers
return None.
raise a KeyError if letter is not a supplier code."""
supplier = suppliers_dict[letter]
self.suppliers.append(letter)
for x in supplier[1]:
self.add_nocheck(eval(x))
return
def current_suppliers(self):
"""B.current_suppliers() -> add a new set of enzyme to B.
return a sorted list of the suppliers which have been used to
create the batch."""
suppl_list = [suppliers_dict[x][0] for x in self.suppliers]
suppl_list.sort()
return suppl_list
def __iadd__(self, other):
""" b += other -> add other to b, check the type of other."""
self.add(other)
return self
def __add__(self, other):
""" b + other -> new RestrictionBatch."""
new = self.__class__(self)
new.add(other)
return new
def remove(self, other):
"""B.remove(other) -> remove other from B if other is a RestrictionType.
Safe set.remove method. Verify that other is a RestrictionType or can be
evaluated to a RestrictionType.
raise a ValueError if other can not be evaluated to a RestrictionType.
raise a KeyError if other is not in B."""
return set.remove(self, self.format(other))
def add(self, other):
"""B.add(other) -> add other to B if other is a RestrictionType.
Safe set.add method. Verify that other is a RestrictionType or can be
evaluated to a RestrictionType.
raise a ValueError if other can not be evaluated to a RestrictionType.
"""
return set.add(self, self.format(other))
def add_nocheck(self, other):
"""B.add_nocheck(other) -> add other to B. don't check type of other.
"""
return set.add(self, other)
def format(self, y):
"""B.format(y) -> RestrictionType or raise ValueError.
if y is a RestrictionType return y
if y can be evaluated to a RestrictionType return eval(y)
        raise a ValueError in all other cases."""
try:
if isinstance(y, RestrictionType):
return y
elif isinstance(eval(str(y)), RestrictionType):
return eval(y)
else:
pass
except (NameError, SyntaxError):
pass
raise ValueError('%s is not a RestrictionType' % y.__class__)
def is_restriction(self, y):
"""B.is_restriction(y) -> bool.
        True if y or eval(y) is a RestrictionType."""
return isinstance(y, RestrictionType) or \
isinstance(eval(str(y)), RestrictionType)
def split(self, *classes, **bool):
"""B.split(class, [class.__name__ = True]) -> new RestrictionBatch.
        it works but it is slow, so it is really only of interest when
        splitting over multiple conditions."""
def splittest(element):
for klass in classes:
b = bool.get(klass.__name__, True)
if issubclass(element, klass):
if b:
continue
else:
return False
elif b:
return False
else:
continue
return True
d = [k for k in itertools.ifilter(splittest, self)]
new = RestrictionBatch()
new._data = dict(zip(d, [True]*len(d)))
return new
def elements(self):
"""B.elements() -> tuple.
give all the names of the enzymes in B sorted alphabetically."""
l = [str(e) for e in self]
l.sort()
return l
def as_string(self):
"""B.as_string() -> list.
return a list of the name of the elements of B."""
return [str(e) for e in self]
def suppl_codes(self):
"""B.suppl_codes() -> dict
letter code for the suppliers"""
supply = dict([(k,v[0]) for k,v in suppliers_dict.iteritems()])
return supply
suppl_codes = classmethod(suppl_codes)
def show_codes(self):
"B.show_codes() -> letter codes for the suppliers"""
supply = [' = '.join(i) for i in self.suppl_codes().iteritems()]
print '\n'.join(supply)
return
show_codes = classmethod(show_codes)
def search(self, dna, linear=True):
"""B.search(dna) -> dict."""
#
# here we replace the search method of the individual enzymes
# with one unique testing method.
#
if not hasattr(self, "already_mapped") :
#TODO - Why does this happen!
#Try the "doctest" at the start of PrintFormat.py
self.already_mapped = None
if isinstance(dna, DNA):
# For the searching, we just care about the sequence as a string,
# if that is the same we can use the cached search results.
# At the time of writing, Seq == method isn't implemented,
# and therefore does object identity which is stricter.
if (str(dna), linear) == self.already_mapped:
return self.mapping
else:
self.already_mapped = str(dna), linear
fseq = FormattedSeq(dna, linear)
self.mapping = dict([(x, x.search(fseq)) for x in self])
return self.mapping
elif isinstance(dna, FormattedSeq):
if (str(dna), dna.linear) == self.already_mapped:
return self.mapping
else:
self.already_mapped = str(dna), dna.linear
self.mapping = dict([(x, x.search(dna)) for x in self])
return self.mapping
raise TypeError("Expected Seq or MutableSeq instance, got %s instead"\
%type(dna))
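# A minimal usage sketch (illustrative): a batch maps every enzyme it contains
# onto its cut positions in one pass and caches the result keyed on the
# sequence string and topology, e.g.
#     from Bio.Seq import Seq
#     from Bio.Restriction import RestrictionBatch, EcoRI, BamHI
#     rb = RestrictionBatch([EcoRI, BamHI])
#     rb.search(Seq("AAAGAATTCAAAGGATCCAAA"))   # roughly {EcoRI: [5], BamHI: [14]}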
###############################################################################
# #
# Restriction Analysis #
# #
###############################################################################
class Analysis(RestrictionBatch, PrintFormat):
def __init__(self, restrictionbatch=RestrictionBatch(),sequence=DNA(''),
linear=True):
"""Analysis([restrictionbatch [, sequence] linear=True]) -> New Analysis class.
        For most of the methods of this class, if a dictionary is given it will
        be used as the base to calculate the results.
        If no dictionary is given, a new analysis is carried out using the
        RestrictionBatch which was given when the Analysis class was instantiated."""
RestrictionBatch.__init__(self, restrictionbatch)
self.rb = restrictionbatch
self.sequence = sequence
self.linear = linear
if self.sequence:
self.search(self.sequence, self.linear)
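    # A minimal usage sketch (illustrative): coupling a batch with a sequence
    # runs the search immediately; the filter methods below then work on the
    # cached mapping, e.g.
    #     from Bio.Seq import Seq
    #     from Bio.Restriction import Analysis, RestrictionBatch, EcoRI, EcoRV
    #     a = Analysis(RestrictionBatch([EcoRI, EcoRV]), Seq("AAAGAATTCAAA"))
    #     a.with_sites()      # roughly {EcoRI: [5]}
    #     a.without_site()    # roughly {EcoRV: []}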
def __repr__(self):
return 'Analysis(%s,%s,%s)'%\
(repr(self.rb),repr(self.sequence),self.linear)
def _sub_set(self, wanted):
"""A._sub_set(other_set) -> dict.
Internal use only.
screen the results through wanted set.
        Keep only the results for which the enzyme is in the wanted set.
"""
return dict([(k,v) for k,v in self.mapping.iteritems() if k in wanted])
def _boundaries(self, start, end):
"""A._boundaries(start, end) -> tuple.
Format the boundaries for use with the methods that limit the
search to only part of the sequence given to analyse.
"""
if not isinstance(start, int):
raise TypeError('expected int, got %s instead' % type(start))
if not isinstance(end, int):
raise TypeError('expected int, got %s instead' % type(end))
if start < 1:
start += len(self.sequence)
if end < 1:
end += len(self.sequence)
        if start < end:
            return start, end, self._test_normal
        else:
            # start > end denotes a region wrapping the origin of a circular
            # sequence; use the reverse test.
            return start, end, self._test_reverse
def _test_normal(self, start, end, site):
"""A._test_normal(start, end, site) -> bool.
Internal use only
Test if site is in between start and end.
"""
return start <= site < end
def _test_reverse(self, start, end, site):
"""A._test_reverse(start, end, site) -> bool.
Internal use only
Test if site is in between end and start (for circular sequences).
"""
return start <= site <= len(self.sequence) or 1 <= site < end
def print_that(self, dct=None, title='', s1=''):
"""A.print_that([dct[, title[, s1]]]) -> print the results from dct.
If dct is not given the full dictionary is used.
"""
if not dct:
dct = self.mapping
print
return PrintFormat.print_that(self, dct, title, s1)
def change(self, **what):
"""A.change(**attribute_name) -> Change attribute of Analysis.
It is possible to change the width of the shell by setting
self.ConsoleWidth to what you want.
        self.NameWidth refers to the maximal length of the enzyme name.
Changing one of these parameters here might not give the results
you expect. In which case, you can settle back to a 80 columns shell
or try to change self.Cmodulo and self.PrefWidth in PrintFormat until
you get it right."""
for k,v in what.iteritems():
if k in ('NameWidth', 'ConsoleWidth'):
setattr(self, k, v)
self.Cmodulo = self.ConsoleWidth % self.NameWidth
self.PrefWidth = self.ConsoleWidth - self.Cmodulo
            elif k == 'sequence':
                setattr(self, 'sequence', v)
                self.search(self.sequence, self.linear)
            elif k == 'rb':
                Analysis.__init__(self, v, self.sequence, self.linear)
            elif k == 'linear':
                setattr(self, 'linear', v)
                self.search(self.sequence, v)
            elif k in ('Indent', 'Maxsize'):
                setattr(self, k, v)
            elif k in ('Cmodulo', 'PrefWidth'):
                raise AttributeError( \
                    'To change %s, change NameWidth and/or ConsoleWidth' \
                    % k)
            else:
                raise AttributeError( \
                    'Analysis has no attribute %s' % k)
return
def full(self, linear=True):
"""A.full() -> dict.
Full Restriction Map of the sequence."""
return self.mapping
def blunt(self, dct = None):
"""A.blunt([dct]) -> dict.
        Only the enzymes which have a blunt restriction site."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if k.is_blunt()])
def overhang5(self, dct=None):
"""A.overhang5([dct]) -> dict.
Only the enzymes which have a 5' overhang restriction site."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if k.is_5overhang()])
def overhang3(self, dct=None):
"""A.Overhang3([dct]) -> dict.
Only the enzymes which have a 3'overhang restriction site."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if k.is_3overhang()])
def defined(self, dct=None):
"""A.defined([dct]) -> dict.
Only the enzymes that have a defined restriction site in Rebase."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if k.is_defined()])
def with_sites(self, dct=None):
"""A.with_sites([dct]) -> dict.
Enzymes which have at least one site in the sequence."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if v])
def without_site(self, dct=None):
"""A.without_site([dct]) -> dict.
Enzymes which have no site in the sequence."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if not v])
def with_N_sites(self, N, dct=None):
"""A.With_N_Sites(N [, dct]) -> dict.
Enzymes which cut N times the sequence."""
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems()if len(v) == N])
def with_number_list(self, list, dct= None):
if not dct:
dct = self.mapping
return dict([(k,v) for k,v in dct.iteritems() if len(v) in list])
def with_name(self, names, dct=None):
"""A.with_name(list_of_names [, dct]) ->
Limit the search to the enzymes named in list_of_names."""
for i, enzyme in enumerate(names):
if not enzyme in AllEnzymes:
print "no datas for the enzyme:", str(name)
del names[i]
if not dct:
return RestrictionBatch(names).search(self.sequence)
return dict([(n, dct[n]) for n in names if n in dct])
def with_site_size(self, site_size, dct=None):
"""A.with_site_size(site_size [, dct]) ->
Limit the search to the enzymes whose site is of size <site_size>."""
sites = [name for name in self if name.size == site_size]
if not dct:
return RestrictionBatch(sites).search(self.sequence)
        return dict([(k,v) for k,v in dct.iteritems() if k in sites])
def only_between(self, start, end, dct=None):
"""A.only_between(start, end[, dct]) -> dict.
Enzymes that cut the sequence only in between start and end."""
start, end, test = self._boundaries(start, end)
if not dct:
dct = self.mapping
d = dict(dct)
for key, sites in dct.iteritems():
if not sites:
del d[key]
continue
for site in sites:
if test(start, end, site):
continue
else:
del d[key]
break
return d
def between(self, start, end, dct=None):
"""A.between(start, end [, dct]) -> dict.
Enzymes that cut the sequence at least in between start and end.
They may cut outside as well."""
start, end, test = self._boundaries(start, end)
d = {}
if not dct:
dct = self.mapping
for key, sites in dct.iteritems():
for site in sites:
if test(start, end, site):
d[key] = sites
break
continue
return d
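    # A minimal usage sketch (illustrative): the region filters reuse the cached
    # mapping of an existing Analysis 'a', e.g.
    #     a.only_between(1, 100)   # enzymes cutting exclusively inside bases 1..100
    #     a.between(1, 100)        # enzymes cutting at least once inside bases 1..100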
def show_only_between(self, start, end, dct=None):
"""A.show_only_between(start, end [, dct]) -> dict.
Enzymes that cut the sequence outside of the region
in between start and end but do not cut inside."""
d = []
if start <= end:
d = [(k, [vv for vv in v if start<=vv<=end])
for v in self.between(start, end, dct)]
else:
d = [(k, [vv for vv in v if start<=vv or vv <= end])
for v in self.between(start, end, dct)]
return dict(d)
def only_outside(self, start, end, dct = None):
"""A.only_outside(start, end [, dct]) -> dict.
Enzymes that cut the sequence outside of the region
in between start and end but do not cut inside."""
start, end, test = self._boundaries(start, end)
if not dct : dct = self.mapping
d = dict(dct)
for key, sites in dct.iteritems():
if not sites:
del d[key]
continue
for site in sites:
if test(start, end, site):
del d[key]
break
else:
continue
return d
def outside(self, start, end, dct=None):
"""A.outside((start, end [, dct]) -> dict.
Enzymes that cut outside the region in between start and end.
No test is made to know if they cut or not inside this region."""
start, end, test = self._boundaries(start, end)
if not dct:
dct = self.mapping
d = {}
for key, sites in dct.iteritems():
for site in sites:
if test(start, end, site):
continue
else:
d[key] = sites
break
return d
def do_not_cut(self, start, end, dct = None):
"""A.do_not_cut(start, end [, dct]) -> dict.
Enzymes that do not cut the region in between start and end."""
if not dct:
dct = self.mapping
d = self.without_site()
d.update(self.only_outside(start, end, dct))
return d
#
# The restriction enzyme classes are created dynamically when the module is
# imported. Here is the magic which allow the creation of the
# restriction-enzyme classes.
#
# The reason for the two dictionaries in Restriction_Dictionary
# one for the types (which will be called pseudo-type as they really
# correspond to the values that instances of RestrictionType can take)
# and one for the enzymes is efficiency as the bases are evaluated
# once per pseudo-type.
#
# However Restriction is still a very inefficient module at import. But
# remember that around 660 classes (which is more or less the size of Rebase)
# have to be created dynamically. However, this processing takes place only
# once.
# This inefficiency is however largely compensated by the use of a metaclass
# which provides a very efficient layout for the classes themselves, mostly
# alleviating the need for if/else loops in the class methods.
#
# It is essential to run Restriction with doc string optimisation (-OO switch)
# as the doc strings of 660 classes take a lot of processing.
#
CommOnly = RestrictionBatch() # commercial enzymes
NonComm = RestrictionBatch() # not available commercially
for TYPE, (bases, enzymes) in typedict.iteritems():
#
# The keys are the pseudo-types TYPE (stored as type1, type2...)
# The names are not important and are only present to differentiate
# the keys in the dict. All the pseudo-types are in fact RestrictionType.
# These names will not be used after and the pseudo-types are not
# kept in the locals() dictionary. It is therefore impossible to
# import them.
# Now, if you have look at the dictionary, you will see that not all the
# types are present as those without corresponding enzymes have been
# removed by Dictionary_Builder().
#
# The values are tuples which contain
# as first element a tuple of bases (as string) and
# as second element the names of the enzymes.
#
# First eval the bases.
#
bases = tuple([eval(x) for x in bases])
#
# now create the particular value of RestrictionType for the classes
# in enzymes.
#
T = type.__new__(RestrictionType, 'RestrictionType', bases, {})
for k in enzymes:
#
# Now, we go through all the enzymes and assign them their type.
# enzymedict[k] contains the values of the attributes for this
# particular class (self.site, self.ovhg,....).
#
newenz = T(k, bases, enzymedict[k])
#
# we add the enzymes to the corresponding batch.
#
# No need to verify the enzyme is a RestrictionType -> add_nocheck
#
if newenz.is_comm() : CommOnly.add_nocheck(newenz)
else : NonComm.add_nocheck(newenz)
#
# AllEnzymes is a RestrictionBatch with all the enzymes from Rebase.
#
AllEnzymes = CommOnly | NonComm
#
# Now, place the enzymes in locals so they can be imported.
#
names = [str(x) for x in AllEnzymes]
try:
del x
except NameError:
#Scoping changed in Python 3, the variable isn't leaked
pass
locals().update(dict(zip(names, AllEnzymes)))
__all__=['FormattedSeq', 'Analysis', 'RestrictionBatch','AllEnzymes','CommOnly','NonComm']+names
del k, enzymes, TYPE, bases, names
|
asherkhb/coge
|
bin/last_wrapper/Bio/Restriction/Restriction.py
|
Python
|
bsd-2-clause
| 83,257
|
[
"Biopython"
] |
feb45a1b91ecc78dd507b1d1026d46497d90f5e9602a442923d0cf1ada1ae102
|
#! /usr/bin/env python
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic topology hl_api functions.
NOTE: These tests only test whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
from nest.tests.decorators import _skipIf
try:
import matplotlib.pyplot as plt
have_mpl = True
except ImportError:
have_mpl = False
@_skipIf(not have_mpl, 'Python matplotlib package not installed', 'testcase')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.PlotLayer(l)
self.assertTrue(True)
def test_PlotTargets(self):
"""Test plotting targets."""
ldict = {'elements': ['iaf_neuron', 'iaf_psc_alpha'], 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'grid': {'rows':2, 'columns':2}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
ian = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_neuron']
ipa = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_psc_alpha']
# connect ian -> all using static_synapse
cdict.update({'sources': {'model': 'iaf_neuron'},
'synapse_model': 'static_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'synapse_model']: cdict.pop(k)
# connect ipa -> ipa using stdp_synapse
cdict.update({'sources': {'model': 'iaf_psc_alpha'},
'targets': {'model': 'iaf_psc_alpha'},
'synapse_model': 'stdp_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'targets', 'synapse_model']: cdict.pop(k)
ctr = topo.FindCenterElement(l)
fig = topo.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
self.assertTrue(True)
def test_PlotKernel(self):
"""Test plotting kernels."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
f = plt.figure()
a1 = f.add_subplot(221)
ctr = topo.FindCenterElement(l)
topo.PlotKernel(a1, ctr, {'circular': {'radius': 1.}}, {'gaussian': {'sigma':0.2}})
a2 = f.add_subplot(222)
topo.PlotKernel(a2, ctr, {'doughnut': {'inner_radius': 0.5, 'outer_radius':0.75}})
a3 = f.add_subplot(223)
topo.PlotKernel(a3, ctr, {'rectangular': {'lower_left': [-.5,-.5],
'upper_right':[0.5,0.5]}})
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
import matplotlib.pyplot as plt
plt.show()
|
gewaltig/cython-neuron
|
topology/pynest/tests/test_plotting.py
|
Python
|
gpl-2.0
| 4,046
|
[
"Gaussian"
] |
2a50a97ea2d39dcc713c7614dfb6887b4b37440bb1f5176d0b8ec0e95022559c
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing classes to generate grain boundaries.
"""
import itertools
import logging
import warnings
from fractions import Fraction
from functools import reduce
from math import cos, floor, gcd
import numpy as np
from monty.fractions import lcm
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# This module implements representations of grain boundaries, as well as
# algorithms for generating them.
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Xiang-Guo Li"
__email__ = "xil110@ucsd.edu"
__date__ = "7/30/18"
logger = logging.getLogger(__name__)
class GrainBoundary(Structure):
"""
Subclass of Structure representing a GrainBoundary (gb) object.
Implements additional attributes pertaining to gbs, but the
init method does not actually implement any algorithm that
    creates a gb. This is a DUMMY class whose init method only holds
    information about the gb. Also has additional methods that return
    other information about a gb such as the sigma value.
Note that all gbs have the gb surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the gb surface plane (at
least for one grain) and the c vector is out of the surface plane
    (though not necessarily perpendicular to the surface).
"""
def __init__(
self,
lattice,
species,
coords,
rotation_axis,
rotation_angle,
gb_plane,
join_plane,
init_cell,
vacuum_thickness,
ab_shift,
site_properties,
oriented_unit_cell,
validate_proximity=False,
coords_are_cartesian=False,
):
"""
Makes a gb structure, a structure object with additional information
and methods pertaining to gbs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
rotation_axis (list): Rotation axis of GB in the form of a list of
integers, e.g. [1, 1, 0].
rotation_angle (float, in unit of degree): rotation angle of GB.
gb_plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3].
join_plane (list): Joining plane of the second grain in the form of a list of
integers. e.g.: [1, 2, 3].
init_cell (Structure): initial bulk structure to form the GB.
site_properties (dict): Properties associated with the sites as a
dict of sequences, The sequences have to be the same length as
the atomic species and fractional_coords. For gb, you should
have the 'grain_label' properties to classify the sites as 'top',
'bottom', 'top_incident', or 'bottom_incident'.
vacuum_thickness (float in angstrom): The thickness of vacuum inserted
between two grains of the GB.
ab_shift (list of float, in unit of crystal vector a, b): The relative
shift along a, b vectors.
oriented_unit_cell (Structure): oriented unit cell of the bulk init_cell.
                Helps to accurately calculate the bulk properties that are
                consistent with gb calculations.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
"""
self.oriented_unit_cell = oriented_unit_cell
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.gb_plane = gb_plane
self.join_plane = join_plane
self.init_cell = init_cell
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
def copy(self):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
return GrainBoundary(
self.lattice,
self.species_and_occu,
self.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
        electronegativity of the species. Note that GrainBoundary has to
        override this because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return GrainBoundary(
s.lattice,
s.species_and_occu,
s.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
@property
def sigma(self):
"""
This method returns the sigma value of the gb.
If using 'quick_gen' to generate GB, this value is not valid.
"""
return int(round(self.oriented_unit_cell.volume / self.init_cell.volume))
@property
def sigma_from_site_prop(self):
"""
This method returns the sigma value of the gb from site properties.
        If the GB structure merges some atoms because they are too close to
        each other, this property will not work.
"""
num_coi = 0
if None in self.site_properties["grain_label"]:
raise RuntimeError("Site were merged, this property do not work")
for tag in self.site_properties["grain_label"]:
if "incident" in tag:
num_coi += 1
return int(round(self.num_sites / num_coi))
@property
def top_grain(self):
"""
return the top grain (Structure) of the GB.
"""
top_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "top" in tag:
top_sites.append(self.sites[i])
return Structure.from_sites(top_sites)
@property
def bottom_grain(self):
"""
return the bottom grain (Structure) of the GB.
"""
bottom_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "bottom" in tag:
bottom_sites.append(self.sites[i])
return Structure.from_sites(bottom_sites)
@property
def coincidents(self):
"""
        return a list of coincident sites.
"""
coincident_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "incident" in tag:
coincident_sites.append(self.sites[i])
return coincident_sites
def __str__(self):
comp = self.composition
outs = [
f"Gb Summary ({comp.formula})",
f"Reduced Formula: {comp.reduced_formula}",
f"Rotation axis: {self.rotation_axis}",
f"Rotation angle: {self.rotation_angle}",
f"GB plane: {self.gb_plane}",
f"Join plane: {self.join_plane}",
f"vacuum thickness: {self.vacuum_thickness}",
f"ab_shift: {self.ab_shift}",
]
def to_s(x, rjust=10):
return (f"{x:0.6f}").rjust(rjust)
outs.append("abc : " + " ".join([to_s(i) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i) for i in self.lattice.angles]))
outs.append(f"Sites ({len(self)})")
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i + 1),
site.species_string,
" ".join([to_s(j, 12) for j in site.frac_coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
Returns:
Dictionary representation of GrainBoundary object
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["init_cell"] = self.init_cell.as_dict()
d["rotation_axis"] = self.rotation_axis
d["rotation_angle"] = self.rotation_angle
d["gb_plane"] = self.gb_plane
d["join_plane"] = self.join_plane
d["vacuum_thickness"] = self.vacuum_thickness
d["ab_shift"] = self.ab_shift
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
return d
@classmethod
def from_dict(cls, d):
"""
Generates a GrainBoundary object from a dictionary created by as_dict().
Args:
d: dict
Returns:
GrainBoundary object
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return GrainBoundary(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
rotation_axis=d["rotation_axis"],
rotation_angle=d["rotation_angle"],
gb_plane=d["gb_plane"],
join_plane=d["join_plane"],
init_cell=Structure.from_dict(d["init_cell"]),
vacuum_thickness=d["vacuum_thickness"],
ab_shift=d["ab_shift"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
site_properties=s.site_properties,
)
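# A minimal usage sketch (illustrative): GrainBoundary serializes like other
# pymatgen structures, so a dict round trip is expected to reproduce the
# object ('gb' below stands for an existing GrainBoundary instance):
#     gb2 = GrainBoundary.from_dict(gb.as_dict())
#     assert gb2.sigma == gb.sigma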
class GrainBoundaryGenerator:
"""
    This class is used to generate grain boundaries (GBs) from a bulk
    conventional cell (for fcc and bcc, the primitive cell can be used), and works
    for Cubic, Tetragonal, Orthorhombic, Rhombohedral, and Hexagonal systems.
    It generates GBs from given parameters, which include the
    GB plane, rotation axis, and rotation angle.
    This class works for any general GB, including twist, tilt and mixed GBs.
    The three parameters, rotation axis, GB plane and rotation angle, are
    sufficient to identify one unique GB. Sometimes users may not be able
    to tell exactly what the rotation angle is but prefer to use sigma as a parameter;
    this class also provides a function that is able to return all possible
    rotation angles for a specific sigma value.
The same sigma value (with rotation axis fixed) can correspond to
multiple rotation angles.
Users can use structure matcher in pymatgen to get rid of the redundant structures.
"""
def __init__(self, initial_structure, symprec=0.1, angle_tolerance=1):
"""
initial_structure (Structure): Initial input structure. It can
be conventional or primitive cell (primitive cell works for bcc and fcc).
For fcc and bcc, using conventional cell can lead to a non-primitive
grain boundary structure.
This code supplies Cubic, Tetragonal, Orthorhombic, Rhombohedral, and
Hexagonal systems.
symprec (float): Tolerance for symmetry finding. Defaults to 0.1 (the value used
in Materials Project), which is for structures with slight deviations
from their proper atomic positions (e.g., structures relaxed with
electronic structure codes).
A smaller value of 0.01 is often used for properly refined
structures with atoms in the proper symmetry coordinates.
User should make sure the symmetry is what you want.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
analyzer = SpacegroupAnalyzer(initial_structure, symprec, angle_tolerance)
self.lat_type = analyzer.get_lattice_type()[0]
if self.lat_type == "t":
# need to use the conventional cell for tetragonal
initial_structure = analyzer.get_conventional_standard_structure()
a, b, c = initial_structure.lattice.abc
# c axis of tetragonal structure not in the third direction
if abs(a - b) > symprec:
# a == c, rotate b to the third direction
if abs(a - c) < symprec:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
# b == c, rotate a to the third direction
else:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
elif self.lat_type == "h":
alpha, beta, gamma = initial_structure.lattice.angles
# c axis is not in the third direction
if abs(gamma - 90) < angle_tolerance:
# alpha = 120 or 60, rotate b, c to a, b vectors
if abs(alpha - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
# beta = 120 or 60, rotate c, a to a, b vectors
elif abs(beta - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
elif self.lat_type == "r":
# need to use primitive cell for rhombohedra
initial_structure = analyzer.get_primitive_standard_structure()
elif self.lat_type == "o":
            # need to use the conventional cell for orthorhombic
initial_structure = analyzer.get_conventional_standard_structure()
self.initial_structure = initial_structure
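    # A minimal usage sketch (illustrative): a generator is built from a bulk
    # structure and then asked for a specific boundary, e.g. the sigma 3 twist
    # GB quoted in the docstring below (the CIF path is hypothetical):
    #     bulk = Structure.from_file("bulk_conventional_cell.cif")  # hypothetical file
    #     gbg = GrainBoundaryGenerator(bulk)
    #     gb = gbg.gb_from_parameters([1, 1, 1], 60.0, plane=[1, 1, 1])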
def gb_from_parameters(
self,
rotation_axis,
rotation_angle,
expand_times=4,
vacuum_thickness=0.0,
ab_shift=[0, 0],
normal=False,
ratio=None,
plane=None,
max_search=20,
tol_coi=1.0e-8,
rm_ratio=0.7,
quick_gen=False,
):
"""
Args:
rotation_axis (list): Rotation axis of GB in the form of a list of integer
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
                If you do not know the rotation angle but know the sigma value, we
                provide the function get_rotation_angle_from_sigma, which is able to return
                all the rotation angles for the sigma value you provide.
expand_times (int): The multiple times used to expand one unit grain to larger grain.
This is used to tune the grain length of GB to warrant that the two GBs in one
cell do not interact with each other. Default set to 4.
vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
between two grains of the GB. Default to 0.
ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains
            normal (bool):
                determine whether to require the c axis of the top grain (first transformation
                matrix) to be perpendicular to the surface or not.
                Default to False.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). User can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
to the rotation axis.
max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is True, also the max search for the GB c vector that is
                perpendicular to the plane. For a complex GB, if you want to speed up, you can
                reduce this value. But too small a value may lead to an error.
tol_coi (float): tolerance to find the coincidence sites. When making approximations to
the ratio needed to generate the GB, you probably need to increase this tolerance to
                obtain the correct number of coincidence sites. To check whether the number of
                coincidence sites is correct, you can compare the generated GB object's
                sigma_from_site_prop with the enum* sigma values (what the user expected as input).
            rm_ratio (float): the criterion to remove atoms which are too close to each other.
                rm_ratio*bond_length of the bulk system is the bond length criterion, below which
                the atom will be removed. Default to 0.7.
quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
find the smallest cell.
Returns:
Grain boundary structure (gb object).
"""
lat_type = self.lat_type
# if the initial structure is primitive cell in cubic system,
# calculate the transformation matrix from its conventional cell
# to primitive cell, basically for bcc and fcc systems.
trans_cry = np.eye(3)
if lat_type == "c":
analyzer = SpacegroupAnalyzer(self.initial_structure)
convention_cell = analyzer.get_conventional_standard_structure()
vol_ratio = self.initial_structure.volume / convention_cell.volume
# bcc primitive cell, belong to cubic system
if abs(vol_ratio - 0.5) < 1.0e-3:
trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
logger.info("Make sure this is for cubic with bcc primitive cell")
# fcc primitive cell, belong to cubic system
elif abs(vol_ratio - 0.25) < 1.0e-3:
trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
logger.info("Make sure this is for cubic with fcc primitive cell")
else:
logger.info("Make sure this is for cubic with conventional cell")
elif lat_type == "t":
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2")
elif len(ratio) != 2:
raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
elif lat_type == "o":
logger.info("Make sure this is for orthorhombic system")
if ratio is None:
raise RuntimeError("CSL does not exist if all axial ratios are irrational for an orthorhombic system")
if len(ratio) != 3:
raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
elif lat_type == "h":
logger.info("Make sure this is for hexagonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2")
elif len(ratio) != 2:
raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
elif lat_type == "r":
logger.info("Make sure this is for rhombohedral system")
if ratio is None:
logger.info("Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio")
elif len(ratio) != 2:
raise RuntimeError("Rhombohedral system needs correct (1+2*cos(alpha)/cos(alpha) ratio")
else:
raise RuntimeError(
"Lattice type not implemented. This code works for cubic, "
"tetragonal, orthorhombic, rhombehedral, hexagonal systems"
)
# transform four index notation to three index notation for hexagonal and rhombohedral
if len(rotation_axis) == 4:
u1 = rotation_axis[0]
v1 = rotation_axis[1]
w1 = rotation_axis[3]
if lat_type.lower() == "h":
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
rotation_axis = [u, v, w]
elif lat_type.lower() == "r":
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
rotation_axis = [u, v, w]
# make sure gcd(rotation_axis)==1
if reduce(gcd, rotation_axis) != 1:
rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
# transform four index notation to three index notation for plane
if plane is not None:
if len(plane) == 4:
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
# set the plane for grain boundary when plane is None.
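# For non-cubic lattices the rotation axis [u,v,w] is converted to the corresponding plane
# normal via the metric tensor below, so the default GB plane is perpendicular to the rotation
# axis, i.e. a pure twist boundary; for cubic lattices the axis indices can be used directly.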
if plane is None:
if lat_type.lower() == "c":
plane = rotation_axis
else:
if lat_type.lower() == "h":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "r":
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array(
[
[1, cos_alpha, cos_alpha],
[cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1],
]
)
elif lat_type.lower() == "t":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "o":
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array(
[
[1, 0, 0],
[0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]],
]
)
else:
raise RuntimeError("Lattice type has not implemented.")
plane = np.matmul(rotation_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in plane]
least_mul = reduce(lcm, [f.denominator for f in fractions])
plane = [int(round(x * least_mul)) for x in plane]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
t1, t2 = self.get_trans_mat(
r_axis=rotation_axis,
angle=rotation_angle,
normal=normal,
trans_cry=trans_cry,
lat_type=lat_type,
ratio=ratio,
surface=plane,
max_search=max_search,
quick_gen=quick_gen,
)
# find the join_plane
if lat_type.lower() != "c":
if lat_type.lower() == "h":
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == "r":
if ratio is None:
c2_a2_ratio = 1
else:
mu, mv = ratio
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry1 = np.array(
[
[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
]
)
else:
if lat_type.lower() == "t":
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == "o":
new_ratio = [1 if v is None else v for v in ratio]
mu, lam, mv = new_ratio
trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
else:
trans_cry1 = trans_cry
grain_matrix = np.dot(t2, trans_cry1)
plane_init = np.cross(grain_matrix[0], grain_matrix[1])
if lat_type.lower() != "c":
plane_init = np.dot(plane_init, trans_cry1.T)
join_plane = self.vec_to_surface(plane_init)
parent_structure = self.initial_structure.copy()
# calculate the bond_length in bulk system.
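# (for a single-atom cell, a 1x1x2 supercell is built first so that at least one nonzero
# interatomic distance exists)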
if len(parent_structure) == 1:
temp_str = parent_structure.copy()
temp_str.make_supercell([1, 1, 2])
distance = temp_str.distance_matrix
else:
distance = parent_structure.distance_matrix
bond_length = np.min(distance[np.nonzero(distance)])
# top grain
top_grain = fix_pbc(parent_structure * t1)
# obtain the smallest oriented cell
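# unit_ab_adjust below stores the in-plane component of the oriented cell's c vector relative
# to its component along the surface normal; it is used when stacking the top grain.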
if normal and not quick_gen:
t_temp = self.get_trans_mat(
r_axis=rotation_axis,
angle=rotation_angle,
normal=False,
trans_cry=trans_cry,
lat_type=lat_type,
ratio=ratio,
surface=plane,
max_search=max_search,
)
oriended_unit_cell = fix_pbc(parent_structure * t_temp[0])
t_matrix = oriended_unit_cell.lattice.matrix
normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) / np.dot(
unit_normal_v, t_matrix[2]
)
else:
oriended_unit_cell = top_grain.copy()
unit_ab_adjust = 0.0
# bottom grain, using top grain's lattice matrix
bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
# label both grains with 'top','bottom','top_incident','bottom_incident'
n_sites = top_grain.num_sites
t_and_b = Structure(
top_grain.lattice,
top_grain.species + bottom_grain.species,
list(top_grain.frac_coords) + list(bottom_grain.frac_coords),
)
t_and_b_dis = t_and_b.lattice.get_all_distances(
t_and_b.frac_coords[0:n_sites], t_and_b.frac_coords[n_sites : n_sites * 2]
)
index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
top_labels = []
for i in range(n_sites):
if i in index_incident[0]:
top_labels.append("top_incident")
else:
top_labels.append("top")
bottom_labels = []
for i in range(n_sites):
if i in index_incident[1]:
bottom_labels.append("bottom_incident")
else:
bottom_labels.append("bottom")
top_grain = Structure(
Lattice(top_grain.lattice.matrix),
top_grain.species,
top_grain.frac_coords,
site_properties={"grain_label": top_labels},
)
bottom_grain = Structure(
Lattice(bottom_grain.lattice.matrix),
bottom_grain.species,
bottom_grain.frac_coords,
site_properties={"grain_label": bottom_labels},
)
# expand both grains
top_grain.make_supercell([1, 1, expand_times])
bottom_grain.make_supercell([1, 1, expand_times])
top_grain = fix_pbc(top_grain)
bottom_grain = fix_pbc(bottom_grain)
# determine the top-grain location.
edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
c_adjust = (edge_t - edge_b) / 2.0
# construct all species
all_species = []
all_species.extend([site.specie for site in bottom_grain])
all_species.extend([site.specie for site in top_grain])
half_lattice = top_grain.lattice
# calculate translation vector, perpendicular to the plane
normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
translation_v = unit_normal_v * vacuum_thickness
# construct the final lattice
whole_matrix_no_vac = np.array(half_lattice.matrix)
whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
whole_matrix_with_vac = whole_matrix_no_vac.copy()
whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
whole_lat = Lattice(whole_matrix_with_vac)
# construct the coords, move top grain with translation_v
all_coords = []
grain_labels = bottom_grain.site_properties["grain_label"] + top_grain.site_properties["grain_label"]
for site in bottom_grain:
all_coords.append(site.coords)
for site in top_grain:
all_coords.append(
site.coords
+ half_lattice.matrix[2] * (1 + c_adjust)
+ unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust))
+ translation_v
+ ab_shift[0] * whole_matrix_with_vac[0]
+ ab_shift[1] * whole_matrix_with_vac[1]
)
gb_with_vac = Structure(
whole_lat,
all_species,
all_coords,
coords_are_cartesian=True,
site_properties={"grain_label": grain_labels},
)
# merge closer atoms. extract near gb atoms.
cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
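# range_c_len is the fractional length along c corresponding to one bond length measured
# perpendicular to the GB plane; atoms within this range of the GB regions (near z = 0/1 and
# z = 0.5 in fractional coordinates) are candidates for merging.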
sites_near_gb = []
sites_away_gb = []
for site in gb_with_vac.sites:
if (
site.frac_coords[2] < range_c_len
or site.frac_coords[2] > 1 - range_c_len
or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len)
):
sites_near_gb.append(site)
else:
sites_away_gb.append(site)
if len(sites_near_gb) >= 1:
s_near_gb = Structure.from_sites(sites_near_gb)
s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode="d")
all_sites = sites_away_gb + s_near_gb.sites
gb_with_vac = Structure.from_sites(all_sites)
# move coordinates into the periodic cell.
gb_with_vac = fix_pbc(gb_with_vac, whole_lat.matrix)
return GrainBoundary(
whole_lat,
gb_with_vac.species,
gb_with_vac.cart_coords,
rotation_axis,
rotation_angle,
plane,
join_plane,
self.initial_structure,
vacuum_thickness,
ab_shift,
site_properties=gb_with_vac.site_properties,
oriented_unit_cell=oriended_unit_cell,
coords_are_cartesian=True,
)
def get_ratio(self, max_denominator=5, index_none=None):
"""
Find the axial ratio needed for GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
0-a, 1-b, 2-c. Only needed for the orthorhombic system.
Returns:
axial ratio needed for GB generator (list of integers).
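Example (illustrative): for a tetragonal structure with c/a of about 1.58 (so c^2/a^2 is
close to 5/2), this method returns [5, 2].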
"""
structure = self.initial_structure
lat_type = self.lat_type
if lat_type in ("t", "h"):
# For tetragonal and hexagonal system, ratio = c2 / a2.
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c**2 / a**2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a**2 / c**2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == "r":
# For rhombohedral system, ratio = (1 + 2 * cos(alpha)) / cos(alpha).
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == "o":
# For orthorhombic system, ratio = c2:b2:a2. If irrational for one axis, set it to None.
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round(com_lcm / frac1.denominator))
ratio[index[1]] = frac2.numerator * int(round(com_lcm / frac2.denominator))
else:
index.pop(index_none)
if lat[index[0]] > lat[index[1]]:
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == "c":
# Cubic system does not need axial ratio.
return None
else:
raise RuntimeError("Lattice type not implemented.")
return ratio
@staticmethod
def get_trans_mat(
r_axis,
angle,
normal=False,
trans_cry=np.eye(3),
lat_type="c",
ratio=None,
surface=None,
max_search=20,
quick_gen=False,
):
"""
Find the two transformation matrices, one for each grain, from the given rotation axis,
GB plane, rotation angle and corresponding ratio (see the explanation of ratio
below).
The structure of each grain can be obtained by applying the corresponding
transformation matrix to the conventional cell.
The algorithm is from the reference Acta Cryst. A32, 783 (1976).
Args:
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
angle (float, in unit of degree):
the rotation angle of the grain boundary.
normal (bool):
whether to require the c axis of the grain associated with
the first transformation matrix to be perpendicular to the surface.
Defaults to False.
trans_cry (3 by 3 array):
if the structure given are primitive cell in cubic system, e.g.
bcc or fcc system, trans_cry is the transformation matrix from its
conventional cell to the primitive cell.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
surface (list of three integers, e.g. h, k, l
or four integers, e.g. h, k, i, l for hex/rho system only):
the miller index of the grain boundary plane, in the format [h,k,l].
If surface is not given, the default is perpendicular to r_axis, which gives
a twist grain boundary.
max_search (int): maximum search range for the GB lattice vectors that give the smallest GB
lattice. If normal is True, also the maximum search range for a GB c vector perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell; if set to True, there is no need to
find the smallest cell.
Returns:
t1 (3 by 3 integer array):
The transformation array for one grain.
t2 (3 by 3 integer array):
The transformation array for the other grain.
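Example (illustrative): for a cubic sigma 3 twist boundary about [1, 1, 1],
    t1, t2 = GrainBoundaryGenerator.get_trans_mat([1, 1, 1], 60, lat_type='c', surface=[1, 1, 1])
returns the two integer transformation matrices to be applied to the conventional cell.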
"""
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
if lat_type.lower() == "h":
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
r_axis = [u, v, w]
elif lat_type.lower() == "r":
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
if surface is not None:
if len(surface) == 4:
u1 = surface[0]
v1 = surface[1]
w1 = surface[3]
surface = [u1, v1, w1]
# set the surface for grain boundary.
if surface is None:
if lat_type.lower() == "c":
surface = r_axis
else:
if lat_type.lower() == "h":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "r":
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array(
[
[1, cos_alpha, cos_alpha],
[cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1],
]
)
elif lat_type.lower() == "t":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == "o":
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array(
[
[1, 0, 0],
[0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]],
]
)
else:
raise RuntimeError("Lattice type has not implemented.")
surface = np.matmul(r_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
if lat_type.lower() == "h":
# set the value for u,v,w,mu,mv,m,n,d,x
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u**2 + v**2 - u * v) * mv + w**2 * mu
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(
np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / 3.0 / mu)
).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + 2 * w * mu * m * n + 3 * mu * m**2,
(2 * v - u) * u * mv * n**2 - 4 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n**2 + 4 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 - 2 * w * mu * m * n + 3 * mu * m**2,
2 * v * w * mu * n**2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n**2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n**2 + 3 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv + u * v * mv) * n**2 + 3 * mu * m**2,
]
m = -1 * m
r_list_inv = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + 2 * w * mu * m * n + 3 * mu * m**2,
(2 * v - u) * u * mv * n**2 - 4 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n**2 + 4 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 - 2 * w * mu * m * n + 3 * mu * m**2,
2 * v * w * mu * n**2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n**2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n**2 + 3 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv + u * v * mv) * n**2 + 3 * mu * m**2,
]
m = -1 * m
F = 3 * mu * m**2 + d * n**2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
elif lat_type.lower() == "r":
# set the value for u,v,w,mu,mv,m,n,d
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError(
"For irrational ratio_alpha, CSL only exist for [1,1,1] or [u, v, -(u+v)] and m =0"
)
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u**2 + v**2 + w**2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [
(mu - 2 * mv) * (u**2 - v**2 - w**2) * n**2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n**2
+ mu * m**2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
(mu - 2 * mv) * (v**2 - w**2 - u**2) * n**2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n**2
+ mu * m**2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
(mu - 2 * mv) * (w**2 - u**2 - v**2) * n**2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n**2
+ mu * m**2,
]
m = -1 * m
r_list_inv = [
(mu - 2 * mv) * (u**2 - v**2 - w**2) * n**2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n**2
+ mu * m**2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
(mu - 2 * mv) * (v**2 - w**2 - u**2) * n**2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n**2
+ mu * m**2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
(mu - 2 * mv) * (w**2 - u**2 - v**2) * n**2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n**2
+ mu * m**2,
]
m = -1 * m
F = mu * m**2 + d * n**2
all_list = r_list_inv + r_list + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
else:
u, v, w = r_axis
if lat_type.lower() == "c":
mu = 1
lam = 1
mv = 1
elif lat_type.lower() == "t":
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == "o":
if None in ratio:
mu, lam, mv = ratio
non_none = [i for i in ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError("No CSL exist for two irrational numbers")
non1, non2 = non_none
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError("For irrational b2, CSL only exist for [0,1,0] or [u,0,w] and m = 0")
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError("For irrational a2, CSL only exist for [1,0,0] or [0,v,w] and m = 0")
else:
mu, lam, mv = ratio
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# make sure mu, lambda, mv are coprime integers.
if reduce(gcd, [mu, lam, mv]) != 1:
temp = reduce(gcd, [mu, lam, mv])
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
d = (mv * u**2 + lam * v**2) * mv + w**2 * mu * mv
if abs(angle - 180.0) < 1.0e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(d / mu / lam)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
r_list = [
(u**2 * mv * mv - lam * v**2 * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * lam * (v * u * mv * n**2 - w * mu * m * n),
2 * mu * (u * w * mv * n**2 + v * lam * m * n),
2 * mv * (u * v * mv * n**2 + w * mu * m * n),
(v**2 * mv * lam - u**2 * mv * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * mv * mu * (v * w * n**2 - u * m * n),
2 * mv * (u * w * mv * n**2 - v * lam * m * n),
2 * lam * mv * (v * w * n**2 + u * m * n),
(w**2 * mu * mv - u**2 * mv * mv - v**2 * mv * lam) * n**2 + lam * mu * m**2,
]
m = -1 * m
r_list_inv = [
(u**2 * mv * mv - lam * v**2 * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * lam * (v * u * mv * n**2 - w * mu * m * n),
2 * mu * (u * w * mv * n**2 + v * lam * m * n),
2 * mv * (u * v * mv * n**2 + w * mu * m * n),
(v**2 * mv * lam - u**2 * mv * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * mv * mu * (v * w * n**2 - u * m * n),
2 * mv * (u * w * mv * n**2 - v * lam * m * n),
2 * lam * mv * (v * w * n**2 + u * m * n),
(w**2 * mu * mv - u**2 * mv * mv - v**2 * mv * lam) * n**2 + lam * mu * m**2,
]
m = -1 * m
F = mu * lam * m**2 + d * n**2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
if sigma > 1000:
raise RuntimeError("Sigma >1000 too large. Are you sure what you are doing, Please check the GB if exist")
# transform surface, r_axis, r_matrix in terms of primitive lattice
surface = np.matmul(surface, np.transpose(trans_cry))
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
r_axis = np.rint(np.matmul(r_axis, np.linalg.inv(trans_cry))).astype(int)
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
# set one vector of the basis to the rotation axis direction, and
# obtain the corresponding transform matrix
eye = np.eye(3, dtype=int)
for h in range(3):
if abs(r_axis[h]) != 0:
eye[h] = np.array(r_axis)
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
break
trans = eye.T
new_rot = np.array(r_matrix)
# with the rotation matrix to construct the CSL lattice, check reference for details
fractions = [Fraction(x).limit_denominator() for x in new_rot[:, k]]
least_mul = reduce(lcm, [f.denominator for f in fractions])
scale = np.zeros((3, 3))
scale[h, h] = 1
scale[k, k] = least_mul
scale[l, l] = sigma / least_mul
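# the k and l diagonal entries split the factor sigma between the two directions; n_final found
# below is the off-diagonal shear that keeps all CSL lattice vectors integral.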
for i in range(least_mul):
check_int = i * new_rot[:, k] + (sigma / least_mul) * new_rot[:, l]
if all(np.round(x, 5).is_integer() for x in list(check_int)):
n_final = i
break
try:
n_final
except NameError:
raise RuntimeError("Something is wrong. Check if this GB exists or not")
scale[k, l] = n_final
# each row of mat_csl is the CSL lattice vector
csl_init = np.rint(np.dot(np.dot(r_matrix, trans), scale)).astype(int).T
if abs(r_axis[h]) > 1:
csl_init = GrainBoundaryGenerator.reduce_mat(np.array(csl_init), r_axis[h], r_matrix)
csl = np.rint(Lattice(csl_init).get_niggli_reduced_lattice().matrix).astype(int)
# find the best slab supercell in terms of the conventional cell from the csl lattice,
# which is the transformation matrix
# now trans_cry is the transformation matrix from crystal to cartesian coordinates.
# for cubic, do not need to change.
if lat_type.lower() != "c":
if lat_type.lower() == "h":
trans_cry = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == "r":
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry = np.array(
[
[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
]
)
else:
trans_cry = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
t1_final = GrainBoundaryGenerator.slab_from_csl(
csl, surface, normal, trans_cry, max_search=max_search, quick_gen=quick_gen
)
t2_final = np.array(np.rint(np.dot(t1_final, np.linalg.inv(r_matrix.T)))).astype(int)
return t1_final, t2_final
@staticmethod
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm is from the reference Acta Cryst. A40, 108 (1984).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
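Example (illustrative): enum_sigma_cubic(10, [1, 0, 0]) returns only sigma = 5, with rotation
angles of roughly 36.87, 53.13, 126.87 and 143.13 degrees about [100].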
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n**2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m**2 + n**2 * sum(np.array(r_axis) ** 2)) / a))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas
@staticmethod
def enum_sigma_hex(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in hexagonal system.
The algorithm is from the reference Acta Cryst. A38, 550 (1982).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary.
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the hexagonal axial ratio, which is a rational
number. If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
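Example (illustrative): enum_sigma_hex(50, [0, 0, 0, 1], [8, 3]) enumerates all sigma values up
to 50 for rotations about the c axis of a hexagonal cell with c2/a2 = 8/3.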
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
else:
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u**2 + v**2 - u * v) * mv + w**2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 12 * mu * mv) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if (c2_a2_ratio is None) and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 12 * mu * mv - n**2 * d) / (3 * mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + 2 * w * mu * m * n + 3 * mu * m**2,
(2 * v - u) * u * mv * n**2 - 4 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n**2 + 4 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 - 2 * w * mu * m * n + 3 * mu * m**2,
2 * v * w * mu * n**2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n**2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n**2 + 3 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv + u * v * mv) * n**2 + 3 * mu * m**2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + 2 * w * mu * m * n + 3 * mu * m**2,
(2 * v - u) * u * mv * n**2 - 4 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n**2 + 4 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 - 2 * w * mu * m * n + 3 * mu * m**2,
2 * v * w * mu * n**2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n**2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n**2 + 3 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv + u * v * mv) * n**2 + 3 * mu * m**2,
]
m = -1 * m
F = 3 * mu * m**2 + d * n**2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((3 * mu * m**2 + d * n**2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm is from the reference Acta Cryst. A45, 505 (1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio (1+2*cos(alpha))/cos(alpha), which is a rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
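Example (illustrative): for a rhombohedral cell with alpha = 60 degrees,
(1+2*cos(alpha))/cos(alpha) = 4, so enum_sigma_rho(50, [1, 1, 1], [4, 1]) enumerates all
sigma values up to 50 for rotations about [111].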
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError(
"For irrational ratio_alpha, CSL only exist for [1,1,1] or [u, v, -(u+v)] and m =0"
)
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u**2 + v**2 + w**2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n**2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(mu - 2 * mv) * (u**2 - v**2 - w**2) * n**2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n**2
+ mu * m**2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
(mu - 2 * mv) * (v**2 - w**2 - u**2) * n**2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n**2
+ mu * m**2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
(mu - 2 * mv) * (w**2 - u**2 - v**2) * n**2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n**2
+ mu * m**2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(mu - 2 * mv) * (u**2 - v**2 - w**2) * n**2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n**2
+ mu * m**2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n**2),
(mu - 2 * mv) * (v**2 - w**2 - u**2) * n**2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n**2
+ mu * m**2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n**2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n**2),
(mu - 2 * mv) * (w**2 - u**2 - v**2) * n**2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n**2
+ mu * m**2,
]
m = -1 * m
F = mu * m**2 + d * n**2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180.0
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_tet(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in tetragonal system.
The algorithm is from the reference Acta Cryst. B46, 117 (1990).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the tetragonal axial ratio, which is a rational number.
If irrational, set c2_a2_ratio = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
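Example (illustrative): enum_sigma_tet(50, [0, 0, 1], [2, 1]) enumerates all sigma values up to
50 for rotations about [001] of a tetragonal cell with c2/a2 = 2.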
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u**2 + v**2) * mv + w**2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if c2_a2_ratio is None and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv - n**2 * d) / mu))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + mu * m**2,
2 * v * u * mv * n**2 - 2 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * v * mu * m * n,
2 * u * v * mv * n**2 + 2 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 + mu * m**2,
2 * v * w * mu * n**2 - 2 * u * mu * m * n,
2 * u * w * mv * n**2 - 2 * v * mv * m * n,
2 * v * w * mv * n**2 + 2 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv) * n**2 + mu * m**2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u**2 * mv - v**2 * mv - w**2 * mu) * n**2 + mu * m**2,
2 * v * u * mv * n**2 - 2 * w * mu * m * n,
2 * u * w * mu * n**2 + 2 * v * mu * m * n,
2 * u * v * mv * n**2 + 2 * w * mu * m * n,
(v**2 * mv - u**2 * mv - w**2 * mu) * n**2 + mu * m**2,
2 * v * w * mu * n**2 - 2 * u * mu * m * n,
2 * u * w * mv * n**2 - 2 * v * mv * m * n,
2 * v * w * mv * n**2 + 2 * u * mv * m * n,
(w**2 * mu - u**2 * mv - v**2 * mv) * n**2 + mu * m**2,
]
m = -1 * m
F = mu * m**2 + d * n**2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * m**2 + d * n**2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_ort(cutoff, r_axis, c2_b2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in orthorhombic system.
The algorithm is from the reference Scripta Metallurgica 27, 291 (1992).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_b2_a2_ratio (list of three integers, e.g. mu, lam, mv):
mu:lam:mv is the square of the orthorhombic axial ratio, expressed as rational
numbers. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2:None:a2 means b2 is irrational.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain with respect to the
other grain.
When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
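Example (illustrative): enum_sigma_ort(50, [0, 0, 1], [3, 2, 1]) enumerates all sigma values up
to 50 for rotations about [001] of an orthorhombic cell with c2:b2:a2 = 3:2:1.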
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, lambda, mv are coprime integers.
if None in c2_b2_a2_ratio:
mu, lam, mv = c2_b2_a2_ratio
non_none = [i for i in c2_b2_a2_ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError("No CSL exist for two irrational numbers")
non1, non2 = non_none
if reduce(gcd, non_none) != 1:
temp = reduce(gcd, non_none)
non1 = int(round(non1 / temp))
non2 = int(round(non2 / temp))
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2, CSL only exist for [0,0,1] or [u,v,0] and m = 0")
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError("For irrational b2, CSL only exist for [0,1,0] or [u,0,w] and m = 0")
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError("For irrational a2, CSL only exist for [1,0,0] or [0,v,w] and m = 0")
else:
mu, lam, mv = c2_b2_a2_ratio
if reduce(gcd, c2_b2_a2_ratio) != 1:
temp = reduce(gcd, c2_b2_a2_ratio)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# refer to the meaning of d in reference
d = (mv * u**2 + lam * v**2) * mv + w**2 * mu * mv
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv * mv * lam) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
mu_temp, lam_temp, mv_temp = c2_b2_a2_ratio
if (mu_temp is None and w == 0) or (lam_temp is None and v == 0) or (mv_temp is None and u == 0):
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv * lam * mv - n**2 * d) / mu / lam))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u**2 * mv * mv - lam * v**2 * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * lam * (v * u * mv * n**2 - w * mu * m * n),
2 * mu * (u * w * mv * n**2 + v * lam * m * n),
2 * mv * (u * v * mv * n**2 + w * mu * m * n),
(v**2 * mv * lam - u**2 * mv * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * mv * mu * (v * w * n**2 - u * m * n),
2 * mv * (u * w * mv * n**2 - v * lam * m * n),
2 * lam * mv * (v * w * n**2 + u * m * n),
(w**2 * mu * mv - u**2 * mv * mv - v**2 * mv * lam) * n**2 + lam * mu * m**2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u**2 * mv * mv - lam * v**2 * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * lam * (v * u * mv * n**2 - w * mu * m * n),
2 * mu * (u * w * mv * n**2 + v * lam * m * n),
2 * mv * (u * v * mv * n**2 + w * mu * m * n),
(v**2 * mv * lam - u**2 * mv * mv - w**2 * mu * mv) * n**2 + lam * mu * m**2,
2 * mv * mu * (v * w * n**2 - u * m * n),
2 * mv * (u * w * mv * n**2 - v * lam * m * n),
2 * lam * mv * (v * w * n**2 + u * m * n),
(w**2 * mu * mv - u**2 * mv * mv - v**2 * mv * lam) * n**2 + lam * mu * m**2,
]
m = -1 * m
F = mu * lam * m**2 + d * n**2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * lam * m**2 + d * n**2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_possible_plane_cubic(plane_cutoff, r_axis, r_angle):
"""
Find all possible plane combinations for GBs given a rotation axis and angle for
the cubic system, and classify them into different categories, including 'Twist',
'Symmetric tilt', 'Normal tilt' and 'Mixed' GBs.
Args:
plane_cutoff (integer): the cutoff of plane miller index.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
r_angle (float): rotation angle of the GBs.
Returns:
all_combinations (dict):
dictionary with keys as GB type, e.g. 'Twist','Symmetric tilt',etc.
and values as the combination of the two plane miller index
(GB plane and joining plane).
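Example (illustrative): with an angle taken from get_rotation_angle_from_sigma(5, [1, 0, 0]),
enum_possible_plane_cubic(3, [1, 0, 0], angle) collects all GB plane / joining plane pairs with
miller indices up to 3 for the sigma 5 [100] misorientation and sorts them into the four
categories.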
"""
all_combinations = {}
all_combinations["Symmetric tilt"] = []
all_combinations["Twist"] = []
all_combinations["Normal tilt"] = []
all_combinations["Mixed"] = []
sym_plane = symm_group_cubic([[1, 0, 0], [1, 1, 0]])
j = np.arange(0, plane_cutoff + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
miller = np.array(combination)
miller = miller[np.argsort(np.linalg.norm(miller, axis=1))]
for i, val in enumerate(miller):
if reduce(gcd, val) == 1:
matrix = GrainBoundaryGenerator.get_trans_mat(r_axis, r_angle, surface=val, quick_gen=True)
vec = np.cross(matrix[1][0], matrix[1][1])
miller2 = GrainBoundaryGenerator.vec_to_surface(vec)
if np.all(np.abs(np.array(miller2)) <= plane_cutoff):
cos_1 = abs(np.dot(val, r_axis) / np.linalg.norm(val) / np.linalg.norm(r_axis))
if 1 - cos_1 < 1.0e-5:
all_combinations["Twist"].append([list(val), miller2])
elif cos_1 < 1.0e-8:
sym_tilt = False
if np.sum(np.abs(val)) == np.sum(np.abs(miller2)):
ave = (np.array(val) + np.array(miller2)) / 2
ave1 = (np.array(val) - np.array(miller2)) / 2
for plane in sym_plane:
cos_2 = abs(np.dot(ave, plane) / np.linalg.norm(ave) / np.linalg.norm(plane))
cos_3 = abs(np.dot(ave1, plane) / np.linalg.norm(ave1) / np.linalg.norm(plane))
if 1 - cos_2 < 1.0e-5 or 1 - cos_3 < 1.0e-5:
all_combinations["Symmetric tilt"].append([list(val), miller2])
sym_tilt = True
break
if not sym_tilt:
all_combinations["Normal tilt"].append([list(val), miller2])
else:
all_combinations["Mixed"].append([list(val), miller2])
return all_combinations
@staticmethod
def get_rotation_angle_from_sigma(sigma, r_axis, lat_type="C", ratio=None):
"""
Find all possible rotation angles for the given sigma value.
Args:
sigma (integer):
sigma value provided
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
Returns:
rotation_angles corresponding to the provided sigma value.
If the sigma value is not valid, the rotation angles corresponding to the
largest valid sigma value smaller than the provided one are returned.
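Example (illustrative): GrainBoundaryGenerator.get_rotation_angle_from_sigma(5, [0, 0, 1])
returns the sigma 5 [001] angles for a cubic lattice (roughly 36.87, 53.13, 126.87 and
143.13 degrees).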
"""
if lat_type.lower() == "c":
logger.info("Make sure this is for cubic system")
sigma_dict = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=sigma, r_axis=r_axis)
elif lat_type.lower() == "t":
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_tet(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "o":
logger.info("Make sure this is for orthorhombic system")
if len(ratio) != 3:
raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_ort(cutoff=sigma, r_axis=r_axis, c2_b2_a2_ratio=ratio)
elif lat_type.lower() == "h":
logger.info("Make sure this is for hexagonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_hex(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "r":
logger.info("Make sure this is for rhombohedral system")
if ratio is None:
logger.info("Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio")
elif len(ratio) != 2:
raise RuntimeError("Rhombohedral system needs correct (1+2*cos(alpha)/cos(alpha) ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_rho(cutoff=sigma, r_axis=r_axis, ratio_alpha=ratio)
else:
raise RuntimeError("Lattice type not implemented")
sigmas = list(sigma_dict.keys())
if not sigmas:
raise RuntimeError("This is a wriong sigma value, and no sigma exists smaller than this value.")
if sigma in sigmas:
rotation_angles = sigma_dict[sigma]
else:
sigmas.sort()
warnings.warn(
"This is not the possible sigma value according to the rotation axis!"
"The nearest neighbor sigma and its corresponding angle are returned"
)
rotation_angles = sigma_dict[sigmas[-1]]
rotation_angles.sort()
return rotation_angles
@staticmethod
def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=False):
"""
Find the best corresponding slab lattice by taking linear combinations of the csl
lattice vectors. That is, the area spanned by the a, b vectors (within the surface
plane) is the smallest, and the c vector first has the shortest length perpendicular
to the surface [h,k,l] and, second, the shortest length itself.
Args:
csl (3 by 3 integer array):
input csl lattice.
surface (list of three integers, e.g. h, k, l):
the miller index of the surface, with the format of [h,k,l]
normal (bool):
whether the c vector needs to be perpendicular to the surface.
trans_cry (3 by 3 array):
transform matrix from crystal system to orthogonal system
max_search (int): maximum search range for the GB lattice vectors that give the smallest GB
lattice. If normal is True, also the maximum search range for a GB c vector perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell; if set to True, there is no need
to find the smallest cell.
Returns:
t_matrix: a slab lattice (3 by 3 integer array).
"""
# set the transform matrix in real space
trans = trans_cry
# transform matrix in reciprocal space
ctrans = np.linalg.inv(trans.T)
t_matrix = csl.copy()
# vectors constructed from csl that lie in the surface plane (perpendicular to the surface normal)
ab_vector = []
# obtain the miller index of surface in terms of csl.
miller = np.matmul(surface, csl.T)
if reduce(gcd, miller) != 1:
miller = [int(round(x / reduce(gcd, miller))) for x in miller]
miller_nonzero = []
# quickly generate a supercell; the normal option does not work in this mode
if quick_gen:
scale_factor = []
eye = np.eye(3, dtype=int)
for i, j in enumerate(miller):
if j == 0:
scale_factor.append(eye[i])
else:
miller_nonzero.append(i)
if len(scale_factor) < 2:
index_len = len(miller_nonzero)
for i in range(index_len):
for j in range(i + 1, index_len):
lcm_miller = lcm(miller[miller_nonzero[i]], miller[miller_nonzero[j]])
l = [0, 0, 0]
l[miller_nonzero[i]] = -int(round(lcm_miller / miller[miller_nonzero[i]]))
l[miller_nonzero[j]] = int(round(lcm_miller / miller[miller_nonzero[j]]))
scale_factor.append(l)
if len(scale_factor) == 2:
break
t_matrix[0] = np.array(np.dot(scale_factor[0], csl))
t_matrix[1] = np.array(np.dot(scale_factor[1], csl))
t_matrix[2] = csl[miller_nonzero[0]]
if abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn("Too large matrix. Suggest to use quick_gen=False")
return t_matrix
for i, j in enumerate(miller):
if j == 0:
ab_vector.append(csl[i])
else:
c_index = i
miller_nonzero.append(j)
if len(miller_nonzero) > 1:
t_matrix[2] = csl[c_index]
index_len = len(miller_nonzero)
lcm_miller = []
for i in range(index_len):
for j in range(i + 1, index_len):
com_gcd = gcd(miller_nonzero[i], miller_nonzero[j])
mil1 = int(round(miller_nonzero[i] / com_gcd))
mil2 = int(round(miller_nonzero[j] / com_gcd))
lcm_miller.append(max(abs(mil1), abs(mil2)))
lcm_sorted = sorted(lcm_miller)
if index_len == 2:
max_j = lcm_sorted[0]
else:
max_j = lcm_sorted[1]
else:
if not normal:
t_matrix[0] = ab_vector[0]
t_matrix[1] = ab_vector[1]
t_matrix[2] = csl[c_index]
return t_matrix
max_j = abs(miller_nonzero[0])
max_j = min(max_j, max_search)
# area of a, b vectors
area = None
# length of c vector
c_norm = np.linalg.norm(np.matmul(t_matrix[2], trans))
# c vector length along the direction perpendicular to surface
c_length = np.abs(np.dot(t_matrix[2], surface))
# check if the init c vector perpendicular to the surface
if normal:
c_cross = np.cross(np.matmul(t_matrix[2], trans), np.matmul(surface, ctrans))
normal_init = np.linalg.norm(c_cross) < 1e-8
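# enumerate integer linear combinations of the CSL vectors (including sign variations) up to
# max_j as candidate in-plane (a, b) vectors and out-of-plane c vectors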
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) < 1.0e-8:
ab_vector.append(temp)
else:
# c vector length along the direction perpendicular to surface
c_len_temp = np.abs(np.dot(temp, surface))
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.0e-8:
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
else:
if c_len_temp < c_length or (abs(c_len_temp - c_length) < 1.0e-8 and c_norm_temp < c_norm):
t_matrix[2] = temp
c_norm = c_norm_temp
c_length = c_len_temp
if normal and (not normal_init):
logger.info("Did not find the perpendicular c vector, increase max_j")
while not normal_init:
if max_j == max_search:
warnings.warn("Cannot find the perpendicular c vector, please increase max_search")
break
max_j = 3 * max_j
max_j = min(max_j, max_search)
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) > 1.0e-8:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.0e-8:
                                # c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
if normal_init:
logger.info("Found perpendicular c vector")
        # find the best a, b vectors: smallest spanned area first, then smallest combined norm of a and b
for i in itertools.combinations(ab_vector, 2):
area_temp = np.linalg.norm(np.cross(np.matmul(i[0], trans), np.matmul(i[1], trans)))
if abs(area_temp - 0) > 1.0e-8:
ab_norm_temp = np.linalg.norm(np.matmul(i[0], trans)) + np.linalg.norm(np.matmul(i[1], trans))
if area is None:
area = area_temp
ab_norm = ab_norm_temp
t_matrix[0] = i[0]
t_matrix[1] = i[1]
elif area_temp < area:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
elif abs(area - area_temp) < 1.0e-8 and ab_norm_temp < ab_norm:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
        # make sure we have a right-handed crystallographic system
if np.linalg.det(np.matmul(t_matrix, trans)) < 0:
t_matrix *= -1
if normal and abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn("Too large matrix. Suggest to use Normal=False")
return t_matrix
@staticmethod
def reduce_mat(mat, mag, r_matrix):
"""
Reduce integer array mat's determinant mag times by linear combination
of its row vectors, so that the new array after rotation (r_matrix) is
still an integer array
Args:
mat (3 by 3 array): input matrix
mag (integer): reduce times for the determinant
r_matrix (3 by 3 array): rotation matrix
Return:
the reduced integer array
"""
max_j = abs(int(round(np.linalg.det(mat) / mag)))
reduced = False
for h in range(3):
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
j = np.arange(-max_j, max_j + 1)
for j1, j2 in itertools.product(j, repeat=2):
temp = mat[h] + j1 * mat[k] + j2 * mat[l]
if all(np.round(x, 5).is_integer() for x in list(temp / mag)):
mat_copy = mat.copy()
mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])
new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))
if all(np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))):
reduced = True
mat[h] = np.array([int(round(ele / mag)) for ele in temp])
break
if reduced:
break
if not reduced:
warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.")
return mat
@staticmethod
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.0e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round(com_lcm / frac[0].denominator))
miller[index[1]] = frac[1].numerator * int(round(com_lcm / frac[1].denominator))
return miller
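# Illustrative sketch, not part of the original module: assuming the staticmethods
# above belong to GrainBoundaryGenerator (as in pymatgen.analysis.gb.grain), a float
# normal such as (0.5, 0.5, 0.0) is divided through by a non-zero component and its
# ratios are cleared of denominators via limit_denominator, giving the Miller index
# [1, 1, 0].
def _example_vec_to_surface():
    return GrainBoundaryGenerator.vec_to_surface([0.5, 0.5, 0.0])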
def factors(n):
"""
    Compute the factors of an integer.
Args:
n: the input integer
Returns:
a set of integers that are the factors of the input integer.
"""
return set(
reduce(
list.__add__,
([i, n // i] for i in range(1, int(np.sqrt(n)) + 1) if n % i == 0),
)
)
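# Illustrative sketch, not part of the original module: divisor pairs are collected
# only up to sqrt(n), so factors(12) walks i = 1, 2, 3 and returns {1, 2, 3, 4, 6, 12}.
def _example_factors():
    assert factors(12) == {1, 2, 3, 4, 6, 12}
    return factors(36)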
def fix_pbc(structure, matrix=None):
"""
Set all frac_coords of the input structure within [0,1].
Args:
structure (pymatgen structure object):
input structure
matrix (lattice matrix, 3 by 3 array/matrix)
new structure's lattice matrix, if none, use
input structure's matrix
Return:
new structure with fixed frac_coords and lattice matrix
"""
spec = []
coords = []
if matrix is None:
latte = Lattice(structure.lattice.matrix)
else:
latte = Lattice(matrix)
for site in structure:
spec.append(site.specie)
coord = np.array(site.frac_coords)
for i in range(3):
coord[i] -= floor(coord[i])
if np.allclose(coord[i], 1):
coord[i] = 0
elif np.allclose(coord[i], 0):
coord[i] = 0
else:
coord[i] = round(coord[i], 7)
coords.append(coord)
return Structure(latte, spec, coords, site_properties=structure.site_properties)
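# Illustrative sketch, not part of the original module, assuming Lattice and Structure
# are the pymatgen core classes this file already imports: a site with fractional
# coordinates outside [0, 1) is wrapped back into the cell, e.g. (1.2, -0.3, 0.0)
# becomes (0.2, 0.7, 0.0).
def _example_fix_pbc():
    lattice = Lattice.cubic(3.0)
    struct = Structure(lattice, ["Fe"], [[1.2, -0.3, 0.0]])
    return fix_pbc(struct).frac_coords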
def symm_group_cubic(mat):
"""
    Obtain cubic symmetric equivalents of the list of vectors.
    Args:
        mat (n by 3 array/matrix): list of vectors (one per row)
Return:
cubic symmetric equivalents of the list of vectors.
"""
sym_group = np.zeros([24, 3, 3])
sym_group[0, :] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
sym_group[1, :] = [[1, 0, 0], [0, -1, 0], [0, 0, -1]]
sym_group[2, :] = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]
sym_group[3, :] = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
sym_group[4, :] = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
sym_group[5, :] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
sym_group[6, :] = [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
sym_group[7, :] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
sym_group[8, :] = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
sym_group[9, :] = [[-1, 0, 0], [0, 0, 1], [0, 1, 0]]
sym_group[10, :] = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
sym_group[11, :] = [[1, 0, 0], [0, 0, 1], [0, -1, 0]]
sym_group[12, :] = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]
sym_group[13, :] = [[0, 1, 0], [0, 0, -1], [-1, 0, 0]]
sym_group[14, :] = [[0, -1, 0], [0, 0, 1], [-1, 0, 0]]
sym_group[15, :] = [[0, -1, 0], [0, 0, -1], [1, 0, 0]]
sym_group[16, :] = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
sym_group[17, :] = [[0, 0, 1], [-1, 0, 0], [0, -1, 0]]
sym_group[18, :] = [[0, 0, -1], [1, 0, 0], [0, -1, 0]]
sym_group[19, :] = [[0, 0, -1], [-1, 0, 0], [0, 1, 0]]
sym_group[20, :] = [[0, 0, -1], [0, -1, 0], [-1, 0, 0]]
sym_group[21, :] = [[0, 0, -1], [0, 1, 0], [1, 0, 0]]
sym_group[22, :] = [[0, 0, 1], [0, -1, 0], [1, 0, 0]]
sym_group[23, :] = [[0, 0, 1], [0, 1, 0], [-1, 0, 0]]
mat = np.atleast_2d(mat)
all_vectors = []
for sym in sym_group:
for vec in mat:
all_vectors.append(np.dot(sym, vec))
return np.unique(np.array(all_vectors), axis=0)
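# Illustrative sketch, not part of the original module: applying the 24 rotations
# above to the single direction [0, 0, 1] and deduplicating yields the six
# equivalent <0 0 1> directions.
def _example_symm_group_cubic():
    return len(symm_group_cubic([[0, 0, 1]]))  # 6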
|
materialsproject/pymatgen
|
pymatgen/analysis/gb/grain.py
|
Python
|
mit
| 115,056
|
[
"CRYSTAL",
"pymatgen"
] |
e31859443feadf581627d246ce5ff7f63af4e8ab60911f8111f08044a9b8e0d2
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class HicPro(MakefilePackage):
"""HiC-Pro is a package designed to process Hi-C data,
from raw fastq files (paired-end Illumina data)
to the normalized contact maps"""
homepage = "https://github.com/nservant/HiC-Pro"
url = "https://github.com/nservant/HiC-Pro/archive/v2.10.0.tar.gz"
version('2.10.0', '6ae2213dcc984b722d1a1f65fcbb21a2')
depends_on('bowtie2')
depends_on('samtools')
depends_on('python@2.7:2.8')
depends_on('r')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('py-bx-python', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
def edit(self, spec, prefix):
config = FileFilter('config-install.txt')
config.filter('PREFIX =.*', 'PREFIX = {0}'.format(prefix))
        config.filter('BOWTIE2_PATH =.*',
                      'BOWTIE2_PATH = {0}'.format(spec['bowtie2'].prefix))
config.filter('SAMTOOLS_PATH =.*',
'SAMTOOLS_PATH = {0}'.format(spec['samtools'].prefix))
        config.filter('R_PATH =.*',
                      'R_PATH = {0}'.format(spec['r'].prefix))
        config.filter('PYTHON_PATH =.*',
                      'PYTHON_PATH = {0}'.format(spec['python'].prefix))
    def build(self, spec, prefix):
make('-f', './scripts/install/Makefile',
'CONFIG_SYS=./config-install.txt')
make('mapbuilder')
make('readstrimming')
make('iced')
    def install(self, spec, prefix):
# Patch INSTALLPATH in config-system.txt
config = FileFilter('config-system.txt')
config.filter('/HiC-Pro_2.10.0', '')
# Install
install('config-hicpro.txt', prefix)
install('config-install.txt', prefix)
install('config-system.txt', prefix)
install_tree('bin', prefix.bin)
install_tree('annotation', prefix.annotation)
install_tree('doc', prefix.doc)
install_tree('scripts', prefix.scripts)
install_tree('test-op', join_path(prefix, 'test-op'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/hic-pro/package.py
|
Python
|
lgpl-2.1
| 3,434
|
[
"pysam"
] |
8c845f9c8a7f18661e6e87ed364cf1ab3523386a80c6c23f130d8eaa14c38947
|
# pylint: disable=invalid-name,too-many-lines
"""Density estimation functions for ArviZ."""
import warnings
import numpy as np
from scipy.fftpack import fft
from scipy.optimize import brentq
from scipy.signal import convolve, convolve2d, gaussian # pylint: disable=no-name-in-module
from scipy.sparse import coo_matrix
from scipy.special import ive # pylint: disable=no-name-in-module
from ..utils import _cov, _dot, _stack, conditional_jit
__all__ = ["kde"]
def _bw_scott(x, x_std=None, **kwargs): # pylint: disable=unused-argument
"""Scott's Rule."""
if x_std is None:
x_std = np.std(x)
bw = 1.06 * x_std * len(x) ** (-0.2)
return bw
def _bw_silverman(x, x_std=None, **kwargs): # pylint: disable=unused-argument
"""Silverman's Rule."""
if x_std is None:
x_std = np.std(x)
q75, q25 = np.percentile(x, [75, 25])
x_iqr = q75 - q25
a = min(x_std, x_iqr / 1.34)
bw = 0.9 * a * len(x) ** (-0.2)
return bw
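# Illustrative sketch, not part of the original module: both rules of thumb scale
# with the sample spread and shrink as len(x) ** -0.2; Silverman's rule additionally
# guards against heavy tails by taking min(std, IQR / 1.34).
def _example_rule_of_thumb_bandwidths():
    rng = np.random.default_rng(0)
    sample = rng.normal(size=1000)
    return _bw_scott(sample), _bw_silverman(sample)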
def _bw_isj(x, grid_counts=None, x_std=None, x_range=None):
"""Improved Sheather-Jones bandwidth estimation.
Improved Sheather and Jones method as explained in [1]_. This method is used internally by the
KDE estimator, resulting in saved computation time as minimums, maximums and the grid are
pre-computed.
References
----------
.. [1] Kernel density estimation via diffusion.
Z. I. Botev, J. F. Grotowski, and D. P. Kroese.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
x_len = len(x)
if x_range is None:
x_min = np.min(x)
x_max = np.max(x)
x_range = x_max - x_min
# Relative frequency per bin
if grid_counts is None:
x_std = np.std(x)
grid_len = 256
grid_min = x_min - 0.5 * x_std
grid_max = x_max + 0.5 * x_std
grid_counts, _, _ = histogram(x, grid_len, (grid_min, grid_max))
else:
grid_len = len(grid_counts) - 1
grid_relfreq = grid_counts / x_len
# Discrete cosine transform of the data
a_k = _dct1d(grid_relfreq)
k_sq = np.arange(1, grid_len) ** 2
a_sq = a_k[range(1, grid_len)] ** 2
t = _root(_fixed_point, x_len, args=(x_len, k_sq, a_sq), x=x)
h = t ** 0.5 * x_range
return h
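# Illustrative sketch, not part of the original module: on a clearly bimodal sample
# the ISJ bandwidth is typically narrower than Silverman's normal-reference rule,
# which tends to oversmooth multimodal data.
def _example_isj_vs_silverman():
    rng = np.random.default_rng(0)
    sample = np.concatenate([rng.normal(-2, 0.5, 500), rng.normal(2, 0.5, 500)])
    return _bw_isj(sample), _bw_silverman(sample)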
def _bw_experimental(x, grid_counts=None, x_std=None, x_range=None):
"""Experimental bandwidth estimator."""
bw_silverman = _bw_silverman(x, x_std=x_std)
bw_isj = _bw_isj(x, grid_counts=grid_counts, x_range=x_range)
return 0.5 * (bw_silverman + bw_isj)
def _bw_taylor(x):
"""Taylor's rule for circular bandwidth estimation.
This function implements a rule-of-thumb for choosing the bandwidth of a von Mises kernel
density estimator that assumes the underlying distribution is von Mises as introduced in [1]_.
It is analogous to Scott's rule for the Gaussian KDE.
Circular bandwidth has a different scale from linear bandwidth. Unlike linear scale, low
bandwidths are associated with oversmoothing and high values with undersmoothing.
References
----------
.. [1] C.C Taylor (2008). Automatic bandwidth selection for circular
density estimation.
Computational Statistics and Data Analysis, 52, 7, 3493–3500.
"""
x_len = len(x)
kappa = _kappa_mle(x)
num = 3 * x_len * kappa ** 2 * ive(2, 2 * kappa)
den = 4 * np.pi ** 0.5 * ive(0, kappa) ** 2
return (num / den) ** 0.4
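# Illustrative sketch, not part of the original module: Taylor's rule on synthetic
# von Mises data. Remember the circular convention noted above: a larger returned
# value means a more concentrated kernel, i.e. less smoothing.
def _example_taylor_bandwidth():
    rng = np.random.default_rng(0)
    angles = rng.vonmises(mu=0.0, kappa=2.0, size=500)
    return _bw_taylor(angles)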
_BW_METHODS_LINEAR = {
"scott": _bw_scott,
"silverman": _bw_silverman,
"isj": _bw_isj,
"experimental": _bw_experimental,
}
def _get_bw(x, bw, grid_counts=None, x_std=None, x_range=None):
"""Compute bandwidth for a given data `x` and `bw`.
Also checks `bw` is correctly specified.
Parameters
----------
x : 1-D numpy array
1 dimensional array of sample data from the
variable for which a density estimate is desired.
bw: int, float or str
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth.
Returns
-------
bw: float
Bandwidth
"""
if isinstance(bw, bool):
raise ValueError(
(
"`bw` must not be of type `bool`.\n"
"Expected a positive numeric or one of the following strings:\n"
f"{list(_BW_METHODS_LINEAR)}."
)
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError(f"Numeric `bw` must be positive.\nInput: {bw:.4f}.")
elif isinstance(bw, str):
bw_lower = bw.lower()
if bw_lower not in _BW_METHODS_LINEAR:
raise ValueError(
"Unrecognized bandwidth method.\n"
f"Input is: {bw_lower}.\n"
f"Expected one of: {list(_BW_METHODS_LINEAR)}."
)
bw_fun = _BW_METHODS_LINEAR[bw_lower]
bw = bw_fun(x, grid_counts=grid_counts, x_std=x_std, x_range=x_range)
else:
raise ValueError(
"Unrecognized `bw` argument.\n"
"Expected a positive numeric or one of the following strings:\n"
f"{list(_BW_METHODS_LINEAR)}."
)
return bw
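# Illustrative sketch, not part of the original module: a numeric `bw` is validated
# and returned as-is, while a string selects one of the estimators registered in
# _BW_METHODS_LINEAR.
def _example_get_bw_dispatch():
    rng = np.random.default_rng(0)
    sample = rng.normal(size=500)
    fixed = _get_bw(sample, 0.3)      # numeric: used directly
    scott = _get_bw(sample, "scott")  # string: estimated from the data
    return fixed, scott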
def _vonmises_pdf(x, mu, kappa):
"""Calculate vonmises_pdf."""
if kappa <= 0:
raise ValueError("Argument 'kappa' must be positive.")
pdf = 1 / (2 * np.pi * ive(0, kappa)) * np.exp(np.cos(x - mu) - 1) ** kappa
return pdf
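# Illustrative sketch, not part of the original module: the exponentially scaled
# Bessel function ive(0, kappa) = exp(-kappa) * i0(kappa) keeps the normalisation
# stable for large kappa, which is why exp(cos(x - mu) - 1) ** kappa is used instead
# of the textbook exp(kappa * cos(x - mu)).
def _example_vonmises_normalisation():
    grid = np.linspace(-np.pi, np.pi, 10000, endpoint=False)
    pdf = _vonmises_pdf(grid, mu=0.0, kappa=5.0)
    # A Riemann sum over one period should be close to 1.
    return np.sum(pdf) * (grid[1] - grid[0])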
def _a1inv(x):
"""Compute inverse function.
Inverse function of the ratio of the first and
zeroth order Bessel functions of the first kind.
Returns the value k, such that a1inv(x) = k, i.e. a1(k) = x.
"""
if 0 <= x < 0.53:
return 2 * x + x ** 3 + (5 * x ** 5) / 6
elif x < 0.85:
return -0.4 + 1.39 * x + 0.43 / (1 - x)
else:
return 1 / (x ** 3 - 4 * x ** 2 + 3 * x)
def _kappa_mle(x):
mean = _circular_mean(x)
kappa = _a1inv(np.mean(np.cos(x - mean)))
return kappa
def _dct1d(x):
"""Discrete Cosine Transform in 1 Dimension.
Parameters
----------
x : numpy array
1 dimensional array of values for which the
DCT is desired
Returns
-------
    output : DCT transformed values
"""
x_len = len(x)
even_increasing = np.arange(0, x_len, 2)
odd_decreasing = np.arange(x_len - 1, 0, -2)
x = np.concatenate((x[even_increasing], x[odd_decreasing]))
w_1k = np.r_[1, (2 * np.exp(-(0 + 1j) * (np.arange(1, x_len)) * np.pi / (2 * x_len)))]
output = np.real(w_1k * fft(x))
return output
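# Illustrative sketch, not part of the original module: the even/odd reordering
# above evaluates a (scaled) type-II DCT with a single FFT, so a constant signal
# keeps only its first (DC) coefficient.
def _example_dct1d_constant():
    coeffs = _dct1d(np.ones(8))
    return coeffs[0], np.max(np.abs(coeffs[1:]))  # (8.0, ~0.0)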
def _fixed_point(t, N, k_sq, a_sq):
"""Calculate t-zeta*gamma^[l](t).
Implementation of the function t-zeta*gamma^[l](t) derived from equation (30) in [1].
References
----------
.. [1] Kernel density estimation via diffusion.
Z. I. Botev, J. F. Grotowski, and D. P. Kroese.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
k_sq = np.asfarray(k_sq, dtype=np.float64)
a_sq = np.asfarray(a_sq, dtype=np.float64)
l = 7
f = np.sum(np.power(k_sq, l) * a_sq * np.exp(-k_sq * np.pi ** 2 * t))
f *= 0.5 * np.pi ** (2.0 * l)
for j in np.arange(l - 1, 2 - 1, -1):
c1 = (1 + 0.5 ** (j + 0.5)) / 3
c2 = np.product(np.arange(1.0, 2 * j + 1, 2, dtype=np.float64))
c2 /= (np.pi / 2) ** 0.5
t_j = np.power((c1 * (c2 / (N * f))), (2.0 / (3.0 + 2.0 * j)))
f = np.sum(k_sq ** j * a_sq * np.exp(-k_sq * np.pi ** 2.0 * t_j))
f *= 0.5 * np.pi ** (2 * j)
out = t - (2 * N * np.pi ** 0.5 * f) ** (-0.4)
return out
def _root(function, N, args, x):
# The right bound is at most 0.01
found = False
N = max(min(1050, N), 50)
tol = 10e-12 + 0.01 * (N - 50) / 1000
while not found:
try:
bw, res = brentq(function, 0, 0.01, args=args, full_output=True, disp=False)
found = res.converged
except ValueError:
bw = 0
tol *= 2.0
found = False
if bw <= 0 or tol >= 1:
bw = (_bw_silverman(x) / np.ptp(x)) ** 2
return bw
return bw
def _check_custom_lims(custom_lims, x_min, x_max):
"""Check if `custom_lims` are of the correct type.
It accepts numeric lists/tuples of length 2.
Parameters
----------
custom_lims : Object whose type is checked.
Returns
-------
    custom_lims : list of length 2 with the validated (and filled-in) limits
"""
if not isinstance(custom_lims, (list, tuple)):
raise TypeError(
"`custom_lims` must be a numeric list or tuple of length 2.\n"
f"Not an object of {type(custom_lims)}."
)
if len(custom_lims) != 2:
raise AttributeError(f"`len(custom_lims)` must be 2, not {len(custom_lims)}.")
any_bool = any(isinstance(i, bool) for i in custom_lims)
if any_bool:
raise TypeError("Elements of `custom_lims` must be numeric or None, not bool.")
custom_lims = list(custom_lims) # convert to a mutable object
if custom_lims[0] is None:
custom_lims[0] = x_min
if custom_lims[1] is None:
custom_lims[1] = x_max
    all_numeric = all(isinstance(i, (int, float, np.integer, np.floating)) for i in custom_lims)
if not all_numeric:
raise TypeError(
("Elements of `custom_lims` must be numeric or None.\n" "At least one of them is not.")
)
if not custom_lims[0] < custom_lims[1]:
raise ValueError("`custom_lims[0]` must be smaller than `custom_lims[1]`.")
if custom_lims[0] > x_min or custom_lims[1] < x_max:
raise ValueError("Some observations are outside `custom_lims` boundaries.")
return custom_lims
def _get_grid(
x_min, x_max, x_std, extend_fct, grid_len, custom_lims, extend=True, bound_correction=False
):
"""Compute the grid that bins the data used to estimate the density function.
Parameters
----------
x_min : float
Minimum value of the data
x_max: float
Maximum value of the data.
x_std: float
Standard deviation of the data.
    extend_fct: float
Indicates the factor by which `x_std` is multiplied
to extend the range of the data.
grid_len: int
Number of bins
custom_lims: tuple or list
Custom limits for the domain of the density estimation.
Must be numeric of length 2. Overrides `extend`.
extend: bool, optional
Whether to extend the range of the data or not.
Default is True.
bound_correction: bool, optional
Whether the density estimations performs boundary correction or not.
        This does not directly impact the output, but is used
to override `extend`. Overrides `extend`.
Default is False.
Returns
-------
grid_len: int
Number of bins
grid_min: float
Minimum value of the grid
grid_max: float
Maximum value of the grid
"""
# Set up number of bins.
grid_len = max(int(grid_len), 100)
# Set up domain
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x_min, x_max)
grid_min = custom_lims[0]
grid_max = custom_lims[1]
elif extend and not bound_correction:
grid_extend = extend_fct * x_std
grid_min = x_min - grid_extend
grid_max = x_max + grid_extend
else:
grid_min = x_min
grid_max = x_max
return grid_min, grid_max, grid_len
def kde(x, circular=False, **kwargs):
"""One dimensional density estimation.
It is a wrapper around ``kde_linear()`` and ``kde_circular()``.
Parameters
----------
x: 1D numpy array
Data used to calculate the density estimation.
circular: bool, optional
Whether ``x`` is a circular variable or not. Defaults to False.
**kwargs
Arguments passed to ``kde_linear()`` and ``kde_circular()``.
See their documentation for more info.
Returns
-------
grid: Gridded numpy array for the x values.
pdf: Numpy array for the density estimates.
bw: optional, the estimated bandwidth.
Examples
--------
Default density estimation for linear data
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from arviz import kde
>>>
>>> rvs = np.random.gamma(shape=1.8, size=1000)
>>> grid, pdf = kde(rvs)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with Silverman's rule bandwidth
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bw="silverman")
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with scaled bandwidth
.. plot::
:context: close-figs
>>> # bw_fct > 1 means more smoothness.
>>> grid, pdf = kde(rvs, bw_fct=2.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with extended limits
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bound_correction=False, extend=True, extend_fct=0.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with custom limits
.. plot::
:context: close-figs
>>> # It accepts tuples and lists of length 2.
>>> grid, pdf = kde(rvs, bound_correction=False, custom_lims=(0, 10))
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for circular data
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=1, size=500)
>>> grid, pdf = kde(rvs, circular=True)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with scaled bandwidth
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=1, size=500)
>>> # bw_fct > 1 means less smoothness.
>>> grid, pdf = kde(rvs, circular=True, bw_fct=3)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with custom limits
.. plot::
:context: close-figs
>>> # This is still experimental, does not always work.
>>> rvs = np.random.vonmises(mu=0, kappa=30, size=500)
>>> grid, pdf = kde(rvs, circular=True, custom_lims=(-1, 1))
>>> plt.plot(grid, pdf)
>>> plt.show()
See Also
--------
plot_kde : Compute and plot a kernel density estimate.
"""
x = x[np.isfinite(x)]
if x.size == 0 or np.all(x == x[0]):
warnings.warn("Your data appears to have a single value or no finite values")
return np.zeros(2), np.array([np.nan] * 2)
if circular:
if circular == "degrees":
x = np.radians(x)
kde_fun = _kde_circular
else:
kde_fun = _kde_linear
return kde_fun(x, **kwargs)
def _kde_linear(
x,
bw="experimental",
adaptive=False,
extend=False,
bound_correction=True,
extend_fct=0,
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for linear data.
Given an array of data points `x` it returns an estimate of
the probability density function that generated the samples in `x`.
Parameters
----------
x : 1D numpy array
Data used to calculate the density estimation.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be one of "scott",
"silverman", "isj" or "experimental". Defaults to "experimental".
adaptive: boolean, optional
Indicates if the bandwidth is adaptive or not.
It is the recommended approach when there are multiple modes with different spread.
It is not compatible with convolution. Defaults to False.
extend: boolean, optional
Whether to extend the observed range for `x` in the estimation.
It extends each bound by a multiple of the standard deviation of `x` given by `extend_fct`.
Defaults to False.
bound_correction: boolean, optional
Whether to perform boundary correction on the bounds of `x` or not.
Defaults to True.
extend_fct: float, optional
Number of standard deviations used to widen the lower and upper bounds of `x`.
        Defaults to 0.
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand.
        Must be positive. Values below 1 decrease smoothness while values above 1 increase it.
Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in addition to the other objects.
Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds for the range of `x`.
Defaults to None which disables custom bounds.
cumulative: bool, optional
        Whether to return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
The number of intervals used to bin the data points i.e. the length of the grid used in
the estimation. Defaults to 512.
Returns
-------
grid : Gridded numpy array for the x values.
pdf : Numpy array for the density estimates.
bw: optional, the estimated bandwidth.
"""
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, np.integer, np.floating)):
raise TypeError(f"`bw_fct` must be a positive number, not an object of {type(bw_fct)}.")
if bw_fct <= 0:
raise ValueError(f"`bw_fct` must be a positive number, not {bw_fct}.")
# Preliminary calculations
x_min = x.min()
x_max = x.max()
x_std = np.std(x)
x_range = x_max - x_min
# Determine grid
grid_min, grid_max, grid_len = _get_grid(
x_min, x_max, x_std, extend_fct, grid_len, custom_lims, extend, bound_correction
)
grid_counts, _, grid_edges = histogram(x, grid_len, (grid_min, grid_max))
# Bandwidth estimation
bw = bw_fct * _get_bw(x, bw, grid_counts, x_std, x_range)
# Density estimation
if adaptive:
grid, pdf = _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
else:
grid, pdf = _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
if cumulative:
pdf = pdf.cumsum() / pdf.sum()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
def _kde_circular(
x,
bw="taylor",
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for circular data.
Given an array of data points `x` measured in radians, it returns an estimate of the
probability density function that generated the samples in `x`.
Parameters
----------
x : 1D numpy array
Data used to calculate the density estimation.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be "taylor" since it is the
only option supported so far. Defaults to "taylor".
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand. Must be positive.
        Values above 1 decrease smoothness while values below 1 increase it.
Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in addition to the other objects.
Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds for the range of `x`.
Defaults to None which means the estimation limits are [-pi, pi].
cumulative: bool, optional
        Whether to return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
        The number of intervals used to bin the data points i.e. the length of the grid used in the
estimation. Defaults to 512.
"""
# All values between -pi and pi
x = _normalize_angle(x)
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, np.integer, np.floating)):
raise TypeError(f"`bw_fct` must be a positive number, not an object of {type(bw_fct)}.")
if bw_fct <= 0:
raise ValueError(f"`bw_fct` must be a positive number, not {bw_fct}.")
# Determine bandwidth
if isinstance(bw, bool):
raise ValueError(
"`bw` can't be of type `bool`.\n" "Expected a positive numeric or 'taylor'"
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError(f"Numeric `bw` must be positive.\nInput: {bw:.4f}.")
if isinstance(bw, str):
if bw == "taylor":
bw = _bw_taylor(x)
else:
raise ValueError(f"`bw` must be a positive numeric or `taylor`, not {bw}")
bw *= bw_fct
# Determine grid
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x.min(), x.max())
grid_min = custom_lims[0]
grid_max = custom_lims[1]
assert grid_min >= -np.pi, "Lower limit can't be smaller than -pi"
assert grid_max <= np.pi, "Upper limit can't be larger than pi"
else:
grid_min = -np.pi
grid_max = np.pi
bins = np.linspace(grid_min, grid_max, grid_len + 1)
bin_counts, _, bin_edges = histogram(x, bins=bins)
grid = 0.5 * (bin_edges[1:] + bin_edges[:-1])
kern = _vonmises_pdf(x=grid, mu=0, kappa=bw)
pdf = np.fft.fftshift(np.fft.irfft(np.fft.rfft(kern) * np.fft.rfft(bin_counts)))
pdf /= len(x)
if cumulative:
pdf = pdf.cumsum() / pdf.sum()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
# pylint: disable=unused-argument
def _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Kernel density with convolution.
One dimensional Gaussian kernel density estimation via convolution of the binned relative
frequencies and a Gaussian filter. This is an internal function used by `kde()`.
"""
# Calculate relative frequencies per bin
bin_width = grid_edges[1] - grid_edges[0]
f = grid_counts / bin_width / len(x)
# Bandwidth must consider the bin width
bw /= bin_width
# See: https://stackoverflow.com/questions/2773606/gaussian-filter-in-matlab
grid = (grid_edges[1:] + grid_edges[:-1]) / 2
kernel_n = int(bw * 2 * np.pi)
if kernel_n == 0:
kernel_n = 1
kernel = gaussian(kernel_n, bw)
if bound_correction:
npad = int(grid_len / 5)
f = np.concatenate([f[npad - 1 :: -1], f, f[grid_len : grid_len - npad - 1 : -1]])
pdf = convolve(f, kernel, mode="same", method="direct")[npad : npad + grid_len]
pdf /= bw * (2 * np.pi) ** 0.5
else:
pdf = convolve(f, kernel, mode="same", method="direct")
pdf /= bw * (2 * np.pi) ** 0.5
return grid, pdf
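# Illustrative sketch, not part of the original module: running the binned
# convolution estimator directly. The counts are turned into a piecewise density,
# the bandwidth is re-expressed in bin units, and a Gaussian window is applied by
# direct convolution.
def _example_kde_convolution():
    rng = np.random.default_rng(0)
    sample = rng.normal(size=2000)
    grid_len = 256
    counts, _, edges = histogram(sample, grid_len, (sample.min(), sample.max()))
    bw = _bw_silverman(sample)
    return _kde_convolution(sample, bw, edges, counts, grid_len, bound_correction=True)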
def _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Compute Adaptive Kernel Density Estimation.
One dimensional adaptive Gaussian kernel density estimation. The implementation uses the binning
    technique. Since there is not a unique `bw`, the convolution is not possible. The alternative
implemented in this function is known as Abramson's method.
This is an internal function used by `kde()`.
"""
# Pilot computations used for bandwidth adjustment
pilot_grid, pilot_pdf = _kde_convolution(
x, bw, grid_edges, grid_counts, grid_len, bound_correction
)
# Adds to avoid np.log(0) and zero division
pilot_pdf += 1e-9
# Determine the modification factors
pdf_interp = np.interp(x, pilot_grid, pilot_pdf)
geom_mean = np.exp(np.mean(np.log(pdf_interp)))
# Power of c = 0.5 -> Abramson's method
adj_factor = (geom_mean / pilot_pdf) ** 0.5
bw_adj = bw * adj_factor
# Estimation of Gaussian KDE via binned method (convolution not possible)
grid = pilot_grid
if bound_correction:
grid_npad = int(grid_len / 5)
grid_width = grid_edges[1] - grid_edges[0]
grid_pad = grid_npad * grid_width
grid_padded = np.linspace(
grid_edges[0] - grid_pad,
grid_edges[grid_len - 1] + grid_pad,
num=grid_len + 2 * grid_npad,
)
grid_counts = np.concatenate(
[
grid_counts[grid_npad - 1 :: -1],
grid_counts,
grid_counts[grid_len : grid_len - grid_npad - 1 : -1],
]
)
bw_adj = np.concatenate(
[bw_adj[grid_npad - 1 :: -1], bw_adj, bw_adj[grid_len : grid_len - grid_npad - 1 : -1]]
)
pdf_mat = (grid_padded - grid_padded[:, None]) / bw_adj[:, None]
pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
pdf = np.sum(pdf_mat[:, grid_npad : grid_npad + grid_len], axis=0) / len(x)
else:
pdf_mat = (grid - grid[:, None]) / bw_adj[:, None]
pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
pdf = np.sum(pdf_mat, axis=0) / len(x)
return grid, pdf
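# Illustrative sketch, not part of the original module: Abramson's square-root law
# in isolation. Each observation gets a local bandwidth proportional to
# 1 / sqrt(pilot density), so sparse regions are smoothed more than dense ones.
def _example_abramson_factors():
    rng = np.random.default_rng(0)
    sample = np.concatenate([rng.normal(size=900), rng.normal(5, 0.2, 100)])
    grid, pdf, bw = _kde_linear(sample, bw_return=True)
    pilot = np.interp(sample, grid, pdf) + 1e-9
    geom_mean = np.exp(np.mean(np.log(pilot)))
    return bw * (geom_mean / pilot) ** 0.5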
def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
"""
2D fft-based Gaussian kernel density estimate (KDE).
The code was adapted from https://github.com/mfouesneau/faststats
Parameters
----------
x : Numpy array or list
y : Numpy array or list
gridsize : tuple
Number of points used to discretize data. Use powers of 2 for fft optimization
circular: bool
If True use circular boundaries. Defaults to False
Returns
-------
grid: A gridded 2D KDE of the input points (x, y)
xmin: minimum value of x
xmax: maximum value of x
ymin: minimum value of y
ymax: maximum value of y
"""
x = np.asarray(x, dtype=float)
x = x[np.isfinite(x)]
y = np.asarray(y, dtype=float)
y = y[np.isfinite(y)]
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
len_x = len(x)
weights = np.ones(len_x)
n_x, n_y = gridsize
d_x = (xmax - xmin) / (n_x - 1)
d_y = (ymax - ymin) / (n_y - 1)
xyi = _stack(x, y).T
xyi -= [xmin, ymin]
xyi /= [d_x, d_y]
xyi = np.floor(xyi, xyi).T
scotts_factor = len_x ** (-1 / 6)
cov = _cov(xyi)
std_devs = np.diag(cov) ** 0.5
kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
x_x = np.arange(kern_nx) - kern_nx / 2
y_y = np.arange(kern_ny) - kern_ny / 2
x_x, y_y = np.meshgrid(x_x, y_y)
kernel = _stack(x_x.flatten(), y_y.flatten())
kernel = _dot(inv_cov, kernel) * kernel
kernel = np.exp(-kernel.sum(axis=0) / 2)
kernel = kernel.reshape((int(kern_ny), int(kern_nx)))
boundary = "wrap" if circular else "symm"
grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()
grid = convolve2d(grid, kernel, mode="same", boundary=boundary)
norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)
norm_factor = len_x * d_x * d_y * norm_factor ** 0.5
grid /= norm_factor
return grid, xmin, xmax, ymin, ymax
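# Illustrative sketch, not part of the original module: the 2D estimator returns the
# gridded density together with the data bounds, which is what the plotting code
# needs to draw the image with the right extent.
def _example_fast_kde_2d():
    rng = np.random.default_rng(0)
    x = rng.normal(size=1000)
    y = 0.5 * x + rng.normal(scale=0.5, size=1000)
    grid, xmin, xmax, ymin, ymax = _fast_kde_2d(x, y, gridsize=(128, 128))
    return grid.shape, (xmin, xmax, ymin, ymax)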
def get_bins(values):
"""
Automatically compute the number of bins for discrete variables.
Parameters
----------
    values : numpy array
        values
Returns
-------
array with the bins
Notes
-----
Computes the width of the bins by taking the maximum of the Sturges and the Freedman-Diaconis
estimators. According to numpy `np.histogram` this provides good all around performance.
The Sturges is a very simplistic estimator based on the assumption of normality of the data.
This estimator has poor performance for non-normal data, which becomes especially obvious for
large data sets. The estimate depends only on size of the data.
The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
It is considered a robust version of the Scott rule as the IQR is less affected by outliers
than the standard deviation. However, the IQR depends on fewer points than the standard
deviation, so it is less accurate, especially for long tailed distributions.
"""
dtype = values.dtype.kind
if dtype == "i":
x_min = values.min().astype(int)
x_max = values.max().astype(int)
else:
x_min = values.min().astype(float)
x_max = values.max().astype(float)
# Sturges histogram bin estimator
bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
# The Freedman-Diaconis histogram bin estimator.
iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
if dtype == "i":
width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)
bins = np.arange(x_min, x_max + width + 1, width)
else:
width = np.max([bins_sturges, bins_fd])
if np.isclose(x_min, x_max):
width = 1e-3
bins = np.arange(x_min, x_max + width, width)
return bins
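# Illustrative sketch, not part of the original module: for integer data the bin
# width is the rounded maximum of the Sturges and Freedman-Diaconis widths, so
# small-count data such as Poisson draws typically get one bin per value.
def _example_discrete_bins():
    rng = np.random.default_rng(0)
    counts = rng.poisson(lam=3, size=1000)
    return get_bins(counts)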
def _sturges_formula(dataset, mult=1):
"""Use Sturges' formula to determine number of bins.
See https://en.wikipedia.org/wiki/Histogram#Sturges'_formula
or https://doi.org/10.1080%2F01621459.1926.10502161
Parameters
----------
dataset: xarray.DataSet
Must have the `draw` dimension
mult: float
Used to scale the number of bins up or down. Default is 1 for Sturges' formula.
Returns
-------
int
Number of bins to use
"""
return int(np.ceil(mult * np.log2(dataset.draw.size)) + 1)
def _circular_mean(x):
"""Compute mean of circular variable measured in radians.
The result is between -pi and pi.
"""
sinr = np.sum(np.sin(x))
cosr = np.sum(np.cos(x))
mean = np.arctan2(sinr, cosr)
return mean
def _normalize_angle(x, zero_centered=True):
"""Normalize angles.
Normalize angles in radians to [-pi, pi) or [0, 2 * pi) according to `zero_centered`.
"""
if zero_centered:
return (x + np.pi) % (2 * np.pi) - np.pi
else:
return x % (2 * np.pi)
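# Illustrative sketch, not part of the original module: angles just below +pi and
# just above -pi average to a direction near +/-pi rather than 0, which is exactly
# what the vector-sum definition of the circular mean guarantees.
def _example_circular_mean():
    angles = np.array([np.pi - 0.1, -np.pi + 0.1])
    return _circular_mean(angles)  # close to +/-pi, not 0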
@conditional_jit(cache=True)
def histogram(data, bins, range_hist=None):
"""Conditionally jitted histogram.
Parameters
----------
data : array-like
Input data. Passed as first positional argument to ``np.histogram``.
bins : int or array-like
Passed as keyword argument ``bins`` to ``np.histogram``.
range_hist : (float, float), optional
Passed as keyword argument ``range`` to ``np.histogram``.
Returns
-------
hist : array
The number of counts per bin.
density : array
The density corresponding to each bin.
bin_edges : array
The edges of the bins used.
"""
hist, bin_edges = np.histogram(data, bins=bins, range=range_hist)
hist_dens = hist / (hist.sum() * np.diff(bin_edges))
return hist, hist_dens, bin_edges
def _find_hdi_contours(density, hdi_probs):
"""
Find contours enclosing regions of highest posterior density.
Parameters
----------
density : array-like
A 2D KDE on a grid with cells of equal area.
hdi_probs : array-like
An array of highest density interval confidence probabilities.
Returns
-------
contour_levels : array
The contour levels corresponding to the given HDI probabilities.
"""
# Using the algorithm from corner.py
sorted_density = np.sort(density, axis=None)[::-1]
sm = sorted_density.cumsum()
sm /= sm[-1]
contours = np.empty_like(hdi_probs)
for idx, hdi_prob in enumerate(hdi_probs):
try:
contours[idx] = sorted_density[sm <= hdi_prob][-1]
except IndexError:
contours[idx] = sorted_density[0]
return contours
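# Illustrative sketch, not part of the original module: contour levels are the
# density values at which the cumulative mass (sorted in descending order) reaches
# the requested HDI probabilities, mirroring the corner.py algorithm.
def _example_hdi_contours():
    rng = np.random.default_rng(0)
    x = rng.normal(size=5000)
    y = rng.normal(size=5000)
    density, *_ = _fast_kde_2d(x, y)
    return _find_hdi_contours(density, np.array([0.5, 0.9]))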
|
arviz-devs/arviz
|
arviz/stats/density_utils.py
|
Python
|
apache-2.0
| 32,377
|
[
"Gaussian"
] |
aeb9cba9a2bdbe2e62b97dc0416fa17cc781ebea72960de523f7d79afc18e6ab
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
import os
import sys
import json
import math
import numpy
import argparse
import itertools
import logging
from gff3 import (
feature_lambda,
coding_genes,
genes,
get_gff3_id,
feature_test_location,
get_rbs_from,
nice_name,
)
from shinefind import NaiveSDCaller
from cpt_gffParser import gffParse, gffWrite, gffSeqFeature
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from jinja2 import Environment, FileSystemLoader
from cpt import MGAFinder
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pav")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
ENCOURAGEMENT = (
(100, "Perfection itself!"),
(90, "Amazing!"),
(80, "Not too bad, a few minor things to fix..."),
(70, "Some issues to address"),
(
50,
"""Issues detected! </p><p class="text-muted">Have you heard of the
<a href="https://cpt.tamu.edu">CPT</a>\'s Automated Phage Annotation
Pipeline?""",
),
(
0,
"""<b>MAJOR</b> issues detected! Please consider using the
<a href="https://cpt.tamu.edu">CPT</a>\'s Automated Phage Annotation Pipeline""",
),
)
def gen_qc_feature(start, end, message, strand=0, id_src=None, type_src="gene"):
kwargs = {"qualifiers": {"note": [message]}}
kwards["type"] = type_src
kwargs["strand"] = strand
kwargs["phase"]=0
kwargs["score"]=0.0
kwargs["source"]="feature"
if id_src is not None:
kwargs["id"] = id_src.id
kwargs["qualifiers"]["ID"] = [id_src.id]
kwargs["qualifiers"]["Name"] = id_src.qualifiers.get("Name", [])
if end >= start:
return gffSeqFeature(FeatureLocation(start, end, strand=strand), **kwargs)
else:
return gffSeqFeature(FeatureLocation(end, start, strand=strand), **kwargs)
def __ensure_location_in_bounds(start=0, end=0, parent_length=0):
# This prevents frameshift errors
while start < 0:
start += 3
while end < 0:
end += 3
while start > parent_length:
start -= 3
while end > parent_length:
end -= 3
return (start, end)
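# Illustrative sketch, not part of the original script: the window is shifted by
# whole codons so the reading frame is preserved, e.g. a start of -5 moves to -2
# and then to 1.
def _example_ensure_location_in_bounds():
    return __ensure_location_in_bounds(start=-5, end=10, parent_length=1000)  # (1, 10)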
def missing_rbs(record, lookahead_min=5, lookahead_max=15):
"""
Identify gene features with missing RBSs
This "looks ahead" 5-15 bases ahead of each gene feature, and checks if
there's an RBS feature in those bounds.
The returned data is a set of genes with the RBS sequence in the __upstream
attribute, and a message in the __message attribute.
"""
results = []
good = 0
bad = 0
qc_features = []
sd_finder = NaiveSDCaller()
any_rbss = False
for gene in coding_genes(record.features):
# Check if there are RBSs, TODO: make this recursive. Each feature in
# gene.sub_features can also have sub_features.
rbss = get_rbs_from(gene)
# No RBS found
if len(rbss) == 0:
# Get the sequence lookahead_min to lookahead_max upstream
if gene.strand > 0:
start = gene.location.start - lookahead_max
end = gene.location.start - lookahead_min
else:
start = gene.location.end + lookahead_min
end = gene.location.end + lookahead_max
# We have to ensure the feature is ON the genome, otherwise we may
# be trying to access a location outside of the length of the
# genome, which would be bad.
(start, end) = __ensure_location_in_bounds(
start=start, end=end, parent_length=len(record)
)
# Temporary feature to extract sequence
tmp = gffSeqFeature(
FeatureLocation(start, end, strand=gene.strand), type="domain"
)
# Get the sequence
seq = str(tmp.extract(record.seq))
# Set the default properties
gene.__upstream = seq.lower()
gene.__message = "No RBS annotated, None found"
# Try and do an automated shinefind call
sds = sd_finder.list_sds(seq)
if len(sds) > 0:
sd = sds[0]
gene.__upstream = sd_finder.highlight_sd(
seq.lower(), sd["start"], sd["end"]
)
gene.__message = "Unannotated but valid RBS"
qc_features.append(
gen_qc_feature(
start, end, "Missing RBS", strand=gene.strand, id_src=gene, type_src="gene"
)
)
bad += 1
results.append(gene)
results[-1].location = FeatureLocation(results[-1].location.start + 1, results[-1].location.end, results[-1].location.strand)
else:
if len(rbss) > 1:
log.warn("%s RBSs found for gene %s", rbss[0].id, get_gff3_id(gene))
any_rbss = True
# get first RBS/CDS
cds = list(genes(gene.sub_features, feature_type="CDS"))[0]
rbs = rbss[0]
# Get the distance between the two
if gene.strand > 0:
distance = cds.location.start - rbs.location.end
else:
distance = rbs.location.start - cds.location.end
# If the RBS is too far away, annotate that
if distance > lookahead_max:
gene.__message = "RBS too far away (%s nt)" % distance
qc_features.append(
gen_qc_feature(
rbs.location.start,
rbs.location.end,
gene.__message,
strand=gene.strand,
id_src=gene,
type_src="gene"
)
)
bad += 1
results.append(gene)
results[-1].location = FeatureLocation(results[-1].location.start + 1, results[-1].location.end, results[-1].location.strand)
else:
good += 1
return good, bad, results, qc_features, any_rbss
# modified from get_orfs_or_cdss.py
# -----------------------------------------------------------
def require_sd(data, record, chrom_start, sd_min, sd_max):
sd_finder = NaiveSDCaller()
for putative_gene in data:
if putative_gene[2] > 0: # strand
start = chrom_start + putative_gene[0] - sd_max
end = chrom_start + putative_gene[0] - sd_min
else:
start = chrom_start + putative_gene[1] + sd_min
end = chrom_start + putative_gene[1] + sd_max
(start, end) = __ensure_location_in_bounds(
start=start, end=end, parent_length=len(record)
)
tmp = gffSeqFeature(
FeatureLocation(start, end, strand=putative_gene[2]), type="domain"
)
# Get the sequence
seq = str(tmp.extract(record.seq))
sds = sd_finder.list_sds(seq)
if len(sds) > 0:
yield putative_gene + (start, end)
def excessive_gap(
record,
excess=50,
excess_divergent=200,
min_gene=30,
slop=30,
lookahead_min=5,
lookahead_max=15,
):
"""
Identify excessive gaps between gene features.
Default "excessive" gap size is 10, but that should likely be larger.
"""
results = []
good = 0
bad = 0
contiguous_regions = []
sorted_genes = sorted(
genes(record.features), key=lambda feature: feature.location.start
)
if len(sorted_genes) == 0:
log.warn("NO GENES FOUND")
return good, bad, results, []
current_gene = None
for gene in sorted_genes:
# If the gene's start is contiguous to the "current_gene", then we
# extend current_gene
log.debug("gene.id", gene.id)
for cds in genes(gene.sub_features, feature_type="CDS"):
log.debug("\t%s %s", cds.id, cds.location)
if current_gene is None:
current_gene = [int(cds.location.start), int(cds.location.end)]
if cds.location.start <= current_gene[1] + excess:
# Don't want to decrease size
if int(cds.location.end) >= current_gene[1]:
current_gene[1] = int(cds.location.end)
else:
# If it's discontiguous, we append the region and clear.
contiguous_regions.append(current_gene)
current_gene = [int(cds.location.start), int(cds.location.end)]
    # It is generally expected that annotations will NOT continue to the end
    # of the genome; however, that assumption is a bug, and we can reach this
    # point with an empty contiguous_regions list
contiguous_regions.append(current_gene)
for i in range(len(contiguous_regions) + 1):
if i == 0:
a = (1, 1)
b = contiguous_regions[i]
elif i >= len(contiguous_regions):
a = contiguous_regions[i - 1]
b = (len(record.seq), None)
else:
a = contiguous_regions[i - 1]
b = contiguous_regions[i]
gap_size = abs(b[0] - a[1])
if gap_size > min(excess, excess_divergent):
a_feat_l = itertools.islice(
feature_lambda(
sorted_genes,
feature_test_location,
{"loc": a[1]},
subfeatures=False,
),
1,
)
b_feat_l = itertools.islice(
feature_lambda(
sorted_genes,
feature_test_location,
{"loc": b[0]},
subfeatures=False,
),
1,
)
try:
a_feat = next(a_feat_l)
except StopIteration:
# Triggers on end of genome
a_feat = None
try:
b_feat = next(b_feat_l)
except StopIteration:
# Triggers on end of genome
b_feat = None
result_obj = [
a[1],
b[0],
None if not a_feat else a_feat.location.strand,
None if not b_feat else b_feat.location.strand,
]
if a_feat is None or b_feat is None:
if gap_size > excess_divergent:
results.append(result_obj)
else:
if (
a_feat.location.strand == b_feat.location.strand
and gap_size > excess
):
results.append(result_obj)
elif (
a_feat.location.strand != b_feat.location.strand
and gap_size > excess_divergent
):
results.append(result_obj)
better_results = []
qc_features = []
of = MGAFinder(11, "CDS", "closed", min_gene)
# of = OrfFinder(11, 'CDS', 'closed', min_gene)
for result_obj in results:
start = result_obj[0]
end = result_obj[1]
f = gen_qc_feature(start, end, "Excessive gap, %s bases" % abs(end - start), type_src="gene")
qc_features.append(f)
putative_genes = of.putative_genes_in_sequence(
str(record[start - slop : end + slop].seq)
)
putative_genes = list(
require_sd(putative_genes, record, start, lookahead_min, lookahead_max)
)
for putative_gene in putative_genes:
# (0, 33, 1, 'ATTATTTTATCAAAACGCTTTACAATCTTTTAG', 'MILSKRFTIF', 123123, 124324)
possible_gene_start = start + putative_gene[0]
possible_gene_end = start + putative_gene[1]
if possible_gene_start <= possible_gene_end:
possible_cds = gffSeqFeature(
FeatureLocation(
possible_gene_start, possible_gene_end, strand=putative_gene[2]
),
type="CDS",
)
else:
possible_cds = gffSeqFeature(
FeatureLocation(
possible_gene_end, possible_gene_start, strand=putative_gene[2],
),
type="CDS",
)
# Now we adjust our boundaries for the RBS that's required
# There are only two cases, the rbs is upstream of it, or downstream
if putative_gene[5] < possible_gene_start:
possible_gene_start = putative_gene[5]
else:
possible_gene_end = putative_gene[6]
if putative_gene[5] <= putative_gene[6]:
possible_rbs = gffSeqFeature(
FeatureLocation(
putative_gene[5], putative_gene[6], strand=putative_gene[2]
),
type="Shine_Dalgarno_sequence",
)
else:
possible_rbs = gffSeqFeature(
FeatureLocation(
putative_gene[6], putative_gene[5], strand=putative_gene[2],
),
type="Shine_Dalgarno_sequence",
)
if possible_gene_start <= possible_gene_end:
possible_gene = gffSeqFeature(
FeatureLocation(
possible_gene_start, possible_gene_end, strand=putative_gene[2]
),
type="gene",
qualifiers={"note": ["Possible gene"]},
)
else:
possible_gene = gffSeqFeature(
FeatureLocation(
possible_gene_end, possible_gene_start, strand=putative_gene[2],
),
type="gene",
qualifiers={"note": ["Possible gene"]},
)
possible_gene.sub_features = [possible_rbs, possible_cds]
qc_features.append(possible_gene)
better_results.append(result_obj + [len(putative_genes)])
# Bad gaps are those with more than zero possible genes found
    bad = len([x for x in better_results if x[-1] > 0])
# Generally taking "good" here as every possible gap in the genome
# Thus, good is TOTAL - gaps
good = len(sorted_genes) + 1 - bad
# and bad is just gaps
return good, bad, better_results, qc_features
def phi(x):
"""Standard phi function used in calculation of normal distribution"""
return math.exp(-1 * math.pi * x * x)
def norm(x, mean=0, sd=1):
"""
Normal distribution. Given an x position, a mean, and a standard
deviation, calculate the "y" value. Useful for score scaling
    Modified to multiply by sd, so norm(x, mean, sd) evaluates to 1 at x == mean for any sd, rather than 1/sd.
"""
return (1 / float(sd)) * phi(float(x - mean) / float(sd)) * sd
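# Illustrative sketch, not part of the original script: with the modified scaling a
# value at the mean always scores 1.0, and scores decay like a Gaussian away from it.
def _example_norm_scoring():
    at_mean = norm(92.5, mean=92.5, sd=20)  # 1.0
    away = norm(72.5, mean=92.5, sd=20)     # exp(-pi) ~= 0.04
    return at_mean, away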
def coding_density(record, mean=92.5, sd=20):
"""
Find coding density in the genome
"""
feature_lengths = 0
for gene_a in coding_genes(record.features):
feature_lengths += sum(
[len(x) for x in genes(gene_a.sub_features, feature_type="CDS")]
)
avgFeatLen = float(feature_lengths) / float(len(record.seq))
return int(norm(100 * avgFeatLen, mean=mean, sd=sd) * 100), int(100 * avgFeatLen)
def exact_coding_density(record, mean=92.5, sd=20):
"""
Find exact coding density in the genome
"""
data = numpy.zeros(len(record.seq))
for gene_a in coding_genes(record.features):
for cds in genes(gene_a.sub_features, feature_type="CDS"):
for i in range(cds.location.start, cds.location.end + 1):
data[i - 1] = 1
return float(sum(data)) / len(data)
def excessive_overlap(record, excess=15, excess_divergent=30):
"""
Find excessive overlaps in the genome, where excessive is defined as 15
bases for same strand, and 30 for divergent translation.
Does a product of all the top-level features in the genome, and calculates
gaps.
"""
results = []
bad = 0
qc_features = []
for (gene_a, gene_b) in itertools.combinations(coding_genes(record.features), 2):
# Get the CDS from the subfeature list.
# TODO: not recursive.
cds_a = [x for x in genes(gene_a.sub_features, feature_type="CDS")]
cds_b = [x for x in genes(gene_b.sub_features, feature_type="CDS")]
if len(cds_a) == 0:
log.warn("Gene missing subfeatures; %s", get_gff3_id(gene_a))
continue
if len(cds_b) == 0:
log.warn("Gene missing subfeatures; %s", get_gff3_id(gene_b))
continue
cds_a = cds_a[0]
cds_b = cds_b[0]
# Set of locations that are included in the CDS of A and the
# CDS of B
cas = set(range(cds_a.location.start, cds_a.location.end))
cbs = set(range(cds_b.location.start, cds_b.location.end))
# Here we calculate the intersection between the two sets, and
# if it's larger than our excessive size, we know that they're
# overlapped
ix = cas.intersection(cbs)
if (cds_a.location.strand == cds_b.location.strand and len(ix) >= excess) or (
cds_a.location.strand != cds_b.location.strand
and len(ix) >= excess_divergent
):
bad += float(len(ix)) / float(min(excess, excess_divergent))
qc_features.append(
gen_qc_feature(min(ix), max(ix), "Excessive Overlap", id_src=gene_a, type_src="gene")
)
results.append((gene_a, gene_b, min(ix), max(ix)))
# Good isn't accurate here. It's a triangle number and just ugly, but we
# don't care enough to fix it.
good = len(list(coding_genes(record.features)))
good = int(good - bad)
if good < 0:
good = 0
return good, int(bad), results, qc_features
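# Illustrative sketch, not part of the original script: overlap between two CDS
# intervals is the size of the intersection of their per-base coordinate sets,
# e.g. [100, 200) against [190, 300) overlaps by 10 bases.
def _example_overlap_size():
    cas = set(range(100, 200))
    cbs = set(range(190, 300))
    return len(cas.intersection(cbs))  # 10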
def get_encouragement(score):
"""Some text telling the user how they did
"""
for encouragement in ENCOURAGEMENT:
if score > encouragement[0]:
return encouragement[1]
return ENCOURAGEMENT[-1][1]
def genome_overview(record):
"""Genome overview
"""
data = {
"genes": {
"count": 0,
"bases": 0,
"density": 0, # genes / kb
"avg_len": [],
"comp": {"A": 0, "C": 0, "G": 0, "T": 0},
},
"overall": {
"comp": {
"A": record.seq.count("A"),
"C": record.seq.count("C"),
"G": record.seq.count("G"),
"T": record.seq.count("T"),
},
"gc": 0,
},
}
gene_features = list(coding_genes(record.features))
data["genes"]["count"] = len(gene_features)
for feat in gene_features:
data["genes"]["comp"]["A"] += feat.extract(record).seq.count("A")
data["genes"]["comp"]["C"] += feat.extract(record).seq.count("C")
data["genes"]["comp"]["T"] += feat.extract(record).seq.count("T")
data["genes"]["comp"]["G"] += feat.extract(record).seq.count("G")
data["genes"]["bases"] += len(feat)
data["genes"]["avg_len"].append(len(feat))
data["genes"]["avg_len"] = float(sum(data["genes"]["avg_len"])) / len(gene_features)
data["overall"]["gc"] = float(
data["overall"]["comp"]["G"] + data["overall"]["comp"]["C"]
) / len(record.seq)
return data
def find_morons(record):
"""Locate morons in the genome
Don't even know why...
TODO: remove? Idk.
"""
results = []
good = 0
bad = 0
gene_features = list(coding_genes(record.features))
for i, gene in enumerate(gene_features):
two_left = gene_features[i - 2 : i]
two_right = gene_features[i + 1 : i + 1 + 2]
strands = [x.strand for x in two_left] + [x.strand for x in two_right]
anticon = [x for x in strands if x != gene.strand]
if len(anticon) == 4:
has_rbs = [x.type == "Shine_Dalgarno_sequence" for x in gene.sub_features]
if any(has_rbs):
rbs = [
x for x in gene.sub_features if x.type == "Shine_Dalgarno_sequence"
][0]
rbs_msg = str(rbs.extract(record.seq))
else:
rbs_msg = "No RBS Available"
results.append((gene, two_left, two_right, rbs_msg))
bad += 1
else:
good += 1
return good, bad, results, []
def bad_gene_model(record):
"""Find features without product
"""
results = []
good = 0
bad = 0
qc_features = []
for gene in coding_genes(record.features):
exons = [
x for x in genes(gene.sub_features, feature_type="exon") if len(x) > 10
]
CDSs = [x for x in genes(gene.sub_features, feature_type="CDS")]
if len(exons) >= 1 and len(CDSs) >= 1:
if len(exons) != len(CDSs):
results.append(
(
get_gff3_id(gene),
None,
None,
"Mismatched number of exons and CDSs in gff3 representation",
)
)
qc_features.append(
gen_qc_feature(
gene.location.start,
gene.location.end,
"Mismatched number of exons and CDSs in gff3 representation",
strand=gene.strand,
id_src=gene,
type_src="gene"
)
)
bad += 1
else:
for (exon, cds) in zip(
sorted(exons, key=lambda x: x.location.start),
sorted(CDSs, key=lambda x: x.location.start),
):
if len(exon) != len(cds):
results.append(
(
get_gff3_id(gene),
exon,
cds,
"CDS does not extend to full length of gene",
)
)
qc_features.append(
gen_qc_feature(
exon.location.start,
exon.location.end,
"CDS does not extend to full length of gene",
strand=exon.strand,
id_src=gene,
type_src="CDS"
)
)
bad += 1
else:
good += 1
else:
log.warn("Could not handle %s, %s", exons, CDSs)
results.append(
(
get_gff3_id(gene),
None,
None,
"{0} exons, {1} CDSs".format(len(exons), len(CDSs)),
)
)
return good, len(results) + bad, results, qc_features
def weird_starts(record):
"""Find features without product
"""
good = 0
bad = 0
qc_features = []
results = []
overall = {}
for gene in coding_genes(record.features):
seq = [x for x in genes(gene.sub_features, feature_type="CDS")]
if len(seq) == 0:
log.warn("No CDS for gene %s", get_gff3_id(gene))
continue
else:
seq = seq[0]
seq_str = str(seq.extract(record.seq))
start_codon = seq_str[0:3]
if len(seq_str) < 3:
sys.stderr.write("Fatal Error: CDS of length less than 3 at " + str(seq.location) + '\n')
exit(2)
# if len(seq_str) % 3 != 0:
# if len(seq_str) < 3:
# stop_codon = seq_str[-(len(seq_str))]
# else:
# stop_codon = seq_str[-3]
#
# log.warn("CDS at %s length is not a multiple of three (Length = %d)", get_gff3_id(gene), len(seq_str))
# seq.__error = "Bad CDS Length"
# results.append(seq)
# qc_features.append(
# gen_qc_feature(
# s, e, "Bad Length", strand=seq.strand, id_src=gene
# )
# )
# bad += 1
# seq.__start = start_codon
# seq.__stop = stop_codon
# continue
stop_codon = seq_str[-3]
seq.__start = start_codon
seq.__stop = stop_codon
if start_codon not in overall:
overall[start_codon] = 1
else:
overall[start_codon] += 1
if start_codon not in ("ATG", "TTG", "GTG"):
log.warn("Weird start codon (%s) on %s", start_codon, get_gff3_id(gene))
seq.__error = "Unusual start codon %s" % start_codon
s = 0
e = 0
if seq.strand > 0:
s = seq.location.start
e = seq.location.start + 3
else:
s = seq.location.end
e = seq.location.end - 3
results.append(seq)
results[-1].location = FeatureLocation(results[-1].location.start + 1, results[-1].location.end, results[-1].location.strand)
qc_features.append(
gen_qc_feature(
s, e, "Weird start codon", strand=seq.strand, id_src=gene, type_src="gene"
)
)
bad += 1
else:
good += 1
return good, bad, results, qc_features, overall
def missing_genes(record):
"""Find features without product
"""
results = []
good = 0
bad = 0
qc_features = []
for gene in coding_genes(record.features):
if gene.qualifiers.get("cpt_source", [None])[0] == "CPT_GENE_MODEL_CORRECTION":
results.append(gene)
bad += 1
else:
good += 1
return good, bad, results, qc_features
def gene_model_correction_issues(record):
"""Find features that have issues from the gene model correction step.
These have qualifiers beginning with CPT_GMS
"""
results = []
good = 0
bad = 0
qc_features = []
# For each gene
for gene in coding_genes(record.features):
# Get the list of child CDSs
cdss = [x for x in genes(gene.sub_features, feature_type="CDS")]
# And our matching qualifiers
gene_data = [(k, v) for (k, v) in gene.qualifiers.items() if k == "cpt_gmc"]
# If there are problems with ONLY the parent, let's complain
local_results = []
local_qc_features = []
for x in gene_data:
if "Missing Locus Tag" in x[1]:
# Missing locus tag is an either or thing, if it hits here
# there shouldn't be anything else wrong with it.
# Obviously missing so we remove it
gene.qualifiers["locus_tag"] = [""]
# Translation from bp_genbank2gff3.py
cdss[0].qualifiers["locus_tag"] = cdss[0].qualifiers["Name"]
# Append our results
local_results.append((gene, cdss[0], "Gene is missing a locus_tag"))
local_qc_features.append(
gen_qc_feature(
gene.location.start,
gene.location.end,
"Gene is missing a locus_tag",
strand=gene.strand,
type_src="gene"
)
)
# We need to alert on any child issues as well.
for cds in cdss:
cds_data = [
(k, v[0]) for (k, v) in cds.qualifiers.items() if k == "cpt_gmc"
]
if len(gene_data) == 0 and len(cds_data) == 0:
                # All good - nothing wrong with this gene/CDS pair
pass
else:
for _, problem in cds_data:
if problem == "BOTH Missing Locus Tag":
gene.qualifiers["locus_tag"] = [""]
cds.qualifiers["locus_tag"] = [""]
local_results.append(
(gene, cds, "Both gene and CDS are missing locus tags")
)
local_qc_features.append(
gen_qc_feature(
cds.location.start,
cds.location.end,
"CDS is missing a locus_tag",
strand=cds.strand,
type_src="CDS"
)
)
local_qc_features.append(
gen_qc_feature(
gene.location.start,
gene.location.end,
"Gene is missing a locus_tag",
strand=gene.strand,
type_src="gene"
)
)
elif problem == "Different locus tag from associated gene.":
gene.qualifiers["locus_tag"] = gene.qualifiers["Name"]
cds.qualifiers["locus_tag"] = cds.qualifiers["cpt_gmc_locus"]
local_results.append(
(gene, cds, "Gene and CDS have differing locus tags")
)
local_qc_features.append(
gen_qc_feature(
gene.location.start,
gene.location.end,
"Gene and CDS have differing locus tags",
strand=gene.strand,
type_src="gene"
)
)
elif problem == "Missing Locus Tag":
# Copy this over
gene.qualifiers["locus_tag"] = gene.qualifiers["Name"]
# This one is missing
cds.qualifiers["locus_tag"] = [""]
local_results.append((gene, cds, "CDS is missing a locus_tag"))
local_qc_features.append(
gen_qc_feature(
cds.location.start,
cds.location.end,
"CDS is missing a locus_tag",
strand=cds.strand,
type_src="CDS"
)
)
else:
log.warn("Cannot handle %s", problem)
if len(local_results) > 0:
bad += 1
else:
good += 1
qc_features.extend(local_qc_features)
results.extend(local_results)
return good, bad, results, qc_features
def missing_tags(record):
"""Find features without product
"""
results = []
good = 0
bad = 0
qc_features = []
for gene in coding_genes(record.features):
cds = [x for x in genes(gene.sub_features, feature_type="CDS")]
if len(cds) == 0:
log.warn("Gene missing CDS subfeature %s", get_gff3_id(gene))
continue
cds = cds[0]
if "product" not in cds.qualifiers:
log.info("Missing product tag on %s", get_gff3_id(gene))
qc_features.append(
gen_qc_feature(
cds.location.start,
cds.location.end,
"Missing product tag",
strand=cds.strand,
type_src="CDS"
)
)
results.append(cds)
bad += 1
else:
good += 1
return good, bad, results, qc_features
def evaluate_and_report(
annotations,
genome,
gff3=None,
tbl=None,
sd_min=5,
sd_max=15,
min_gene_length=30,
excessive_gap_dist=50,
excessive_gap_divergent_dist=200,
excessive_overlap_dist=25,
excessive_overlap_divergent_dist=50,
reportTemplateName="phage_annotation_validator.html",
):
"""
Generate our HTML evaluation of the genome
"""
# Get features from GFF file
seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
# Get the first GFF3 record
# TODO: support multiple GFF3 files.
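    # Keep whichever GFF3 record has the most features; that record is evaluated below.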
mostFeat = 0
for rec in list(gffParse(annotations, base_dict=seq_dict)):
if len(rec.features) > mostFeat:
mostFeat = len(rec.features)
record = rec
gff3_qc_record = SeqRecord(record.id, id=record.id)
gff3_qc_record.features = []
gff3_qc_features = []
log.info("Locating missing RBSs")
# mb_any = "did they annotate ANY rbss? if so, take off from score."
mb_good, mb_bad, mb_results, mb_annotations, mb_any = missing_rbs(
record, lookahead_min=sd_min, lookahead_max=sd_max
)
gff3_qc_features += mb_annotations
log.info("Locating excessive gaps")
eg_good, eg_bad, eg_results, eg_annotations = excessive_gap(
record,
excess=excessive_gap_dist,
excess_divergent=excessive_gap_divergent_dist,
min_gene=min_gene_length,
slop=excessive_overlap_dist,
lookahead_min=sd_min,
lookahead_max=sd_max,
)
gff3_qc_features += eg_annotations
log.info("Locating excessive overlaps")
eo_good, eo_bad, eo_results, eo_annotations = excessive_overlap(
record,
excess=excessive_overlap_dist,
excess_divergent=excessive_overlap_divergent_dist,
)
gff3_qc_features += eo_annotations
log.info("Locating morons")
mo_good, mo_bad, mo_results, mo_annotations = find_morons(record)
gff3_qc_features += mo_annotations
log.info("Locating missing tags")
mt_good, mt_bad, mt_results, mt_annotations = missing_tags(record)
gff3_qc_features += mt_annotations
log.info("Locating missing gene features")
mg_good, mg_bad, mg_results, mg_annotations = missing_genes(record)
gff3_qc_features += mg_annotations
log.info("Determining coding density")
cd, cd_real = coding_density(record)
log.info("Locating weird starts")
ws_good, ws_bad, ws_results, ws_annotations, ws_overall = weird_starts(record)
gff3_qc_features += ws_annotations
log.info("Locating bad gene models")
gm_good, gm_bad, gm_results, gm_annotations = bad_gene_model(record)
if gm_good + gm_bad == 0:
gm_bad = 1
log.info("Locating more bad gene models")
gmc_good, gmc_bad, gmc_results, gmc_annotations = gene_model_correction_issues(
record
)
if gmc_good + gmc_bad == 0:
gmc_bad = 1
good_scores = [eg_good, eo_good, mt_good, ws_good, gm_good, gmc_good]
bad_scores = [eg_bad, eo_bad, mt_bad, ws_bad, gm_bad, gmc_bad]
# Only if they tried to annotate RBSs do we consider them.
if mb_any:
good_scores.append(mb_good)
bad_scores.append(mb_bad)
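    # Each check contributes a 0-100 subscore; coding density is appended as its own subscore before averaging.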
subscores = []
for (g, b) in zip(good_scores, bad_scores):
if g + b == 0:
s = 0
else:
s = int(100 * float(g) / (float(b) + float(g)))
subscores.append(s)
subscores.append(cd)
score = int(float(sum(subscores)) / float(len(subscores)))
# This is data that will go into our HTML template
kwargs = {
"upstream_min": sd_min,
"upstream_max": sd_max,
"record_name": record.id,
"record_nice_name": nice_name(record),
"params": {
"sd_min": sd_min,
"sd_max": sd_max,
"min_gene_length": min_gene_length,
"excessive_gap_dist": excessive_gap_dist,
"excessive_gap_divergent_dist": excessive_gap_divergent_dist,
"excessive_overlap_dist": excessive_overlap_dist,
"excessive_overlap_divergent_dist": excessive_overlap_divergent_dist,
},
"score": score,
"encouragement": get_encouragement(score),
"genome_overview": genome_overview(record),
"rbss_annotated": mb_any,
"missing_rbs": mb_results,
"missing_rbs_good": mb_good,
"missing_rbs_bad": mb_bad,
"missing_rbs_score": 0
if mb_good + mb_bad == 0
else (100 * mb_good / (mb_good + mb_bad)),
"excessive_gap": eg_results,
"excessive_gap_good": eg_good,
"excessive_gap_bad": eg_bad,
"excessive_gap_score": 0
if eo_good + eo_bad == 0
else (100 * eo_good / (eo_good + eo_bad)),
"excessive_overlap": eo_results,
"excessive_overlap_good": eo_good,
"excessive_overlap_bad": eo_bad,
"excessive_overlap_score": 0
if eo_good + eo_bad == 0
else (100 * eo_good / (eo_good + eo_bad)),
"morons": mo_results,
"morons_good": mo_good,
"morons_bad": mo_bad,
"morons_score": 0
if mo_good + mo_bad == 0
else (100 * mo_good / (mo_good + mo_bad)),
"missing_tags": mt_results,
"missing_tags_good": mt_good,
"missing_tags_bad": mt_bad,
"missing_tags_score": 0
if mt_good + mt_bad == 0
else (100 * mt_good / (mt_good + mt_bad)),
"missing_genes": mg_results,
"missing_genes_good": mg_good,
"missing_genes_bad": mg_bad,
"missing_genes_score": 0
if mg_good + mg_bad == 0
else (100 * mg_good / (mg_good + mg_bad)),
"weird_starts": ws_results,
"weird_starts_good": ws_good,
"weird_starts_bad": ws_bad,
"weird_starts_overall": ws_overall,
"weird_starts_overall_sorted_keys": sorted(
ws_overall, reverse=True, key=lambda x: ws_overall[x]
),
"weird_starts_score": 0
if ws_good + ws_bad == 0
else (100 * ws_good / (ws_good + ws_bad)),
"gene_model": gm_results,
"gene_model_good": gm_good,
"gene_model_bad": gm_bad,
"gene_model_score": 0
if gm_good + gm_bad == 0
else (100 * gm_good / (gm_good + gm_bad)),
"gene_model_correction": gmc_results,
"gene_model_correction_good": gmc_good,
"gene_model_correction_bad": gmc_bad,
"gene_model_correction_score": 0
if gmc_good + gmc_bad == 0
else (100 * gmc_good / (gmc_good + gmc_bad)),
"coding_density": cd,
"coding_density_exact": exact_coding_density(record),
"coding_density_real": cd_real,
"coding_density_score": cd,
}
with open(tbl, "w") as handle:
kw_subset = {}
for key in kwargs:
if (
key in ("score", "record_name")
or "_good" in key
or "_bad" in key
or "_overall" in key
):
kw_subset[key] = kwargs[key]
json.dump(kw_subset, handle)
with open(gff3, "w") as handle:
gff3_qc_record.features = gff3_qc_features
gff3_qc_record.annotations = {}
gffWrite([gff3_qc_record], handle)
def nice_strand(direction):
if direction > 0:
return "→".decode("utf-8")
else:
return "←".decode("utf-8")
def nice_strand_tex(direction):
if direction > 0:
return "$\\rightarrow$"
else:
return "$\\leftarrow$"
def texify(data):
return data.replace("_", "\\_").replace("$", "\\$")
def length(data):
return len(data)
def my_encode(data):
return str(data).encode("utf-8")
def my_decode(data):
# For production
return str(data).decode("utf-8")
    # For local testing. No, I do not understand. (unreachable after the return above)
    # return str(data.encode("utf-8")).decode("utf-8")
env = Environment(
loader=FileSystemLoader(SCRIPT_PATH), trim_blocks=True, lstrip_blocks=True
)
env.filters.update(
{
"nice_id": get_gff3_id,
"nice_strand": nice_strand,
"nice_strand_tex": nice_strand_tex,
"texify": texify,
"length": length,
"encode": my_encode,
"decode": my_decode,
}
)
tpl = env.get_template(reportTemplateName)
return tpl.render(**kwargs).encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"annotations", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument("genome", type=argparse.FileType("r"), help="Genome Sequence")
parser.add_argument(
"--gff3", type=str, help="GFF3 Annotations", default="qc_annotations.gff3"
)
parser.add_argument(
"--tbl",
type=str,
help="Table for noninteractive parsing",
default="qc_results.json",
)
parser.add_argument(
"--sd_min",
type=int,
help="Minimum distance from gene start for an SD to be",
default=5,
)
parser.add_argument(
"--sd_max",
type=int,
help="Maximum distance from gene start for an SD to be",
default=15,
)
parser.add_argument(
"--min_gene_length",
type=int,
help="Minimum length for a putative gene call (AAs)",
default=30,
)
parser.add_argument(
"--excessive_overlap_dist",
type=int,
help="Excessive overlap for genes in same direction",
default=25,
)
parser.add_argument(
"--excessive_overlap_divergent_dist",
type=int,
help="Excessive overlap for genes in diff directions",
default=50,
)
parser.add_argument(
"--excessive_gap_dist",
type=int,
help="Maximum distance between two genes",
default=40,
)
parser.add_argument(
"--excessive_gap_divergent_dist",
type=int,
help="Maximum distance between two divergent genes",
default=200,
)
parser.add_argument(
"--reportTemplateName",
help="Report template file name",
default="phageqc_report_full.html",
)
args = parser.parse_args()
sys.stdout.write(evaluate_and_report(**vars(args)))
|
TAMU-CPT/galaxy-tools
|
tools/phage/phage_annotation_validator.py
|
Python
|
gpl-3.0
| 42,652
|
[
"Galaxy"
] |
c3c83c6bb89785213022836c84187549c8c501fa3cf26ceca8b50ceb5341c166
|
#!/usr/bin/env python
import ruamel.yaml
from flask_script import Manager, Shell, prompt_bool
from ludolatin import app
from app import db
from app import models
from app.models import User, Sentence, Quiz, Answer, Score, Topic, Product, Purchase, Comment, Activity
# output = dump(data, Dumper=Dumper)
manager = Manager(app)
def _make_context():
return dict(
app=app,
db=db,
models=models,
user=User.query.first(),
current_user=User.query.first(),
User=User,
Sentence=Sentence,
Quiz=Quiz,
Answer=Answer,
Score=Score,
Topic=Topic,
Product=Product,
Purchase=Purchase,
Comment=Comment,
)
manager.add_command("shell", Shell(make_context=_make_context))
@manager.command
def db_meta():
"""Show database tables metadata"""
print db
for table in db.metadata.sorted_tables:
print "\n", table.name, ":"
for column in table.columns:
print column.name, ":", column.type
@manager.command
def add_admin():
"""Add an admin user"""
user = User(
email="admin@example.com",
username="admin",
password="password",
is_admin="True"
).save()
user.follow(user)
print "\nUser: admin"
print "Password: password\n"
print "Visit http://localhost/admin, login as admin, "
print "Promote an existing user to admin, then delete this temporary admin user.\n"
@manager.command
def show_users():
"""List all users"""
print User.query.all()
@manager.command
def load_sentences():
"""Load the content of data.yml into the English / Latin tables"""
yaml = open('data/quiz_data.yml')
data = ruamel.yaml.load(yaml, ruamel.yaml.RoundTripLoader)
print data
for topic_name, quiz in data.items():
topic = (Topic.query.filter_by(name=topic_name).first() or Topic(name=topic_name))
print topic
topic.save()
for quiz_name, sentences in quiz.items():
quiz = Quiz(
name=quiz_name,
topic=topic
)
print quiz
quiz.save()
for question, answers in sentences.items():
print question
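                # The first entries of the answer list may be metadata dicts: an optional 'lang' followed by a 'type'; the remaining entries are translations.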
lang = ""
if "lang" in answers[0]:
lang = answers.pop(0)['lang']
type = answers.pop(0)['type']
q = Sentence(
lang=lang if lang else None,
type=type,
text=question,
quiz=quiz
)
for answer in answers:
a = Sentence(
text=answer,
)
q.translations.append(a)
a.translations.append(q)
db.session.add(q)
db.session.commit()
@manager.command
def delete_sentences():
"""Delete the content of Sentence table"""
if prompt_bool("Are you sure you want to delete all Sentences?"):
Sentence.query.delete()
@manager.command
def load_lessons():
"""Load the content of products.yml into the Product table"""
yaml = open('data/lessons.yml')
data = ruamel.yaml.load(yaml, ruamel.yaml.RoundTripLoader)
print data
for topic_name, text in data.items():
topic = (Topic.query.filter_by(name=topic_name).first() or Topic(name=topic_name))
print topic
topic.text = unicode(text)
topic.save()
@manager.command
def load_products():
"""Load the content of products.yml into the Product table"""
yaml = open('data/products.yml')
data = ruamel.yaml.load(yaml, ruamel.yaml.RoundTripLoader)
print data
for product_name, attributes in data.items():
product = (Product.query.filter_by(name=product_name).first() or Product(name=product_name))
print product
product.description = attributes[0]
product.price = attributes[1]
product.pricing_formula = attributes[2]
product.availability_function = attributes[3]
product.save()
@manager.command
def delete_products():
"""Delete the content of the Product table"""
if prompt_bool("Are you sure you want to delete all Products?"):
Product.query.delete()
@manager.command
def load_data():
"""Load sentence data"""
load_sentences()
load_lessons()
load_products()
@manager.command
def delete_data():
"""Delete the content of Sentence and Product tables"""
if prompt_bool("Are you sure you want to delete all Sentences and Products?"):
Sentence.query.delete()
Product.query.delete()
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def random_user():
"""Add a random user for testing"""
import random, string
username = "".join(random.choice(string.lowercase) for i in range(5))
user = User(
email=username + "@example.com",
username=username,
password="password",
).save()
user.follow(user)
print "\nUser:", username
print "Password: password\n"
if __name__ == "__main__":
manager.run()
|
merelinguist/ludolatin
|
manage.py
|
Python
|
mit
| 5,300
|
[
"VisIt"
] |
c5436c3c42a7c5d253f8f59db4ac25842b80d1a6beeeb250bb4b196c407e1aea
|
########################################################################
# File : PilotStatusAgent.py
# Author : Stuart Paterson
########################################################################
""" The Pilot Status Agent updates the status of the pilot jobs in the
PilotAgents database.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Security import CS
from DIRAC.Core.Utilities.SiteCEMapping import getSiteForCE
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.AccountingSystem.Client.Types.Pilot import Pilot as PilotAccounting
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
MAX_JOBS_QUERY = 10
MAX_WAITING_STATE_LENGTH = 3
class PilotStatusAgent(AgentModule):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
queryStateList = ['Ready', 'Submitted', 'Running', 'Waiting', 'Scheduled']
finalStateList = ['Done', 'Aborted', 'Cleared', 'Deleted', 'Failed']
#############################################################################
def initialize(self):
"""Sets defaults
"""
self.am_setOption('PollingTime', 120)
self.am_setOption('GridEnv', '')
self.am_setOption('PilotStalledDays', 3)
self.pilotDB = PilotAgentsDB()
self.diracadmin = DiracAdmin()
self.jobDB = JobDB()
self.clearPilotsDelay = self.am_getOption('ClearPilotsDelay', 30)
self.clearAbortedDelay = self.am_getOption('ClearAbortedPilotsDelay', 7)
self.WMSAdministrator = RPCClient('WorkloadManagement/WMSAdministrator')
return S_OK()
#############################################################################
def execute(self):
"""The PilotAgent execution method.
"""
self.pilotStalledDays = self.am_getOption('PilotStalledDays', 3)
self.gridEnv = self.am_getOption('GridEnv')
if not self.gridEnv:
# No specific option found, try a general one
setup = gConfig.getValue('/DIRAC/Setup', '')
if setup:
instance = gConfig.getValue('/DIRAC/Setups/%s/WorkloadManagement' % setup, '')
if instance:
self.gridEnv = gConfig.getValue('/Systems/WorkloadManagement/%s/GridEnv' % instance, '')
result = self.pilotDB._getConnection()
if result['OK']:
connection = result['Value']
else:
return result
# Now handle pilots not updated in the last N days (most likely the Broker is no
# longer available) and declare them Deleted.
result = self.handleOldPilots(connection)
connection.close()
result = self.WMSAdministrator.clearPilots(self.clearPilotsDelay, self.clearAbortedDelay)
if not result['OK']:
self.log.warn('Failed to clear old pilots in the PilotAgentsDB')
return S_OK()
def clearWaitingPilots(self, condDict):
""" Clear pilots in the faulty Waiting state
"""
last_update = Time.dateTime() - MAX_WAITING_STATE_LENGTH * Time.hour
clearDict = {'Status': 'Waiting',
'OwnerDN': condDict['OwnerDN'],
'OwnerGroup': condDict['OwnerGroup'],
'GridType': condDict['GridType'],
'Broker': condDict['Broker']}
result = self.pilotDB.selectPilots(clearDict, older=last_update)
if not result['OK']:
self.log.warn('Failed to get the Pilot Agents for Waiting state')
return result
if not result['Value']:
return S_OK()
refList = result['Value']
for pilotRef in refList:
# FIXME: definitely, one of the 2 lines below is wrong...
self.log.info('Setting Waiting pilot to Aborted: %s' % pilotRef)
result = self.pilotDB.setPilotStatus(pilotRef, 'Stalled', statusReason='Exceeded max waiting time')
return S_OK()
def clearParentJob(self, pRef, pDict, connection):
""" Clear the parameteric parent job from the PilotAgentsDB
"""
childList = pDict['ChildRefs']
# Check that at least one child is in the database
children_ok = False
for child in childList:
result = self.pilotDB.getPilotInfo(child, conn=connection)
if result['OK']:
if result['Value']:
children_ok = True
if children_ok:
return self.pilotDB.deletePilot(pRef, conn=connection)
else:
self.log.verbose('Adding children for parent %s' % pRef)
result = self.pilotDB.getPilotInfo(pRef)
parentInfo = result['Value'][pRef]
tqID = parentInfo['TaskQueueID']
ownerDN = parentInfo['OwnerDN']
ownerGroup = parentInfo['OwnerGroup']
broker = parentInfo['Broker']
gridType = parentInfo['GridType']
result = self.pilotDB.addPilotTQReference(childList, tqID, ownerDN, ownerGroup,
broker=broker, gridType=gridType)
if not result['OK']:
return result
children_added = True
for chRef, chDict in pDict['ChildDicts'].items():
result = self.pilotDB.setPilotStatus(chRef, chDict['Status'],
destination=chDict['DestinationSite'],
conn=connection)
if not result['OK']:
children_added = False
if children_added:
result = self.pilotDB.deletePilot(pRef, conn=connection)
else:
return S_ERROR('Failed to add children')
return S_OK()
def handleOldPilots(self, connection):
"""
    Select all pilots that have not been updated in the last N days, declare them
    Deleted, and account for them.
"""
pilotsToAccount = {}
timeLimitToConsider = Time.toString(Time.dateTime() - Time.day * self.pilotStalledDays)
result = self.pilotDB.selectPilots({'Status': self.queryStateList},
older=timeLimitToConsider,
timeStamp='LastUpdateTime')
if not result['OK']:
self.log.error('Failed to get the Pilot Agents')
return result
if not result['Value']:
return S_OK()
refList = result['Value']
result = self.pilotDB.getPilotInfo(refList)
if not result['OK']:
self.log.error('Failed to get Info for Pilot Agents')
return result
pilotsDict = result['Value']
for pRef in pilotsDict:
if pilotsDict[pRef].get('Jobs') and self._checkJobLastUpdateTime(pilotsDict[pRef]['Jobs'], self.pilotStalledDays):
self.log.debug('%s should not be deleted since one job of %s is running.' %
(str(pRef), str(pilotsDict[pRef]['Jobs'])))
continue
deletedJobDict = pilotsDict[pRef]
deletedJobDict['Status'] = 'Deleted'
deletedJobDict['StatusDate'] = Time.dateTime()
pilotsToAccount[pRef] = deletedJobDict
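      # Flush accounting and kill the corresponding pilots in batches so a long backlog is not held in memory.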
if len(pilotsToAccount) > 100:
self.accountPilots(pilotsToAccount, connection)
self._killPilots(pilotsToAccount)
pilotsToAccount = {}
self.accountPilots(pilotsToAccount, connection)
self._killPilots(pilotsToAccount)
return S_OK()
def accountPilots(self, pilotsToAccount, connection):
""" account for pilots
"""
accountingFlag = False
pae = self.am_getOption('PilotAccountingEnabled', 'yes')
if pae.lower() == "yes":
accountingFlag = True
if not pilotsToAccount:
self.log.info('No pilots to Account')
return S_OK()
accountingSent = False
if accountingFlag:
retVal = self.pilotDB.getPilotInfo(pilotsToAccount.keys(), conn=connection)
if not retVal['OK']:
self.log.error('Fail to retrieve Info for pilots', retVal['Message'])
return retVal
dbData = retVal['Value']
for pref in dbData:
if pref in pilotsToAccount:
if dbData[pref]['Status'] not in self.finalStateList:
dbData[pref]['Status'] = pilotsToAccount[pref]['Status']
dbData[pref]['DestinationSite'] = pilotsToAccount[pref]['DestinationSite']
dbData[pref]['LastUpdateTime'] = pilotsToAccount[pref]['StatusDate']
retVal = self.__addPilotsAccountingReport(dbData)
if not retVal['OK']:
self.log.error('Fail to retrieve Info for pilots', retVal['Message'])
return retVal
self.log.info("Sending accounting records...")
retVal = gDataStoreClient.commit()
if not retVal['OK']:
self.log.error("Can't send accounting reports", retVal['Message'])
else:
self.log.info("Accounting sent for %s pilots" % len(pilotsToAccount))
accountingSent = True
if not accountingFlag or accountingSent:
for pRef in pilotsToAccount:
pDict = pilotsToAccount[pRef]
self.log.verbose('Setting Status for %s to %s' % (pRef, pDict['Status']))
self.pilotDB.setPilotStatus(pRef,
pDict['Status'],
pDict['DestinationSite'],
pDict['StatusDate'],
conn=connection)
return S_OK()
def __addPilotsAccountingReport(self, pilotsData):
""" fill accounting data
"""
for pRef in pilotsData:
pData = pilotsData[pRef]
pA = PilotAccounting()
pA.setEndTime(pData['LastUpdateTime'])
pA.setStartTime(pData['SubmissionTime'])
retVal = CS.getUsernameForDN(pData['OwnerDN'])
if not retVal['OK']:
userName = 'unknown'
self.log.error("Can't determine username for dn:", pData['OwnerDN'])
else:
userName = retVal['Value']
pA.setValueByKey('User', userName)
pA.setValueByKey('UserGroup', pData['OwnerGroup'])
result = getSiteForCE(pData['DestinationSite'])
if result['OK'] and result['Value'].strip():
pA.setValueByKey('Site', result['Value'].strip())
else:
pA.setValueByKey('Site', 'Unknown')
pA.setValueByKey('GridCE', pData['DestinationSite'])
pA.setValueByKey('GridMiddleware', pData['GridType'])
pA.setValueByKey('GridResourceBroker', pData['Broker'])
pA.setValueByKey('GridStatus', pData['Status'])
if 'Jobs' not in pData:
pA.setValueByKey('Jobs', 0)
else:
pA.setValueByKey('Jobs', len(pData['Jobs']))
self.log.verbose("Added accounting record for pilot %s" % pData['PilotID'])
retVal = gDataStoreClient.addRegister(pA)
if not retVal['OK']:
return retVal
return S_OK()
def _killPilots(self, acc):
for i in sorted(acc.keys()):
result = self.diracadmin.getPilotInfo(i)
if result['OK'] and i in result['Value'] and 'Status' in result['Value'][i]:
ret = self.diracadmin.killPilot(str(i))
if ret['OK']:
self.log.info("Successfully deleted: %s (Status : %s)" % (i, result['Value'][i]['Status']))
else:
self.log.error("Failed to delete pilot: ", "%s : %s" % (i, ret['Message']))
else:
self.log.error("Failed to get pilot info", "%s : %s" % (i, str(result)))
def _checkJobLastUpdateTime(self, joblist, StalledDays):
timeLimitToConsider = Time.dateTime() - Time.day * StalledDays
ret = False
for jobID in joblist:
result = self.jobDB.getJobAttributes(int(jobID))
if result['OK']:
if 'LastUpdateTime' in result['Value']:
lastUpdateTime = result['Value']['LastUpdateTime']
if Time.fromString(lastUpdateTime) > timeLimitToConsider:
ret = True
self.log.debug(
          'Since %s updates LastUpdateTime on %s this does not need to be deleted.' %
(str(jobID), str(lastUpdateTime)))
break
else:
self.log.error("Error taking job info from DB", result['Message'])
return ret
|
arrabito/DIRAC
|
WorkloadManagementSystem/Agent/PilotStatusAgent.py
|
Python
|
gpl-3.0
| 12,171
|
[
"DIRAC"
] |
190035e338bc9f5a8acc2c5dd3d475ec20e6c9413d172665b2daca6826cfd0fd
|
from __future__ import absolute_import
from __future__ import print_function
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import os
import tempfile
import shutil
from os.path import join as p
from rdflib.term import URIRef
from owmeta.data_trans.neuron_data import NeuronCSVDataTranslator, NeuronCSVDataSource
from owmeta.neuron import Neuron
from owmeta.document import Document
from owmeta.network import Network
from owmeta.worm import Worm
from owmeta.evidence import Evidence
from .DataTestTemplate import _DataTest
class _Base(_DataTest):
def setUp(self):
super(_Base, self).setUp()
self.startdir = os.getcwd()
self.testdir = tempfile.mkdtemp(prefix=__name__ + '.')
os.chdir(self.testdir)
self.ds = self.context(NeuronCSVDataSource)()
self.ds.basedir = lambda: self.testdir
self.cut = self.context(NeuronCSVDataTranslator)()
def tearDown(self):
super(_Base, self).tearDown()
os.chdir(self.startdir)
shutil.rmtree(self.testdir)
class NeuronCSVDataTranslatorTest(_Base):
def setUp(self):
super(NeuronCSVDataTranslatorTest, self).setUp()
self.process_class(Worm)
self.process_class(Neuron)
self.process_class(Network)
fname = p(self.testdir, 'mycsv.csv')
text = '''
header,row,completely,ignored,x
ADAR,Neuropeptide,PDF-1,WormAtlas,http://wormatlas.org/neurons/Individual%20Neurons/ADAmainframe.htm'''
with open(fname, 'w') as f:
f.write(text.strip())
self.ds.file_name('mycsv.csv')
def test_creates_neuron(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/smashing'))
n = res.data_context(Neuron)()
self.assertEqual(len(list(n.load())), 1)
def test_neuron_name(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
n = res.data_context(Neuron)()
for o in n.load():
self.assertEqual(n.name(), 'ADAR')
def test_neuropeptide(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
n = res.data_context(Neuron)()
for o in n.load():
self.assertEqual(n.neuropeptide(), {'PDF-1'})
def test_evidence(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
ev = res.evidence_context(Evidence)()
ev.supports(res.data_context.rdf_object)
for o in ev.load():
self.assertEqual(o.url(), 'http://wormatlas.org/neurons/Individual%20Neurons/ADAmainframe.htm')
def test_creates_worm(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/smashing'))
n = res.data_context(Worm)()
self.assertEqual(len(list(n.load())), 1)
def test_creates_network(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/smashing'))
n = res.data_context(Network)()
self.assertEqual(len(list(n.load())), 1)
class NeuronCSVDataTranslatorNoEvidenceTest(_Base):
def setUp(self):
super(NeuronCSVDataTranslatorNoEvidenceTest, self).setUp()
fname = p(self.testdir, 'mycsv.csv')
text = '''
header,row,completely,ignored,x
ADAR,Neuropeptide,PDF-1,WormAtlas,'''
with open(fname, 'w') as f:
f.write(text.strip())
self.ds.file_name('mycsv.csv')
def test_no_evidence(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
ev = res.evidence_context(Evidence)()
ev.supports(res.data_context.rdf_object)
self.assertEqual(0, len(list(ev.load())))
class NeuronCSVDataTranslatorBibtexTest(_Base):
def setUp(self):
super(NeuronCSVDataTranslatorBibtexTest, self).setUp()
fname = p(self.testdir, 'mycsv.csv')
text = '''
header,row,completely,ignored,x
ADAR,Neuropeptide,PDF-1,WormAtlas,'''
with open(fname, 'w') as f:
f.write(text.strip())
self.mapper.add_class(Evidence)
self.mapper.add_class(Document)
self.ds.file_name('mycsv.csv')
self.ds.bibtex_files(['ignored'])
self.patcher = patch('owmeta.data_trans.neuron_data.parse_bibtex_into_documents')
mock = self.patcher.start()
def m(a, ctx):
return {'WormAtlas': ctx(Document)(key="WormAtlas", title="something")}
mock.side_effect = m
def tearDown(self):
super(NeuronCSVDataTranslatorBibtexTest, self).tearDown()
self.patcher.stop()
def test_has_evidence(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
ev = res.evidence_context(Evidence)()
self.assertEqual(1, len(list(ev.load())))
def test_evidence_title(self):
res = self.cut(self.ds, output_identifier=URIRef('http://example.org/wonderful'))
ev = res.evidence_context(Evidence)()
for e in ev.load():
self.assertEqual(e.reference().title(), 'something')
|
openworm/PyOpenWorm
|
tests/NeuronCSVDataTranslatorTest.py
|
Python
|
mit
| 5,099
|
[
"NEURON"
] |
f7ea465ff7bd9c7158c67a196d94eec36dd6d045aa599c263e329f3d6bddfd93
|
#!/usr/bin/python
###############################################################################
#
# This script outputs both the rotameric state as a function of time for
# time series plotting and rotamer distributions for error bar analysis
# of rotameric states per trajectory. This script is directly analogous to
# the Occupancy Vs Time scripts. Unfortunately, these scripts are based on
# the assumption that only 2 rotamer states exist (0 and 1) and though
# the rotamer_preprocessor supports multiple cuts, this function doesn't yet
# support that in a useful way.
#
# Example: For a dihedral vs time and state-stream merged data like:
# 25000.0 2.0 ... 1.0 1.0 1.0 0.0
# 25001.0 2.0 ... 1.0 0.0 1.0 0.0
# 25002.0 2.0 ... 1.0 0.0 1.0 1.0
#
# The following command would remove 2000 lines from the input
# and produce a number of plots and statistical output.
# python Rotamer_Vs_Time.py -f f1.out f2.out -t 9
# -x1 0 2 3 4 -x2 1 3 4 5
# -x2_cut 180
# -remove 2000
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from numpy import mean
from scipy.stats import sem
from collections import defaultdict
from ChannelAnalysis.RotamerAnalysis.Preprocessor import *
# This function is useful for counting the ratio of rotameric states
# for each of the input trajectory files with respect to the last state.
# So you might get a traj_id 10 that has a state ratio of 8:1 which means
# that there was 8 times as much in state 1.
def rotamer_counter_per_res(data_lines, data_states, traj_col=11, prefix=None):
# This is an epic datatype that I will use to quickly build a
# dict of dicts where the 1st key is a trajectory number
# and the second key is the ion count and the value is a
# count of how many times that ion count was observed.
count_totals = defaultdict(lambda: defaultdict(int))
# This is the return list in the format:
# traj_id d/u_ratio uncertainty
for line, states in zip(data_lines, data_states):
traj_id = line[traj_col]
for state in states:
count_totals[traj_id][state] += 1
# D/U ratios for all trajectories
rotamer_ratios = []
for traj_id, count_dict in count_totals.iteritems():
# We want the highest dunking state (1 in most cases) and to extract
# the count for that state as the temp_max variable
temp_max = float(sorted(count_dict.iteritems())[-1][-1])
temp_row = [traj_id]
for state, state_count in sorted(count_dict.iteritems()):
temp_row.append(state_count/temp_max)
rotamer_ratios.append(temp_row)
    # Hack to transpose list of lists and iterate over rotamer states
flipped_rows = zip(*rotamer_ratios)
temp_mean = ["MEAN"]
temp_sem = ["STDERR"]
# The first flipped_row entry is a list of traj_id's so we skip it
for rotamer_state in flipped_rows[1:]:
temp_mean.append(mean(rotamer_state))
temp_sem.append(sem(rotamer_state))
rotamer_ratios.append(temp_mean)
rotamer_ratios.append(temp_sem)
return rotamer_ratios
# This function counts the number of dunking states at each step for each
# of the dihedral columns classified using the label_states function
# and separates them on a basis of trajectory id. This allows for a
# statistical measure of the distribution of dunking states much like
# the channel and selectivity filter occupancy functions.
def rotamer_counter(data_lines, data_states, traj_col=11, prefix=None):
# This is an epic datatype that I will use to quickly build a
# dict of dicts where the 1st key is a trajectory number
# and the second key is the ion count and the value is a
# count of how many times that ion count was observed.
count_totals = defaultdict(lambda: defaultdict(int))
for line, states in zip(data_lines, data_states):
traj_id = line[traj_col]
state_total = sum(states)
count_totals[traj_id][state_total] += 1
print count_totals
return count_totals_to_percents(count_totals)
# This is a helper function that takes the datatype generated in
# *_counter functions (trajnum dict -> occupancy_id -> integer counts)
# and converts this to populations in a list. The num ions map is
# useful when the occupancy_id's represent distinct numbers of ions
# in the selectivity filter.
def count_totals_to_percents(count_totals):
# Here's the return datatype that stores the percentage of occupancy
# in a given channel/sf state which can be paired with the indices
ion_count_percents = defaultdict(list)
ion_count_indices = defaultdict(list)
for traj_id, count_dict in count_totals.iteritems():
traj_total_lines = float(sum(count_dict.values()))
for ion_state, ion_count in count_dict.iteritems():
ion_count_percents[traj_id].append(ion_count/traj_total_lines)
ion_count_indices[traj_id].append(ion_state)
# Append a little statistics, sorry if this is confusing...
all_weighted_avgs=[]
weighted_avgs_by_occid=defaultdict(list)
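    # For each trajectory compute the occupancy-weighted mean state; these per-trajectory means feed the MEAN/STDERR summary rows below.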
for traj_id, percents in ion_count_percents.iteritems():
temp_weighted_avg = 0
for occ_id, percent in enumerate(percents):
x = ion_count_indices[traj_id][occ_id]*percent
temp_weighted_avg += x
weighted_avgs_by_occid[occ_id].append(x)
all_weighted_avgs.append(temp_weighted_avg)
for occ_id, weight_avg in weighted_avgs_by_occid.iteritems():
ion_count_percents['MEAN'].append(mean(weight_avg))
ion_count_indices['MEAN'].append(occ_id)
ion_count_percents['STDERR'].append(sem(weight_avg))
ion_count_indices['STDERR'].append(occ_id)
ion_count_percents['MEAN'].append(mean(all_weighted_avgs))
ion_count_indices['MEAN'].append('ALL')
ion_count_percents['STDERR'].append(sem(all_weighted_avgs))
ion_count_indices['STDERR'].append('ALL')
return (dict(ion_count_percents), dict(ion_count_indices))
# This writes the total of all rotamer states as a function of time
# for use of time series plotting purposes.
def compute_rotamer_vs_time(data_lines, data_states, traj_col, prefix=None):
# This a dictionary of file streams that will be used for output
# when prefix is assigned.
count_files = {}
# These are dictionaries of dictionaries where the key is a trajectory
# number and the list is the computed occupancy count, or frame number
rotstate_per_traj = defaultdict(list)
time_per_traj = defaultdict(list)
# This is an epic datatype that I will use to quickly build a
# dict of dicts where the 1st key is a trajectory number
# and the second key is the ion count and the value is a
# count of how many times that ion count was observed.
count_totals=defaultdict(lambda: defaultdict(int))
for line, states in zip(data_lines, data_states):
traj_id = line[traj_col]
state_total = sum(states)
count_totals[traj_id][state_total] += 1
if prefix != None:
if traj_id not in count_files:
count_files[traj_id] = \
open(prefix+"_total_n"+str(traj_id),"w")
count_files[traj_id].write(str(line[0])+" "+
str(traj_id)+" "+
str(state_total)+
"\n")
rotstate_per_traj[traj_id].append(state_total)
time_per_traj[traj_id].append(float(line[0]))
# Close filestreams.
if prefix != None:
for key in count_files.keys():
count_files[key].close()
return (dict(rotstate_per_traj), dict(time_per_traj))
# Returns the population percentages across the entire dataset from the
# label_states array.
def label_statistics(data_states):
cols_per_line = len(data_states[0])
counts_per_state = defaultdict(float)
for line in data_states:
for state in line:
counts_per_state[state] += 1.0
for state in counts_per_state.keys():
counts_per_state[state] /= len(data_states)*cols_per_line
return dict(counts_per_state)
if __name__ == '__main__':
parser = ArgumentParser(
description='Produces rotamer state timeseries datafiles')
parser.add_argument(
'-f', dest='filenames', type=str, nargs="+", required=True,
help='a filename of coordination data from MDAnalysis trajectory data')
parser.add_argument(
'-x1', dest='chi1_cols', type=int, nargs="+", required=True,
help='column numbers in the input that denote chi1 values')
parser.add_argument(
'-x2', dest='chi2_cols', type=int, nargs="+", required=True,
help='column numbers in the input that denote chi2 values')
parser.add_argument(
'-remove', dest='remove_frames', type=int, default=0,
help='this is a number of frames to remove from the start of the data')
parser.add_argument(
'-div', dest='dividers', type=float, nargs="+", default=[180],
        help='slices in angle space that label dunking states (<180 = 0, >180 = 1)')
parser.add_argument(
'-t', dest='traj_col', type=int, default=11,
help='a zero inclusive column number that contains the run number')
parser.add_argument(
'-o', dest='outfile', type=str, default=None,
help='the file to output the sorted padding output of all input files')
args = parser.parse_args()
data_f_dunk = process_rotamers(filenames=args.filenames,
chi1_cols=args.chi1_cols,
chi2_cols=args.chi2_cols,
remove_frames=args.remove_frames,
traj_col=args.traj_col)
# Same thing but now we pass the SF column list.
print "Dunking states using the dividers", args.dividers
data_f_states = label_states(data_f_dunk, args.chi2_cols, args.dividers)
print "Computing dunking populations"
print rotamer_counter(data_f_dunk, data_f_states, traj_col=args.traj_col)
print compute_rotamer_vs_time(data_f_dunk, data_f_states, traj_col=args.traj_col,
prefix="rotamer")
print "Computing dunking counts per residue"
print rotamer_counter_per_res(data_f_dunk, data_f_states,
traj_col=args.traj_col)
|
cing/ChannelAnalysis
|
ChannelAnalysis/RotamerAnalysis/Statistics.py
|
Python
|
mit
| 10,529
|
[
"MDAnalysis"
] |
0649e2ecabe4c8b2435b56f8de6ce93f50df6c67e49fbd58a7d960f38024765d
|
# Copyright (c) 2000 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: ipaddresslib.py,v 1.10 2002/12/18 15:24:59 myers_carpenter Exp $"
"""
A library for determining the IP addresses of the local machine.
"""
# standard modules
import sys, os, re, string, exceptions, socket
if sys.platform == 'win32':
# this is part of the win32all python package, get it from:
# http://www.activestate.com/Products/ActivePython/win32all.html
import win32pipe
# pyutil modules
from pyutil.debugprint import debugprint
class Error(StandardError): pass
class NonRoutableIPError(Error): pass
# Supported platforms:
# The format of this dict is a key identifying a general platform, and a
# tuple value representing specific platform strings which get mapped into
# general identifier. If you need a more specific identification,
# use sys.platform.
platform_map = {
"linux-i386": "linux", # redhat
"linux-ppc": "linux", # redhat
"linux2": "linux", # debian
"win32": "win32",
"irix6-n32": "irix",
"irix6-n64": "irix",
"irix6": "irix",
"openbsd2": "bsd",
"darwin": "bsd", # Mac OS X
"freebsd4": "bsd",
"netbsd1": "bsd",
"sunos5": "sunos",
}
# Platform information:
platform = sys.platform
try:
platform = platform_map[platform]
except KeyError:
# To be cautious, if platform is not in platforms, warn the developer.
# (By release time this should gracefully explain to the user that the
# platform is not supported, but of course that will never happen. -Nate
debugprint("WARNING: %s is not a supported platform.\n" % platform)
debugprint("Supported platforms include:\n" + str(platform_map))
# These work in Redhat 6.x and Debian 2.2 potato
__linux_ifconfig_path = '/sbin/ifconfig'
__linux_route_path = '/sbin/route'
# NetBSD 1.4 (submitted by Rhialto)
__netbsd_ifconfig_path = '/sbin/ifconfig -a'
__netbsd_netstat_path = '/usr/bin/netstat'
# Darwin/MacOSX
__darwin_ifconfig_path = '/sbin/ifconfig -a'
__darwin_netstat_path = '/usr/sbin/netstat'
# Solaris 2.x
__sunos_ifconfig_path = '/usr/sbin/ifconfig -a'
__sunos_netstat_path = '/usr/bin/netstat'
# Irix 6.5
__irix_ifconfig_path = '/usr/etc/ifconfig -a'
__irix_netstat_path = '/usr/etc/netstat'
# Wow, I'm really amazed at how much mileage we've gotten out of calling
# the external route.exe program on windows... It appears to work on all
# versions so far. Still, the real system calls would much be preferred...
__win32_route_path = 'route.exe'
#
# NOTE: these re's DO get used outside of this file
#
valid_ipaddr_re = re.compile(r"^\d\d?\d?\.\d\d?\d?\.\d\d?\d?\.\d\d?\d?$")
localhost_re = re.compile(r"^(localhost$|localhost\.|127\.)")
# match RFC1597 addresses and all class D multicast addresses
bad_address_re = re.compile(r"^(10\.|192\.168\.|172\.(1[6-9]|2\d|3[01])\.|22[4-9]\.|23\d\.).*")
IP_ADDRESS_DETECTOR_HOST = '198.11.16.136'
def get_primary_ip_address(nonroutableok) :
"""
@param nonroutableok: `true' if and only if it is okay to bind to a non-routable IP address
like 127.0.0.1 or 192.168.1.2
"""
address = find_address_via_socket()
if not address:
address = find_address_via_config(nonroutableok)
#print "ipaddresslib: get_primary_ip_address: ", address
return address
def find_address_via_socket():
"""
Playing around with the socket module I stumbled across this method!
This will detect the IP address of your network interface that would
be used to connect to the configured host. A good idea would be to
always use the root metatracker as the host.
"""
address = None
detectorhost = IP_ADDRESS_DETECTOR_HOST
# this won't actually send any packets, it just creates a DGRAM socket so we can call getsockname() on it
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect( (detectorhost, 53) )
address, ignoredport = s.getsockname()
except socket.error, se:
debugprint("Error trying to use %s to detect our IP address: %s (this is normal on win95/98/ME)\n", args=(detectorhost, se), v=2, vs='ipaddresslib')
address = None
del s
if address == "0.0.0.0": # For windows
address = None
return address
def find_address_via_config(nonroutableok):
global platform
if platform == 'linux':
ifacedict = read_linux_ifconfig()
default_iface = get_linux_default_iface()
if not default_iface or not ifacedict.has_key(default_iface) :
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else :
address = ifacedict[default_iface]
elif platform == 'win32' :
addr = read_win32_default_ifaceaddr()
if not addr :
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else :
address = addr
elif platform == 'bsd':
ifacedict = read_netbsd_ifconfig()
default_iface = get_netbsd_default_iface()
if not default_iface or not ifacedict.has_key(default_iface) :
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else :
address = ifacedict[default_iface]
elif platform == 'darwin1':
ifacedict = read_netbsd_ifconfig(ifconfig_path=__darwin_ifconfig_path)
default_iface = get_netbsd_default_iface(netstat_path=__darwin_netstat_path)
if not default_iface or not ifacedict.has_key(default_iface) :
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else :
address = ifacedict[default_iface]
elif platform == 'irix' :
ifacedict = read_irix_ifconfig()
default_iface = get_irix_default_iface()
if not default_iface or not ifacedict.has_key(default_iface) :
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else :
address = ifacedict[default_iface]
elif platform == 'sunos':
ifacedict = read_sunos_ifconfig()
default_iface = get_sunos_default_iface()
if not default_iface or not ifacedict.has_key(default_iface):
debugprint("ERROR ipaddresslib couldn't determine your IP address, assuming 127.0.0.1 for testing\n", vs='ipaddresslib')
address = '127.0.0.1'
else:
address = ifacedict[default_iface]
else :
debugprint('ERROR ipaddresslib unsupported os: '+sys.platform)
        # forced_address is never defined in this module, so just raise for unsupported platforms
        raise RuntimeError, "unsupported OS in ipaddresslib and IP_ADDRESS_OVERRIDE not configured"
debugprint("I think your IP Address is " + address + "\n", v=3, vs='ipaddresslib')
if (not nonroutableok) and (not is_routable(address)):
raise NonRoutableIPError
if not valid_ipaddr_re.match(str(address)) :
raise RuntimeError, "ipaddresslib could not figure out a valid IP address for your host; invalid address: %s" % address
return address
def is_routable(address):
return (address) and not (localhost_re.match(address) or bad_address_re.match(address))
########################################################################
def read_linux_ifconfig():
"""Returns a dict mapping interface names to IP addresses"""
# this is one function that may have been simpler in Perl...
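    # ifconfig separates interface blocks with a blank line; pull the name and inet addr out of each block.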
ifconfig_output = os.popen(__linux_ifconfig_path).read()
ifconfig_output_ifaces = string.split(ifconfig_output, '\n\n')
iface_re = re.compile('^(?P<name>\w+)\s.+\sinet addr:(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
resultdict = {}
for output in ifconfig_output_ifaces:
m = iface_re.match(output)
if m:
d = m.groupdict()
resultdict[d['name']] = d['address']
return resultdict
def get_linux_default_iface():
"""Returns the interface name the default route uses on this machine"""
route_output = os.popen(__linux_route_path + ' -n').read()
def_route_re = re.compile('^0\.0\.0\.0\s.*\s(?P<name>\w+)$', flags=re.M|re.I)
m = def_route_re.search(route_output)
if m:
return m.group('name')
else:
return None
########################################################################
def read_netbsd_ifconfig(ifconfig_path=__netbsd_ifconfig_path):
"""Returns a dict mapping interface names to IP addresses"""
# this is one function that may have been simpler in Perl...
ifconfig_output = os.popen(ifconfig_path).read()
ifconfig_output_ifaces = string.split(ifconfig_output, '\n')
name_re = re.compile('^(?P<name>\w+): flags=', flags=re.M|re.I|re.S)
addr_re = re.compile('^\s+inet (?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
resultdict = {}
for output in ifconfig_output_ifaces:
m = name_re.match(output)
if m:
name = m.groupdict()['name']
m = addr_re.match(output)
if m:
d = m.groupdict()
resultdict[name] = d['address']
return resultdict
def get_netbsd_default_iface(netstat_path=__netbsd_netstat_path):
"""Returns the interface name the default route uses on this machine"""
route_output = os.popen(netstat_path + ' -rn').read()
def_route_re = re.compile('^default\s.*\s(?P<name>\w+)$', flags=re.M|re.I)
m = def_route_re.search(route_output)
if m:
return m.group('name')
else:
return None
########################################################################
def read_sunos_ifconfig(ifconfig_path=__sunos_ifconfig_path):
"""Returns a dict mapping interface names to IP addresses"""
# this is one function that may have been simpler in Perl...
ifconfig_output = os.popen(ifconfig_path).read()
ifconfig_output_ifaces = string.split(ifconfig_output, '\n')
name_re = re.compile('^(?P<name>[\w:]+): flags=', flags=re.M|re.I|re.S)
addr_re = re.compile('^\s+inet (?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
resultdict = {}
for output in ifconfig_output_ifaces:
m = name_re.match(output)
if m:
name = m.groupdict()['name']
m = addr_re.match(output)
if m:
d = m.groupdict()
resultdict[name] = d['address']
return resultdict
def get_sunos_default_iface(netstat_path=__sunos_netstat_path):
"""Returns the interface name the default route uses on this machine"""
route_output = os.popen(netstat_path + ' -rna').read()
routes_split = string.split(route_output, '\n')
def_route_re = re.compile('^0.0.0.0\s.*\s(?P<name>\w+)$', flags=re.M|re.I)
for output in routes_split:
m = def_route_re.search(output)
if m:
return m.group('name')
else:
return None
########################################################################
# Irix thankfully uses the BSD ifconfig and netstat tools...
def read_irix_ifconfig(ifconfig_path=__irix_ifconfig_path):
"""Returns a dict mapping interface names to IP addresses"""
ifconfig_output = os.popen(ifconfig_path).read()
ifconfig_output_ifaces = string.split(ifconfig_output, '\n')
name_re = re.compile('^(?P<name>\w+): flags=', flags=re.M|re.I|re.S)
addr_re = re.compile('^\s+inet (?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
resultdict = {}
for oidx in filter( lambda n: (n+2)%2, range(len(ifconfig_output_ifaces)) ):
output, nextoutput = ifconfig_output_ifaces[oidx-1:oidx+1]
m = name_re.match(output)
if m:
name = m.groupdict()['name']
m = addr_re.match(nextoutput)
if m:
d = m.groupdict()
resultdict[name] = d['address']
return resultdict
def get_irix_default_iface():
return get_netbsd_default_iface(netstat_path=__irix_netstat_path)
########################################################################
def read_win32_default_ifaceaddr(warninglogged_boolean=[]):
# we could use the but it causes problems with some Norton tool
# We'll use it. The other thing doesn't work for Art. --Zooko 2002-11-03
return _route_read_win32_default_ifaceaddr()
# return _hostname_read_win32_default_ifaceaddr()
def _hostname_read_win32_default_ifaceaddr():
"""return the IP address found by looking up our hostname"""
try:
myhostname = socket.gethostname()
myipaddress = socket.gethostbyname(myhostname)
except socket.error, e:
debugprint('WARNING: could not obtain IP address for your machine.\n', v=1, vs="ipaddresslib.win32")
return "127.0.0.1" # unknown IP address, return localhost
return myipaddress
def _route_read_win32_default_ifaceaddr():
"""return the IP address of the interface used by the first default route"""
# the win32pipe interface hopefully doesn't bluescreen norton antivirus
try:
route_output = win32pipe.popen(__win32_route_path + ' print 0.0.0.0').read()
except:
debugprint('WARNING: win32pipe.popen() failed reverting to os.popen() to call ROUTE to obtain IP address\n', vs='ipaddresslib.win32')
route_output = os.popen(__win32_route_path + ' print 0.0.0.0').read()
def_route_re = re.compile('^\s*0\.0\.0\.0\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
m = def_route_re.search(route_output)
if m:
return m.group('address')
else:
return None
|
zooko/egtp_new
|
egtp/ipaddresslib.py
|
Python
|
lgpl-2.1
| 14,029
|
[
"VisIt"
] |
01aea13dc18507b681fe24c65c3916e1d6b27f2fc6cf22473720239cd4934f20
|
"""
Binary classes
"""
import data, logging, binascii
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes import metadata
from galaxy.datatypes.sniff import *
from urllib import urlencode, quote_plus
import zipfile, gzip
import os, shutil, subprocess, tempfile
log = logging.getLogger(__name__)
# Currently these supported binary data types must be manually set on upload
unsniffable_binary_formats = [ 'ab1', 'scf' ]
class Binary( data.Data ):
"""Binary data"""
def set_peek( self, dataset, is_multi_byte=False ):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = 'binary data'
dataset.blurb = 'data'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def get_mime( self ):
"""Returns the mime type of the datatype"""
return 'application/octet-stream'
class Ab1( Binary ):
"""Class describing an ab1 binary sequence file"""
file_ext = "ab1"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'ab1','name':'ab1 sequence','info':'Sequence file','dbkey':dataset.dbkey} )
dataset.peek = "Binary ab1 sequence file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Binary ab1 sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
class Bam( Binary ):
"""Class describing a BAM binary file"""
file_ext = "bam"
MetadataElement( name="bam_index", desc="BAM Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
def groom_dataset_content( self, file_name ):
"""
Ensures that the Bam file contents are sorted. This function is called
on an output dataset after the content is initially generated.
"""
# Use samtools to sort the Bam file
##$ samtools sort
##Usage: samtools sort [-on] [-m <maxMem>] <in.bam> <out.prefix>
## Sort alignments by leftmost coordinates. File <out.prefix>.bam will be created.
## This command may also create temporary files <out.prefix>.%d.bam when the
## whole alignment cannot be fitted into memory ( controlled by option -m ).
#do this in a unique temp directory, because of possible <out.prefix>.%d.bam temp files
tmp_dir = tempfile.mkdtemp()
tmp_sorted_dataset_file_name_prefix = os.path.join( tmp_dir, 'sorted' )
stderr_name = tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "bam_sort_stderr" ).name
samtools_created_sorted_file_name = "%s.bam" % tmp_sorted_dataset_file_name_prefix #samtools accepts a prefix, not a filename, it always adds .bam to the prefix
command = "samtools sort %s %s" % ( file_name, tmp_sorted_dataset_file_name_prefix )
proc = subprocess.Popen( args=command, shell=True, cwd=tmp_dir, stderr=open( stderr_name, 'wb' ) )
proc.wait()
#Did sort succeed?
stderr = open( stderr_name ).read().strip()
if stderr:
raise Exception, "Error Grooming BAM file contents: %s" % stderr
# Move samtools_created_sorted_file_name to our output dataset location
shutil.move( samtools_created_sorted_file_name, file_name )
# Remove temp file and empty temporary directory
os.unlink( stderr_name )
os.rmdir( tmp_dir )
def init_meta( self, dataset, copy_from=None ):
Binary.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite = True, **kwd ):
""" Creates the index for the BAM file. """
# These metadata values are not accessible by users, always overwrite
index_file = dataset.metadata.bam_index
if not index_file:
index_file = dataset.metadata.spec['bam_index'].param.new_file( dataset = dataset )
# Create the Bam index
##$ samtools index
##Usage: samtools index <in.bam> [<out.index>]
stderr_name = tempfile.NamedTemporaryFile( prefix = "bam_index_stderr" ).name
command = 'samtools index %s %s' % ( dataset.file_name, index_file.file_name )
proc = subprocess.Popen( args=command, shell=True, stderr=open( stderr_name, 'wb' ) )
proc.wait()
#Did index succeed?
stderr = open( stderr_name ).read().strip()
if stderr:
raise Exception, "Error Setting BAM Metadata: %s" % stderr
dataset.metadata.bam_index = index_file
# Remove temp file
os.unlink( stderr_name )
def sniff( self, filename ):
# BAM is compressed in the BGZF format, and must not be uncompressed in Galaxy.
# The first 4 bytes of any bam file is 'BAM\1', and the file is binary.
try:
header = gzip.open( filename ).read(4)
if binascii.b2a_hex( header ) == binascii.hexlify( 'BAM\1' ):
return True
return False
except:
return False
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'bam','name':'bam alignments','info':'Alignments file','dbkey':dataset.dbkey} )
dataset.peek = "Binary bam alignments file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Binary bam alignments file (%s)" % ( data.nice_size( dataset.get_size() ) )
class Binseq( Binary ):
"""Class describing a zip archive of binary sequence files"""
file_ext = "binseq.zip"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
zip_file = zipfile.ZipFile( dataset.file_name, "r" )
num_files = len( zip_file.namelist() )
dataset.peek = "Archive of %s binary sequence files" % ( str( num_files ) )
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Binary sequence file archive (%s)" % ( data.nice_size( dataset.get_size() ) )
def get_mime( self ):
"""Returns the mime type of the datatype"""
return 'application/zip'
class Scf( Binary ):
"""Class describing an scf binary sequence file"""
file_ext = "scf"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
export_url = "/history_add_to?" + urlencode({'history_id':dataset.history_id,'ext':'scf','name':'scf sequence','info':'Sequence file','dbkey':dataset.dbkey})
dataset.peek = "Binary scf sequence file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Binary scf sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
class Sff( Binary ):
""" Standard Flowgram Format (SFF) """
file_ext = "sff"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def sniff( self, filename ):
# The first 4 bytes of any sff file is '.sff', and the file is binary. For details
# about the format, see http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=format
try:
header = open( filename ).read(4)
if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ):
return True
return False
except:
return False
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'sff','name':'sff file','info':'sff file','dbkey':dataset.dbkey} )
dataset.peek = "Binary sff file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Binary sff file (%s)" % ( data.nice_size( dataset.get_size() ) )
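# --- Illustrative addition, not part of the original Galaxy datatype module ---
# A minimal, self-contained sketch of the magic-byte sniffing idea used by
# Bam.sniff and Sff.sniff above: read the first few bytes of a file and compare
# them against a known signature. The helper name and the example calls are
# assumptions for illustration only.
def _example_sniff_magic( filename, magic, compressed=False ):
    """Return True if the file starts with the given magic bytes."""
    try:
        if compressed:
            # BAM data is BGZF (gzip) compressed, so peek at the decompressed stream
            header = gzip.open( filename ).read( len( magic ) )
        else:
            header = open( filename ).read( len( magic ) )
        return binascii.b2a_hex( header ) == binascii.hexlify( magic )
    except Exception:
        return False
# Hypothetical usage:
#   _example_sniff_magic( "reads.bam", 'BAM\1', compressed=True )
#   _example_sniff_magic( "reads.sff", '.sff' )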
|
volpino/Yeps-EURAC
|
lib/galaxy/datatypes/binary.py
|
Python
|
mit
| 9,097
|
[
"Galaxy"
] |
6debbfa8de19c33b8c8358801148ded0893a189f81e266c20211d443d3eec3b9
|
# -*- coding: utf-8 -*-
"""
GUINEA PIG SKIN IMAGE ANALYSIS GESTATION ESTIMATION SUITE (GPS ImAGES)
Created on Wed Aug 17 09:16:22 2016
Last updated 8/17/2016
@author: Aaron Au
"""
import dicom
import glob as gb
import numpy as np
from math import *
import matplotlib.pyplot as plt
import pandas as pd
import scipy.ndimage as ndimage
from scipy import signal;
import time
import tifffile
import bisect
class ImagePrep():
#All Postprocessing Algorithms Go Here
def importDiacom(self,directory, max_size):
"""
Imports a folder of diacom files into a np array
INPUTS:
directory = folder containing all the images
max_size = 3 x 1 array containing maximum size of image; [x, y, z]
OUTPUT:
3D array of values corresponding to image
"""
[Max_X, Max_Y, Max_Z] = max_size;
start = time.clock();
image_names = gb.glob(directory+"/*"); #Get all files in that directory
image_names.sort(); #Sort according to image name
total_images = len(image_names);
if total_images > Max_Z:
total_images = Max_Z;
if total_images > 0:
RefDs = dicom.read_file(image_names[0]);
Img = RefDs.pixel_array; # Call one image to get the image size
Pix_X = Img.shape[1]; #Check for X
if Img.shape[1] > Max_X:
Pix_X = Max_X;
Pix_Y = Img.shape[0]; #Check for Y
if Img.shape[0] > Max_Y:
Pix_Y = Max_Y;
#Import images into array
OrImage = np.zeros([Pix_X,Pix_Y,total_images+1]);
for i in range(0, total_images):
RefDs = dicom.read_file(image_names[i]);
OrImage[:,:,i] = RefDs.pixel_array[:Pix_X, :Pix_Y];
print("importDiacom done, time elapsed :" + str(time.clock()-start));
return OrImage;
else:
print("importDiacom fail, no images");
def testLayers(self, const_x, const_y, d, shape, gaus=False):
"""
Creates a single 'continuous' plane based on the given parameters
anything in the z direction that goes close to 5 and max_z-5 becomes 5 and max_z-5 respectively
INPUTS:
            const_x = array of constants for the x axis such that [a, b, ..., n] = ax + bx^2 + ... + nx^n
            const_y = array of constants for the y axis (must be the same length as const_x)
            d = distance between the two layers
            shape = 2 x 1 array giving the (x, y) size of the plane
            gaus = OPTIONAL modulate the layer spacing with a gaussian profile (default: False)
"""
g = np.zeros(shape);
if (gaus == True):
gx = signal.gaussian(shape[0], std=shape[0]/9);
gy = signal.gaussian(shape[1], std=shape[1]/9);
gm = np.meshgrid(gx, gy);
g = gm[0]*gm[1]*d;
x, y = np.meshgrid(range(0,shape[0]), range(0,shape[1]));
i = 1;
z = np.zeros(shape);
dx = np.zeros(shape);
dy = np.zeros(shape);
for a, b in zip(const_x, const_y):
z = a*x**i + b*y**i + z;
dx = i*a*(x**(i-1)) + dx;
dy = i*b*(y**(i-1)) + dy;
i += 1;
m = (d+g)/np.sqrt(dx**2 + dy**2 + 1**2);
        output = np.zeros([shape[0], shape[1], int(z.max()+2*d+15)]);
        for xi in range(0,shape[0]):
            for yi in range(0,shape[1]):
                output[xi,yi,int(z[xi,yi])+5] = 255;
                sx = xi-dy[xi,yi]*m[xi,yi];
                sy = yi-dx[xi,yi]*m[xi,yi];
                if sx >= 0 and sx < shape[0] and sy >= 0 and sy < shape[1]:
                    output[int(sx), int(sy), int(z[xi,yi]+m[xi,yi])] = 125;
        return output;
def gaussFilter(self, array, sig):
"""
Creates a gaussian filter of 3d array
INPUTS:
array = 3d array of image
            sig = 3 x 1 array containing std; [x, y, z]
OUTPUTS:
Gaussian Filtered array
"""
start = time.clock();
ConvoArray = ndimage.gaussian_filter(array, sigma=(sig[0], sig[1], sig[2]), order=0);
print("gaussFilter done, time elapsed :" + str(time.clock()-start));
return ConvoArray;
def dogFilter(self, array, small_psf, large_psf):
"""
Performs a difference of gaussians on the image
INPUTS:
array = 3d array of image
small_psf = 3 x 1 array containing std; [x,y,z]
large_psf = 3 x 1 array containing std of larger psf; [x,y,z]
OUTPUTS:
Difference of Gaussians array
"""
start = time.clock();
SmallArray = ndimage.gaussian_filter(array, sigma=(small_psf[0], small_psf[1], small_psf[2]), order=0);
LargeArray = ndimage.gaussian_filter(array, sigma=(large_psf[0], large_psf[1], large_psf[2]), order=0);
DoGArray = SmallArray - LargeArray;
print("dogFilter done, time elapsed :" + str(time.clock()-start));
return DoGArray;
def map_peaks(self, fp, dis):
"""
Creates a 3d array of the location of peaks
INPUTS:
fp, dis = pickle arrays
        OUTPUTS:
3d array of peaks
"""
        plots = np.zeros([max(fp['x'])+1, max(fp['y'])+1, 300]);
for f in fp.index:
for p in dis[dis['fp_id']==f].iterrows():
plots[fp.loc[f]['x'], fp.loc[f]['y'], p[1]['loc']]=255;
return plots;
def saveAsTiff(self, file_name, array, types=np.int8):
"""
Save 3d array into tiff format
INPUTS:
file_name = directory + filename with .tiff as the ending
array = 3d array
types = OPTIONAL save values as different types (default: np.int8)
OUTPUTS:
none
"""
tifffile.imsave(file_name, array.astype(types));
def saveAsPickle(self, image, name):
"""
Extracts the FP and Dis and saves it in a pickle DF format
INPUTS:
image = 3d array representing image
OUTPUTS:
none
"""
Col_FP = ['x','y','z'];
Col_Dis = ['fp_id', 'loc', 'amp'];
i = 0;
#DF_FP = pd.DataFrame(columns=Col_FP);
#DF_Dis = pd.DataFrame(columns=Col_Dis);
fps = np.array([[0,0,0]]);
dis = np.array([[0,0,0]]);
counter = 0;
for x in range(0,image.shape[0]):
for y in range(0, image.shape[1]):
#fp, dist = da.halfMax1d(image[x,y,:], x, y, i);
fp, dist = da.peaks(image[x,y,:],x,y,i);
#DF_FP = DF_FP.append(pd.DataFrame([fp], columns = Col_FP), ignore_index = True);
                if fp is not None:
                    fps = np.insert(fps,len(fps),fp,axis=0);
                if dist is None:
counter += 1;
else:
#DF_Dis = DF_Dis.append(pd.DataFrame(dist, columns = Col_Dis), ignore_index = True);
dis = np.insert(dis, len(dis),dist, axis=0);
i += 1;
DF_FP = pd.DataFrame(data=fps, columns=Col_FP);
DF_Dis = pd.DataFrame(data=dis, columns=Col_Dis);
DF_FP.to_pickle(name+"FP_pd");
DF_Dis.to_pickle(name+"Dis_pd");
def saveAsTiff_Dis(self, image, name, types=np.int8):
"""
        Extracts the FP and Dis maps and saves them as tiff images
        INPUTS:
            image = 3d array representing image
            name = directory + filename prefix for the output tiffs
            types = OPTIONAL save values as different types (default: np.int8)
        OUTPUTS:
            fps, dis = 2d arrays of first-peak z locations and first peak distances
"""
i = 0;
        fps = np.zeros([image.shape[0], image.shape[1]]);
        dis = np.zeros([image.shape[0], image.shape[1]]);
counter = 0;
for x in range(0,image.shape[0]):
for y in range(0, image.shape[1]):
#fp, dist = da.halfMax1d(image[x,y,:], x, y, i);
fp, dist = da.peaks(image[x,y,:],x,y,i);
#DF_FP = DF_FP.append(pd.DataFrame([fp], columns = Col_FP), ignore_index = True);
                if fp is not None:
                    fps[x,y] = fp[2]; # store only the z-location of the first peak
                if dist is None:
counter += 1;
else:
                    dis[x,y] = dist[0,1]; # distance (in z) to the first following peak
i += 1;
tifffile.imsave(name+'_fps.tiff', fps.astype(types));
        tifffile.imsave(name+'_dis.tiff', dis.astype(types));
return fps, dis;
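# --- Illustrative addition, not part of the original analysis script ---
# A minimal sketch of the difference-of-gaussians (DoG) band-pass idea used by
# ImagePrep.dogFilter above: blur the same volume with a small and a large
# gaussian kernel and subtract. The synthetic random volume and the sigma
# values below are assumptions chosen purely for illustration.
def _example_dog(shape=(32, 32, 32), small_sigma=2, large_sigma=6):
    rng = np.random.RandomState(0);
    volume = rng.rand(*shape);
    small = ndimage.gaussian_filter(volume, sigma=small_sigma);
    large = ndimage.gaussian_filter(volume, sigma=large_sigma);
    # Subtracting the two blurs keeps structures between the two spatial scales.
    return small - large;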
class DataAcqusition():
#All Data Acqusition Algorithms Go Here
def __none(self):
print "place holder";
def max1d(self, Arr1D, x, y, j, Pk_Thres=0.1):
"""
Determine the First Peak and distance of all following peaks afterwards
INPUTS:
Arr1D = 1D Array describing profile in Z direction
x, y = integer describing location of 1D profile
j = integer describing index of FP
Pk_Thres = threshold where peaks only occur where > max(Arr1D)*thresh
OUTPUTS:
First Peak values [x,y, z], List of Distances [index of FP, location, amplitude]
"""
Loc = [];
Amp = [];
js = [];
Arr1D[Arr1D < np.max(Arr1D) * Pk_Thres] = np.max(Arr1D) * Pk_Thres;
Maxes = signal.argrelmax(Arr1D)[0];
if Maxes.shape[0] <= 0:
FP = float('nan');
elif Maxes.shape[0] == 1:
FP = Maxes[0];
Loc = [float('nan')];
Amp = [float('nan')];
else: #Maxes.shape[0] > 1
FP = Maxes[0];
for i in range(1, Maxes.shape[0]):
Loc.append(Maxes[i] - Maxes[0]);
Amp.append(Arr1D[Maxes[i]]/np.max(Arr1D));
js.append(j);
Dis = np.array([js, Loc, Amp]).T;
if len(js) <= 0:
Dis = float('nan');
return [x, y, FP], Dis;
def halfMax1d(self, Arr1D, x, y, j):
"""
Determine the First Peak (based on Half Maximum) and distance of all following peaks afterwards
INPUTS:
Arr1D = 1D Array describing profile in Z direction
x, y = integer describing location of 1D profile
j = integer describing index of FP
OUTPUTS:
First Peak values [x,y, z], List of Distances [index of FP, location, amplitude]
"""
Loc = [];
Amp = [];
js = [];
Maxe = [];
#Arr1D[Arr1D < np.max(Arr1D) * Thres] = np.max(Arr1D) * Thres;
Maxes = signal.argrelmax(Arr1D, order=5)[0];
difMaxes = signal.argrelmax(np.diff(Arr1D), order=5)[0];
if Maxes.shape[0] <= 0:
FP = float('nan');
if Maxes.shape[0] == 1:
[FP, temp, nahh] = self.__detHM(Maxes[0], difMaxes, Arr1D);
Loc = [float('nan')];
Amp = [float('nan')];
if Maxes.shape[0] > 1:
l = True;
k = 0;
"""while l:
FP, temp = self.__detHM(Maxes[k], difMaxes, Arr1D);
if not np.isnan(FP):
l = False; #exit loop
elif k == (Maxes.shape[0] - 1):
l = False; #too big
k += 1;"""
for i in range(k, Maxes.shape[0]):
tLoc, tAmp, tMax = self.__detHM(Maxes[i], difMaxes, Arr1D);
"""if not np.isnan(tLoc):
Loc.append(tLoc - FP);
Amp.append(tAmp);
js.append(j);"""
if not np.isnan(tLoc):
Loc.append(tLoc);
Amp.append(tAmp);
js.append(j);
Maxe.append(tMax);
ampMax = np.argmax(Maxe);
FP = Loc[ampMax];
del Loc[:ampMax+1];
Loc[:] = [x - FP for x in Loc];
del Amp[:ampMax+1];
del js[:ampMax+1];
Dis = np.array([js, Loc, Amp]).T;
if len(js) <= 0:
Dis = float('nan');
return [x, y, FP], Dis;
def __detHM(self, argMax, diffMaxes, Arr1D):
"""
Private method to determine the Halfmax point
"""
if diffMaxes.shape[0] > 0:
if argMax < diffMaxes[0]:
return [float('nan'), float('nan'), float('nan')];
else:
#diffMax = np.max(diffMaxes[diffMaxes<argMax]); #Location of the POI
diffMax = diffMaxes[bisect.bisect_left(diffMaxes, argMax) - 1];
return [diffMax, Arr1D[argMax] - Arr1D[diffMax], Arr1D[argMax]];
else:
return [float('nan'), float('nan'), float('nan')];
def peaks(self, Arr1D, x, y , j):
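        """
        Determine the First Peak (taken as the left half-maximum of the largest
        peak) and the distances of all following peaks, mirroring max1d and
        halfMax1d above.
        INPUTS:
            Arr1D = 1D Array describing profile in Z direction
            x, y = integer describing location of 1D profile
            j = integer describing index of FP
        OUTPUTS:
            First Peak values [x, y, z] and np array of distances
            [index of FP, location, amplitude], or (None, None) if no peak is found
        """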
#Determine FP
difMaxes = signal.argrelmax(np.diff(Arr1D), order=5)[0]; #Use this to determine the number of half maximums
if len(difMaxes) > 0:
FP = difMaxes[bisect.bisect_left(difMaxes, Arr1D.argmax()) - 1]; #location of FP, by looking for left half_max from largest peak
FP_ind = np.where(difMaxes==FP)[0][0]; #location of FP wrt difMaxes
dis_np = None;
if len(difMaxes) > FP_ind + 1: #has distances after FP
diffs = difMaxes[FP_ind:];
dis_np = np.zeros([len(diffs)-1, 3]);
for i in range(len(diffs)-1):
dis_np[i,0] = j; #index
dis_np[i,1] = diffs[i+1] - FP; #z-difference
dis_np[i,2] = Arr1D[diffs[i+1]]/Arr1D[FP];#amplitude/Max_amplitude
return [x, y, FP], dis_np;
else:
return None, None
def histogram(self, distances, numbins):
"""
Create histogram of distances
INPUT:
            distances = the pandas table for distances [fp_index, loc, amplitude]
            numbins = number of divisions in histogram
OUTPUT:
n = array of counts
bins = edges of bins
"""
n, bins, temp = plt.hist(distances['loc'], bins=numbins);
return n, bins;
def coverage(self, fps, distances, upr_bnd, lwr_bnd):
"""
Determines coverage of peaks in a given upper and lower bound of peaks
INPUT:
            fps = pandas table of first peak >> x || y || z
            distances = pandas table of distances >> fp_id || loc || amp
upr_bnd, lwr_bnd = upper and lower bounds of peaks (loc)
OUTPUT:
coverage = 2d array of counts at the respective (x,y) coordinate
"""
temp_dis = distances.ix[(distances['loc'] <= upr_bnd) & (distances['loc'] >= lwr_bnd)];
coverage = np.zeros([int(max(fps['x']))+1, int(max(fps['y']))+1]);
i = 0;
for row_dis in temp_dis.iterrows():
row_fp = fps.loc[int(row_dis[1]['fp_id'])];
coverage[int(row_fp['x']), int(row_fp['y'])] = coverage[int(row_fp['x']), int(row_fp['y'])] + 1;
i+=1;
if coverage.max() > 0:
coverage = coverage / coverage.max() * 255;
return coverage;
def second_peak_difference(self, fps1, distances1, fps2, distances2):
"""
        Determines the height of sp1 and subtracts it from the height of sp2.
INPUT:
fps1, distances1, fps2, distances2 = are all panda tables of first peak and distances
OUTPUT:
difference = 2d array of differences sp2 - sp1
"""
sp1 = np.ones([max(fps1['x'])+1,max(fps1['y'])+1])*100
sp2 = np.ones([max(fps1['x'])+1,max(fps1['y'])+1])*200
for x in range(max(fps1.x)):
for y in range(max(fps1.y)):
i = fps1[(fps1.x == x) & (fps1.y == y)].index;
j = fps2[(fps2.x == x) & (fps2.y == y)].index;
if len(i) == 1:
dis = distances1[distances1.fp_id == i[0]]["loc"]
if len(dis) > 0:
sp1[x,y] = min(dis);
if len(j) == 1:
dis = distances2[distances2.fp_id == j[0]]["loc"]
if len(dis) > 0:
sp2[x,y] = min(dis);
return sp2-sp1;
def flattenFP(self, image_array):
"""
Creates an image with flattened and spliced
INPUT:
image_array = 3d array of image stack
OUTPUT:
fp = 3d array of first peaks, value of 255 at each peak
output_array = 3d array of flattened image stack
"""
dis = 200;
order = 5;
#GET FIRST PEAKS
zs = np.ones([image_array.shape[0],image_array.shape[1]])* float('nan');
for x in range(0, image_array.shape[0]):
for y in range(0, image_array.shape[1]):
fp, nm = self.halfMax1d(image_array[x,y],x,y,0);
#fp, nm = self.max1d(image_array[x,y], x, y, 0);
zs[x,y] = fp[2];
grad = np.gradient(zs.astype(float));
output_array = np.zeros([image_array.shape[0], image_array.shape[1], dis]);
#fp = np.zeros([image_array.shape[0], image_array.shape[1], np.nanmax(zs)-np.nanmin(zs) + 10]);
print('done upa');
#Place in new array
for x in range(0, image_array.shape[0]):
for y in range(0, image_array.shape[1]):
dx, dy, dz = grad[0][x,y], grad[1][x,y], -1.;
unit_dis = sqrt(dx**2+dy**2+dz**2);
m = 20./unit_dis
x0, y0, z0 = x + m*dx, y + m*dy, zs[x,y] + m*dz;
m = (dis-20.)/unit_dis
x1, y1, z1 = x - m*dx, y - m*dy, zs[x,y] - m*dz;
xi = np.linspace(x0, x1, dis);
yi = np.linspace(y0, y1, dis);
zj = np.linspace(z0, z1, dis);
if grad[0][x,y] <= 10. and grad[0][x,y] >= -10. and grad[1][x,y] <= 10. and grad[1][x,y]>= -10. and not np.isnan(zs[x,y]):
output_array[x,y,:] = ndimage.map_coordinates(image_array, np.vstack((xi,yi,zj)), prefilter=False);
#fp[x,y,zs[x,y]+5] = 255;
return [zs, output_array, grad];
ip = ImagePrep()
da = DataAcqusition()
#Import Image (Windows or Linux)
#im = ip.importDiacom("Z:\\Au_Aaron\\06-Guinea Pig OCT\\Raw Data\\AA-01046-N13_Abd_Ear-032416\\N13Ear\\PAT1\\20160324\\2_OCT",[256,356,150]);
#im = ip.importDiacom("//home//yipgroup//Current//Au_Aaron//06-Guinea Pig OCT//Raw Data//AA-01046-N13_Abd_Ear-032416//N13Ear//PAT1//20160324//2_OCT",[1024,1024,300])
#Perform Gaussian Images
t_s =time.clock();
small_psf = [8,8,8];
large_psf = [14, 14, 14];
small_gauss = ip.gaussFilter(im, small_psf);
large_gauss = ip.gaussFilter(im, large_psf);
t_g = time.clock();
im_dog = ip.dogFilter(im, small_psf, large_psf);
t_dog = time.clock();
ip.saveAsTiff('smallGauss.tiff', small_gauss, np.float32);
ip.saveAsTiff('largeGauss.tiff', large_gauss, np.float32);
ip.saveAsTiff('DoG.tiff', im_dog, np.float32);
t_save = time.clock();
print("Gauss Filter: {}, DoG: {}, Save: {}, Total: {}".format(t_g-t_s, t_dog-t_g, t_save-t_dog, t_save-t_s));
#Acquire two layers
t_s2 = time.clock();
fps, dis = ip.saveAsTiff_Dis(im_dog, 'Unflattened', np.float32);
t_2l = time.clock();
#Overlay fps and dis
unflat_overlay = im;
for x in range(im.shape[0]):
    for y in range(im.shape[1]):
        unflat_overlay[x,y,int(fps[x,y])] = 255;
        unflat_overlay[x,y,int(dis[x,y])] = 255;
t_o = time.clock();
ip.saveAsTiff('unflat_overlay.tiff', unflat_overlay, np.float32);
t_save = time.clock();
print("Layers: {}, Overlay: {}, Save: {}, Total: {}".format(t_2l-t_s2, t_o-t_2l, t_save-t_o, t_save-t_s2));
#Flatten
t_s2 = time.clock();
[fps, oa, grad] = da.flattenFP(im_dog);
#Acquire two layers
t_f = time.clock();
fps, dis = ip.saveAsTiff_Dis(oa, 'Flattened', np.float32);
t_2l = time.clock();
#Overlay fps and dis
flat_overlay = oa;
for x in range(oa.shape[0]):
    for y in range(oa.shape[1]):
        flat_overlay[x,y,int(fps[x,y])] = 255;
        flat_overlay[x,y,int(dis[x,y])] = 255;
t_o = time.clock();
ip.saveAsTiff('flat.tiff', oa, np.float32);
ip.saveAsTiff('flat_overlay.tiff', flat_overlay, np.float32);
ip.saveAsTiff("gradx.tiff", grad[0]);
ip.saveAsTiff("grady.tiff", grad[1]);
t_save = time.clock();
print("Flatten: {}, Layers: {}, Overlay: {}, Save: {}, Total {}".format(t_f-t_s2, t_2l-t_f, t_o-t_2l, t_save-t_o, t_save-t_s2));
print("FINISHED: {}".format(t_save-t_s));
"""#img = tifffile.imread("/home/yipgroup/image_store/Au_Aaron/DoG/G8-G14-1.tif");
img = np.transpose(img);
Start_X = 125;
Start_Y = 625;
img = np.transpose(img[:,Start_Y:Start_Y+256,Start_X:Start_X+256]);
#img = ip.testLayer([5,10,20],[20,10,5],[256,256,256])
#img2 = ip.gaussFilter(img, [5,5,5])
img = tifffile.imread("C:\Users\Aaron Au\Desktop\with_pandas\img.tiff")
[fps, oa, grad] = da.flattenFP(img)
#ip.saveAsTiff("/home/yipgroup/image_store/Au_Aaron/DoG/img.tiff", img, np.float32);
ip.saveAsPickle(img, "C:\Users\Aaron Au\Desktop\with_pandas\\img_");
for x in range(0,oa.shape[0]):
for y in range(0, oa.shape[1]):
img[x,y,fps[x,y]] = 255.;
ip.saveAsTiff("C:\Users\Aaron Au\Desktop\with_pandas\\flattened.tiff", oa, np.float32);
ip.saveAsTiff("C:\Users\Aaron Au\Desktop\with_pandas\\fp.tiff", fps);
ip.saveAsTiff("C:\Users\Aaron Au\Desktop\with_pandas\\img_layer.tiff", img, np.float32);
ip.saveAsTiff("C:\Users\Aaron Au\Desktop\with_pandas\\gradx.tiff", grad[0]);
ip.saveAsTiff("C:\Users\Aaron Au\Desktop\with_pandas\\grady.tiff", grad[1]);
ip.saveAsPickle(oa, "C:\Users\Aaron Au\Desktop\with_pandas\\layer_");
print("done all")"""
"""da = DataAcqusition();
ip = ImagePrep();
pic = tifffile.imread("C:\Users\Aaron Au\Desktop\with_pandas\\flattened.tiff")
ip.saveAsPickle(pic, "C:\Users\Aaron Au\Desktop\with_pandas\\test")"""
|
YipLab/GuineaPig
|
GPS_ImAGES.py
|
Python
|
gpl-3.0
| 21,660
|
[
"Gaussian"
] |
0946ba59e010d83955716ae692c887338d552262c2edfe888b79d60fc955add7
|
from math import log
import numpy as np
import pytest
from scipy import optimize
from sklearn.multioutput import MultiOutputRegressor
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from skopt.acquisition import _gaussian_acquisition
from skopt.acquisition import gaussian_acquisition_1D
from skopt.acquisition import gaussian_ei
from skopt.acquisition import gaussian_lcb
from skopt.acquisition import gaussian_pi
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning.gaussian_process.kernels import WhiteKernel
from skopt.space import Space
from skopt.utils import cook_estimator
class ConstSurrogate:
def predict(self, X, return_std=True):
X = np.array(X)
return np.zeros(X.shape[0]), np.ones(X.shape[0])
# This is used to test that given constant acquisition values at
# different points, acquisition functions "EIps" and "PIps"
# prefer candidate points that take lesser time.
# The second estimator mimics the GP regressor that is fit on
# the log of the input.
class ConstantGPRSurrogate(object):
def __init__(self, space):
self.space = space
def fit(self, X, y):
"""
The first estimator returns a constant value.
The second estimator is a gaussian process regressor that
models the logarithm of the time.
"""
X = np.array(X)
y = np.array(y)
gpr = cook_estimator("GP", self.space, random_state=0)
gpr.fit(X, np.log(np.ravel(X)))
self.estimators_ = []
self.estimators_.append(ConstSurrogate())
self.estimators_.append(gpr)
return self
@pytest.mark.fast_test
def test_acquisition_ei_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
ei = gaussian_ei(X, ConstSurrogate(), -0.5, xi=0.)
assert_array_almost_equal(ei, [0.1977966] * 4)
@pytest.mark.fast_test
def test_acquisition_pi_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
pi = gaussian_pi(X, ConstSurrogate(), -0.5, xi=0.)
assert_array_almost_equal(pi, [0.308538] * 4)
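# --- Illustrative note, not part of the original test module ---
# Where the expected constants in the two tests above come from, assuming the
# usual closed forms EI = sigma * (z * Phi(z) + phi(z)) and PI = Phi(z) with
# z = (y_opt - mu - xi) / sigma. For ConstSurrogate (mu = 0, sigma = 1),
# y_opt = -0.5 and xi = 0 give z = -0.5, so PI = Phi(-0.5) ~= 0.308538 and
# EI = -0.5 * Phi(-0.5) + phi(-0.5) ~= 0.1977966.
def _expected_ei_pi(y_opt=-0.5, mu=0.0, sigma=1.0, xi=0.0):
    from scipy.stats import norm
    z = (y_opt - mu - xi) / sigma
    return sigma * (z * norm.cdf(z) + norm.pdf(z)), norm.cdf(z)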
@pytest.mark.fast_test
def test_acquisition_variance_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
var = gaussian_lcb(X, ConstSurrogate(), kappa='inf')
assert_array_almost_equal(var, [-1.0] * 4)
@pytest.mark.fast_test
def test_acquisition_lcb_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
lcb = gaussian_lcb(X, ConstSurrogate(), kappa=0.3)
assert_array_almost_equal(lcb, [-0.3] * 4)
@pytest.mark.fast_test
def test_acquisition_api():
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
y = rng.randn(10)
gpr = GaussianProcessRegressor()
gpr.fit(X, y)
for method in [gaussian_ei, gaussian_lcb, gaussian_pi]:
assert_array_equal(method(X, gpr).shape, 10)
assert_raises(ValueError, method, rng.rand(10), gpr)
def check_gradient_correctness(X_new, model, acq_func, y_opt):
analytic_grad = gaussian_acquisition_1D(
X_new, model, y_opt, acq_func)[1]
num_grad_func = lambda x: gaussian_acquisition_1D(
x, model, y_opt, acq_func=acq_func)[0]
num_grad = optimize.approx_fprime(X_new, num_grad_func, 1e-5)
assert_array_almost_equal(analytic_grad, num_grad, 3)
@pytest.mark.fast_test
def test_acquisition_gradient():
rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = rng.randn(20)
X_new = rng.randn(5)
mat = Matern()
wk = WhiteKernel()
gpr = GaussianProcessRegressor(kernel=mat + wk)
gpr.fit(X, y)
for acq_func in ["LCB", "PI", "EI"]:
check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
@pytest.mark.fast_test
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second(acq_func):
X = np.reshape(np.linspace(4.0, 8.0, 10), (-1, 1))
y = np.vstack((np.ones(10), np.ravel(np.log(X)))).T
cgpr = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
cgpr.fit(X, y)
X_pred = np.reshape(np.linspace(1.0, 11.0, 20), (-1, 1))
indices = np.arange(6)
vals = _gaussian_acquisition(X_pred, cgpr, y_opt=1.0, acq_func=acq_func)
for fast, slow in zip(indices[:-1], indices[1:]):
assert_greater(vals[slow], vals[fast])
acq_wo_time = _gaussian_acquisition(
X, cgpr.estimators_[0], y_opt=1.2, acq_func=acq_func[:2])
acq_with_time = _gaussian_acquisition(
X, cgpr, y_opt=1.2, acq_func=acq_func)
assert_array_almost_equal(acq_wo_time / acq_with_time, np.ravel(X), 2)
def test_gaussian_acquisition_check_inputs():
model = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
with pytest.raises(ValueError) as err:
vals = _gaussian_acquisition(np.arange(1, 5), model)
assert("it must be 2-dimensional" in err.value.args[0])
@pytest.mark.fast_test
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second_gradient(acq_func):
rng = np.random.RandomState(0)
X = rng.randn(20, 10)
# Make the second component large, so that mean_grad and std_grad
# do not become zero.
y = np.vstack((X[:, 0], np.abs(X[:, 0])**3)).T
for X_new in [rng.randn(10), rng.randn(10)]:
gpr = cook_estimator("GP", Space(((-5.0, 5.0),)), random_state=0)
mor = MultiOutputRegressor(gpr)
mor.fit(X, y)
check_gradient_correctness(X_new, mor, acq_func, 1.5)
|
betatim/BlackBox
|
skopt/tests/test_acquisition.py
|
Python
|
bsd-3-clause
| 5,650
|
[
"Gaussian"
] |
61217613a6b176d3ec699dc2a5dd628e268f8583b0ec07b62decd8f0cfb7a0de
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video frame data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox.
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
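# --- Illustrative addition, not part of the original Kamaelia module ---
# A minimal sketch of the uncompressed frame data structure described in the
# module docstring, for a hypothetical 4x2 pixel YUV 4:2:0 frame. Only "yuv",
# "size" and "pixformat" are required; the remaining fields are optional.
_example_frame = {
    "yuv"        : ( "\x80"*8, "\x80"*2, "\x80"*2 ),   # 4x2 Y plane, 2x1 U and V planes (flat grey)
    "size"       : (4, 2),                             # width, height in pixels
    "pixformat"  : "YUV420_planar",
    "frame_rate" : 25.0,
    "interlaced" : 0,
}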
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
Parses YUV4MPEG format binarydata, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
y = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
u = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
v = self.bytesread
frame = { "yuv" : (y,u,v) }
frame.update(seq_params)
frame.update(frame_params)
for _ in self.safesend(frame,"outbox"): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and not self.dataReady("inbox")):
break
yield 1
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
def parse_seq_tags(fields):
"""Parses YUV4MPEG header tags"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "W" in tags and "H" in tags:
params['size'] = (int(tags["W"]), int(tags["H"]))
else:
        raise ValueError("YUV4MPEG header is missing the W (width) and/or H (height) tag")
if "C" in tags:
C = tags["C"]
if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default)
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420paldv": # 4:2:0 with PAL-DV siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "411": # 4:1:1, cosited
params['pixformat'] = "YUV411_planar"
params['chroma_size'] = (params['size'][0]/4, params['size'][1])
elif C == "422": # 4:2:2, cosited
params['pixformat'] = "YUV422_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1])
elif C == "444": # 4:4:4 (no subsampling)
params['pixformat'] = "YUV444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "444alpha": # 4:4:4 with an alpha channel
params['pixformat'] = "YUV4444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "mono": # luma (Y') plane only
params['pixformat'] = "Y_planar"
params['chroma_size'] = (0,0)
else:
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
if "I" in tags:
I = tags["I"]
if I == "?": # unknown (default)
pass
elif I == "p": # progressive/none
params["interlaced"] = False
elif I == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif I == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif I == "m": # mixed-mode: refer to 'I' tag in frame header
pass
if "F" in tags:
m = re.match("^(\d+):(\d+)$",tags["F"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["frame_rate"] = num/denom
if "A" in tags:
m = re.match("^(\d+):(\d+)$",tags["A"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["pixel_aspect"] = num/denom
if "X" in tags:
params["sequence_meta"] = tags["X"]
return params
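# Illustrative note, not part of the original module: for a header line such as
#   "YUV4MPEG2 W720 H576 F25:1 Ip A59:54 C420mpeg2\n"
# the tag string passed to parse_seq_tags yields roughly
#   { 'size' : (720, 576), 'pixformat' : 'YUV420_planar', 'chroma_size' : (360, 288),
#     'interlaced' : False, 'frame_rate' : 25.0, 'pixel_aspect' : 59.0/54 }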
def parse_frame_tags(fields):
"""\
Parses YUV4MPEG frame tags.
"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "I" in tags:
x,y,z = tags["I"][0], tags["I"][1], tags["I"][2]
if x == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "T": # top-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "B": # bottom-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "1": # single progressive frame
params["interlaced"] = False
elif x == "2": # double progressive frame (repeat)
params["interlaced"] = False
elif x == "3": # triple progressive frame (repeat)
params["interlaced"] = False
if y == "p": # fields sampled at same time
params["interlaced"] = False
elif y == "i": # fields sampled at different times
params["interlaced"] = True
if z == "p": # progressive (subsampling over whole frame)
pass
elif z == "i": # interlaced (each field subsampled independently)
pass
elif z == "?": # unknown (allowed only for non-4:2:0 subsampling)
pass
if "X" in tags:
params["meta"] = tags["X"]
return params
class FrameToYUV4MPEG(component):
"""\
FrameToYUV4MPEG() -> new FrameToYUV4MPEG component.
Parses uncompressed video frame data structures sent to its "inbox" inbox
and writes YUV4MPEG format binary data as strings to its "outbox" outbox.
"""
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
ensures self.shutdownMsg contains the highest priority one encountered
so far.
"""
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg,shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canShutdown(self):
"""\
Returns true if the component should terminate when it has finished
processing any pending data.
"""
return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))
def mustShutdown(self):
"""Returns true if the component should terminate immediately."""
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def sendoutbox(self,data):
"""\
Generator.
Sends data out of the "outbox" outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space. It keeps
retrying until it succeeds.
If the component is ordered to immediately terminate then "STOP" is
raised as an exception.
"""
while 1:
try:
self.send(data,"outbox")
return
except noSpaceInBox:
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
self.pause()
yield 1
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
def main(self):
"""Main loop"""
self.shutdownMsg = None
try:
while not self.dataReady("inbox"):
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
frame = self.recv("inbox")
for _ in self.write_header(frame):
yield _
for _ in self.write_frame(frame):
yield _
while 1:
while self.dataReady("inbox"):
frame = self.recv("inbox")
for _ in self.write_frame(frame):
yield _
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
except "STOP":
self.send(self.shutdownMsg,"signal")
def write_header(self, frame):
"""\
Generator.
Sends the YUV4MPEG format header to the "outbox" outbox, based on
attributes of the supplied frame data structure.
"""
format = "YUV4MPEG2 W%d H%d" % tuple(frame['size'])
if frame['pixformat']=="YUV420_planar":
format += " C420mpeg2"
elif frame['pixformat']=="YUV411_planar":
format += " C411"
elif frame['pixformat']=="YUV422_planar":
format += " C422"
elif frame['pixformat']=="YUV444_planar":
format += " C444"
elif frame['pixformat']=="YUV4444_planar":
format += " C444alpha"
elif frame['pixformat']=="Y_planar":
format += " Cmono"
interlace = frame.get("interlaced",False)
topfieldfirst = frame.get("topfieldfirst",False)
if interlace and topfieldfirst:
format += " It"
elif interlace and not topfieldfirst:
format += " Ib"
elif not interlace:
format += " Ip"
rate = frame.get("frame_rate", 0)
if rate > 0:
num,denom = rational(rate)
format += " F%d:%d" % (num,denom)
rate = frame.get("pixel_aspect", 0)
if rate > 0:
num,denom = rational(rate)
format += " A%d:%d" % (num,denom)
if "sequence_meta" in frame:
format += " X"+frame['sequence_meta']
format += "\x0a"
for _ in self.sendoutbox(format):
yield _
def write_frame(self, frame):
"""\
Generator.
Writes out YUV4MPEG format frame marker and data.
"""
for _ in self.sendoutbox("FRAME\x0a"):
yield _
for component in frame['yuv']:
for _ in self.sendoutbox(component):
yield _
__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
YUV4MPEGToFrame(),
FrameToYUV4MPEG(),
YUV4MPEGToFrame(),
VideoOverlay(),
).run()
|
sparkslabs/kamaelia_
|
Sketches/MH/Video/YUV4MPEG.py
|
Python
|
apache-2.0
| 23,091
|
[
"DIRAC"
] |
1eac47f85505db87c47eede3d9d1d6c4e153aca998653939a28343f3ece34af4
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# imports
import os
import time
from six.moves import tkinter
from six.moves import tkinter_font
# PySol imports
from pysollib.mygettext import _
from pysollib.mfxutil import kwdefault, KwStruct
from pysollib.mfxutil import format_time
# from pysollib.util import *
from pysollib.stats import PysolStatsFormatter, ProgressionFormatter
from pysollib.settings import TOP_TITLE
# Toolkit imports
from pysollib.ui.tktile.tkutil import bind, loadImage
from .tkwidget import MfxDialog, MfxMessageDialog
from .tkwidget import MfxScrolledCanvas
# FIXME - this file a quick hack and needs a rewrite
# ************************************************************************
# *
# ************************************************************************
class SingleGame_StatsDialog(MfxDialog):
def __init__(self, parent, title, app, player, gameid, **kw):
self.app = app
self.selected_game = None
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.top_frame = top_frame
self.createBitmaps(top_frame, kw)
#
self.player = player or _("Demo games")
self.top.wm_minsize(200, 200)
self.button = kw.default
#
# createChart = self.create3DBarChart
createChart = self.createPieChart
# createChart = self.createSimpleChart
# if parent.winfo_screenwidth() < 800 or parent.winfo_screenheight() <
# 600:
# createChart = self.createPieChart
# createChart = self.createSimpleChart
#
self.font = self.app.getFont("default")
self.tk_font = tkinter_font.Font(self.top, self.font)
self.font_metrics = self.tk_font.metrics()
self._calc_tabs()
#
won, lost = app.stats.getStats(player, gameid)
createChart(app, won, lost, _("Total"))
won, lost = app.stats.getSessionStats(player, gameid)
createChart(app, won, lost, _("Current session"))
#
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
#
# helpers
#
def _calc_tabs(self):
#
font = self.tk_font
t0 = 160
t = ''
for i in (_("Won:"),
_("Lost:"),
_("Total:")):
if len(i) > len(t):
t = i
t1 = font.measure(t)
# t1 = max(font.measure(_("Won:")),
# font.measure(_("Lost:")),
# font.measure(_("Total:")))
t1 += 10
# t2 = font.measure('99999')+10
t2 = 45
# t3 = font.measure('100%')+10
t3 = 45
tx = (t0, t0+t1+t2, t0+t1+t2+t3)
#
ls = self.font_metrics['linespace']
ls += 5
ls = max(ls, 20)
ty = (ls, 2*ls, 3*ls+15, 3*ls+25)
#
self.tab_x, self.tab_y = tx, ty
def _getPwon(self, won, lost):
pwon, plost = 0.0, 0.0
if won + lost > 0:
pwon = float(won) / (won + lost)
pwon = min(max(pwon, 0.00001), 0.99999)
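            # Illustrative note, not part of the original dialog code: the clamp
            # above keeps pwon strictly inside (0, 1) so the pie-chart arcs drawn
            # later always get a non-zero, non-360-degree extent; e.g. won=3,
            # lost=1 gives pwon=0.75 and a green arc extent of 270 of 360 degrees.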
plost = 1.0 - pwon
return pwon, plost
def _createChartInit(self, text):
w, h = self.tab_x[-1]+20, self.tab_y[-1]+20
c = tkinter.Canvas(self.top_frame, width=w, height=h)
c.pack(side='top', fill='both', expand=False, padx=20, pady=10)
self.canvas = c
# self.fg = c.cget("insertbackground")
self.fg = c.option_get('foreground', '') or c.cget("insertbackground")
#
c.create_rectangle(2, 7, w, h, fill="", outline="#7f7f7f")
label = tkinter.Label(c, text=text, font=self.font, bd=0, padx=3,
pady=1)
dy = int(self.font_metrics['ascent']) - 10
dy //= 2
c.create_window(20, -dy, window=label, anchor="nw")
def _createChartTexts(self, tx, ty, won, lost):
c, tfont, fg = self.canvas, self.font, self.fg
pwon, plost = self._getPwon(won, lost)
#
x = tx[0]
dy = int(self.font_metrics['ascent']) - 10
dy //= 2
c.create_text(
x, ty[0]-dy, text=_("Won:"), anchor="nw", font=tfont, fill=fg)
c.create_text(
x, ty[1]-dy, text=_("Lost:"), anchor="nw", font=tfont, fill=fg)
c.create_text(
x, ty[2]-dy, text=_("Total:"), anchor="nw", font=tfont, fill=fg)
x = tx[1] - 16
c.create_text(
x, ty[0]-dy, text="%d" % won, anchor="ne", font=tfont, fill=fg)
c.create_text(
x, ty[1]-dy, text="%d" % lost, anchor="ne", font=tfont, fill=fg)
c.create_text(
x, ty[2]-dy, text="%d" % (won + lost), anchor="ne", font=tfont,
fill=fg)
y = ty[2] - 11
c.create_line(tx[0], y, x, y, fill=fg)
if won + lost > 0:
x = tx[2]
pw = int(round(100.0 * pwon))
c.create_text(
x, ty[0]-dy, text="%d%%" % pw, anchor="ne",
font=tfont, fill=fg)
c.create_text(
x, ty[1]-dy, text="%d%%" % (100-pw), anchor="ne", font=tfont,
fill=fg)
# def _createChart3DBar(self, canvas, perc, x, y, p, col):
# if perc < 0.005:
# return
# # translate and scale
# p = list(p[:])
# for i in (0, 1, 2, 3):
# p[i] = (x + p[i][0], y + p[i][1])
# j = i + 4
# dx = int(round(p[j][0] * perc))
# dy = int(round(p[j][1] * perc))
# p[j] = (p[i][0] + dx, p[i][1] + dy)
# # draw rects
# def draw_rect(a, b, c, d, col, canvas=canvas, p=p):
# points = (p[a][0], p[a][1], p[b][0], p[b][1],
# p[c][0], p[c][1], p[d][0], p[d][1])
# canvas.create_polygon(points, fill=col)
# draw_rect(0, 1, 5, 4, col[0])
# draw_rect(1, 2, 6, 5, col[1])
# draw_rect(4, 5, 6, 7, col[2])
# # draw lines
# def draw_line(a, b, canvas=canvas, p=p):
# # print a, b, p[a], p[b]
# canvas.create_line(p[a][0], p[a][1], p[b][0], p[b][1])
# draw_line(0, 1)
# draw_line(1, 2)
# draw_line(0, 4)
# draw_line(1, 5)
# draw_line(2, 6)
# # draw_line(3, 7) # test
# draw_line(4, 5)
# draw_line(5, 6)
# draw_line(6, 7)
# draw_line(7, 4)
#
# charts
#
# def createSimpleChart(self, app, won, lost, text):
# #c, tfont, fg = self._createChartInit(frame, 300, 100, text)
# self._createChartInit(300, 100, text)
# c, tfont, fg = self.canvas, self.font, self.fg
# #
# tx = (90, 180, 210)
# ty = (21, 41, 75)
# self._createChartTexts(tx, ty, won, lost)
# def create3DBarChart(self, app, won, lost, text):
# image = app.gimages.stats[0]
# iw, ih = image.width(), image.height()
# #c, tfont, fg = self._createChartInit(frame, iw+160, ih, text)
# self._createChartInit(iw+160, ih, text)
# c, tfont, fg = self.canvas, self.font, self.fg
# pwon, plost = self._getPwon(won, lost)
# #
# tx = (iw+20, iw+110, iw+140)
# yy = ih//2 # + 7
# ty = (yy+21-46, yy+41-46, yy+75-46)
# #
# c.create_image(0, 7, image=image, anchor="nw")
# #
# p = ((0, 0), (44, 6), (62, -9), (20, -14),
# (-3, -118), (-1, -120), (-1, -114), (-4, -112))
# col = ("#00ff00", "#008200", "#00c300")
# self._createChart3DBar(c, pwon, 102, 145+7, p, col)
# p = ((0, 0), (49, 6), (61, -10), (15, -15),
# (1, -123), (3, -126), (4, -120), (1, -118))
# col = ("#ff0000", "#860400", "#c70400")
# self._createChart3DBar(c, plost, 216, 159+7, p, col)
# #
# self._createChartTexts(tx, ty, won, lost)
# c.create_text(tx[0], ty[0]-48, text=self.player, anchor="nw",
# font=tfont, fill=fg)
def createPieChart(self, app, won, lost, text):
# c, tfont, fg = self._createChartInit(frame, 300, 100, text)
#
self._createChartInit(text)
c, tfont = self.canvas, self.font
pwon, plost = self._getPwon(won, lost)
#
# tx = (160, 250, 280)
# ty = (21, 41, 75)
#
tx, ty = self.tab_x, self.tab_y
if won + lost > 0:
# s, ewon, elost = 90.0, -360.0 * pwon, -360.0 * plost
s, ewon, elost = 0.0, 360.0 * pwon, 360.0 * plost
c.create_arc(
20, 25+9, 110, 75+9, fill="#007f00", start=s, extent=ewon)
c.create_arc(
20, 25+9, 110, 75+9, fill="#7f0000", start=s+ewon,
extent=elost)
c.create_arc(
20, 25, 110, 75, fill="#00ff00", start=s, extent=ewon)
c.create_arc(
20, 25, 110, 75, fill="#ff0000", start=s+ewon,
extent=elost)
x, y = tx[0] - 25, ty[0]
c.create_rectangle(x, y, x+10, y+10, fill="#00ff00")
y = ty[1]
c.create_rectangle(x, y, x+10, y+10, fill="#ff0000")
else:
c.create_oval(20, 25+10, 110, 75+10, fill="#7f7f7f")
c.create_oval(20, 25, 110, 75, fill="#f0f0f0")
c.create_text(
65, 50, text=_("No games"), anchor="center",
font=tfont, fill="#bfbfbf")
#
self._createChartTexts(tx, ty, won, lost)
#
#
#
def initKw(self, kw):
kw = KwStruct(
kw,
strings=(_("&OK"),
(_("&All games..."), 102),
(TOP_TITLE+"...", 105),
(_("&Reset..."), 302)), default=0,
image=self.app.gimages.logos[5],
padx=10, pady=10,
)
return MfxDialog.initKw(self, kw)
# ************************************************************************
# *
# ************************************************************************
class CanvasFormatter(PysolStatsFormatter):
def __init__(self, app, canvas, parent_window, font, w, h):
self.app = app
self.canvas = canvas
self.parent_window = parent_window
# self.fg = canvas.cget("insertbackground")
self.fg = canvas.option_get('foreground', '') or \
canvas.cget("insertbackground")
self.font = font
self.w = w
self.h = h
# self.x = self.y = 0
self.gameid = None
self.gamenumber = None
self.canvas.config(yscrollincrement=h)
self._tabs = None
def _addItem(self, id):
self.canvas.dialog.nodes[id] = (self.gameid, self.gamenumber)
def _calc_tabs(self, arg):
tw = 15*self.w
# tw = 160
self._tabs = [tw]
font = tkinter_font.Font(self.canvas, self.font)
for t in arg[1:]:
tw = font.measure(t)+20
self._tabs.append(tw)
self._tabs.append(10)
def pstats(self, y, args, gameid=None):
x = 1
t1, t2, t3, t4, t5, t6, t7 = args
self.gameid = gameid
if gameid is None: # header
self.gameid = 'header'
for var, text, anchor, tab in (
('name', t1, 'nw', self._tabs[0]+self._tabs[1]),
('played', t2, 'ne', self._tabs[2]),
('won', t3, 'ne', self._tabs[3]),
('lost', t4, 'ne', self._tabs[4]),
('time', t5, 'ne', self._tabs[5]),
('moves', t6, 'ne', self._tabs[6]),
('percent', t7, 'ne', self._tabs[7]),
):
self.gamenumber = None
if gameid is None: # header
self.gamenumber = var
id = self.canvas.create_text(x, y, text=text, anchor=anchor,
font=self.font, fill=self.fg)
self._addItem(id)
x += tab
self.pstats_perc(x, y, t7)
def pstats_perc(self, x, y, t):
if not (t and "0" <= t[0] <= "9"):
return
perc = int(round(float(str(t))))
if perc < 1:
return
rx, ry, rw, rh = x, y+1, 2 + 8*10, self.h-5
if 1:
w = int(round(rw*perc/100.0))
if 1 and w < 1:
return
if w > 0:
w = max(3, w)
w = min(rw - 2, w)
self.canvas.create_rectangle(
rx, ry, rx+w, ry+rh, width=1,
fill="#00ff00", outline="#000000")
if w < rw:
self.canvas.create_rectangle(
rx+w, ry, rx+rw, ry+rh, width=1,
fill="#ff0000", outline="#000000")
return
# fill = "#ffffff"
# fill = self.canvas["bg"]
fill = None
self.canvas.create_rectangle(rx, ry, rx+rw, ry+rh, width=1,
fill=fill, outline="#808080")
if 1:
rx, rw = rx + 1, rw - 1
ry, rh = ry + 1, rh - 1
w = int(round(rw*perc/100.0))
if w > 0:
self.canvas.create_rectangle(rx, ry, rx+w, ry+rh, width=0,
fill="#00ff00", outline="")
if w < rw:
self.canvas.create_rectangle(
rx+w, ry, rx+rw, ry+rh, width=0,
fill="#ff0000", outline="")
return
p = 1.0
ix = rx + 2
for i in (1, 11, 21, 31, 41, 51, 61, 71, 81, 91):
if perc < i:
break
# c = "#ff8040"
r, g, b = 255, 128*p, 64*p
c = "#%02x%02x%02x" % (int(r), int(g), int(b))
self.canvas.create_rectangle(ix, ry+2, ix+6, ry+rh-2, width=0,
fill=c, outline=c)
ix = ix + 8
p = max(0.0, p - 0.1)
def writeStats(self, player, sort_by='name'):
header = self.getStatHeader()
y = 0
if self._tabs is None:
self._calc_tabs(header)
self.pstats(y, header)
#
y += 2*self.h
for result in self.getStatResults(player, sort_by):
gameid = result.pop()
self.pstats(y, result, gameid)
y += self.h
#
y += self.h
total, played, won, lost, time_, moves, perc = self.getStatSummary()
s = _("Total (%d out of %d games)") % (played, total)
self.pstats(y, (s, won+lost, won, lost, time_, moves, perc))
def writeLog(self, player, prev_games):
y = 0
header = self.getLogHeader()
t1, t2, t3, t4 = header
s = "%-25s %-20s %-17s %s" % header
id = self.canvas.create_text(1, y, text=s, anchor="nw",
font=self.font, fill=self.fg)
self._addItem(id)
y += 2*self.h
if not player or not prev_games:
return 0
for result in self.getLogResults(player, prev_games):
s = "%-25s %-20s %-17s %s" % tuple(result[:4])
id = self.canvas.create_text(1, y, text=s, anchor="nw",
font=self.font, fill=self.fg)
y += self.h
return 1
def writeFullLog(self, player):
prev_games = self.app.stats.prev_games.get(player)
return self.writeLog(player, prev_games)
def writeSessionLog(self, player):
prev_games = self.app.stats.session_games.get(player)
return self.writeLog(player, prev_games)
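# A small standalone sketch (not part of PySolFC) of the bar-width arithmetic
# used by CanvasFormatter.pstats_perc above: the win percentage is mapped onto
# a bar of rw = 2 + 8*10 pixels and clamped so that any non-zero value stays
# visible (at least 3 px) and never overflows the outline (at most rw - 2 px).
def _percentage_bar_width(perc, rw=2 + 8 * 10):
    w = int(round(rw * perc / 100.0))
    if w < 1:
        return 0
    return min(rw - 2, max(3, w))
# e.g. _percentage_bar_width(1) == 3, _percentage_bar_width(50) == 41,
#      _percentage_bar_width(100) == 80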
# ************************************************************************
# *
# ************************************************************************
class AllGames_StatsDialogScrolledCanvas(MfxScrolledCanvas):
pass
class AllGames_StatsDialog(MfxDialog):
YVIEW = 0
FONT_TYPE = "default"
def __init__(self, parent, title, app, player, **kw):
lines = 25
# if parent and parent.winfo_screenheight() < 600:
# lines = 20
#
self.font = app.getFont(self.FONT_TYPE)
font = tkinter_font.Font(parent, self.font)
self.font_metrics = font.metrics()
self.CHAR_H = self.font_metrics['linespace']
self.CHAR_W = font.measure('M')
self.app = app
#
self.player = player
self.title = title
self.sort_by = 'name'
self.selected_game = None
#
kwdefault(kw, width=self.CHAR_W*64, height=lines*self.CHAR_H)
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
#
self.top.wm_minsize(200, 200)
self.button = kw.default
#
self.sc = AllGames_StatsDialogScrolledCanvas(
top_frame, width=kw.width, height=kw.height)
self.sc.pack(fill='both', expand=True, padx=kw.padx, pady=kw.pady)
#
self.nodes = {}
self.canvas = self.sc.canvas
self.canvas.dialog = self
bind(self.canvas, "<1>", self.singleClick)
self.fillCanvas(player, title)
bbox = self.canvas.bbox("all")
# print bbox
# self.canvas.config(scrollregion=bbox)
dx, dy = 4, 0
self.canvas.config(scrollregion=(-dx, -dy, bbox[2]+dx, bbox[3]+dy))
self.canvas.xview_moveto(-dx)
self.canvas.yview_moveto(self.YVIEW)
#
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
def initKw(self, kw):
kw = KwStruct(
kw,
strings=(_("&OK"),
(_("&Save to file"), 202),
(_("&Reset all..."), 301),),
default=0,
resizable=True,
padx=10, pady=10,
# width=900,
)
return MfxDialog.initKw(self, kw)
def destroy(self):
self.app = None
self.canvas.dialog = None
self.nodes = {}
self.sc.destroy()
MfxDialog.destroy(self)
def rearrange(self, sort_by):
if self.sort_by == sort_by:
return
self.sort_by = sort_by
self.fillCanvas(self.player, self.title)
def singleClick(self, event=None):
id = self.canvas.find_withtag('current')
if not id:
return
# print 'singleClick:', id, self.nodes.get(id[0])
gameid, gamenumber = self.nodes.get(id[0], (None, None))
if gameid == 'header':
if self.sort_by == gamenumber:
return
self.sort_by = gamenumber
self.fillCanvas(self.player, self.title)
return
# FIXME / TODO
return
if gameid and gamenumber:
print(gameid, gamenumber)
elif gameid:
print(gameid)
#
#
#
def fillCanvas(self, player, header):
self.canvas.delete('all')
self.nodes = {}
writer = CanvasFormatter(self.app, self.canvas, self,
self.font, self.CHAR_W, self.CHAR_H)
writer.writeStats(player, self.sort_by)
# ************************************************************************
# *
# ************************************************************************
class FullLog_StatsDialog(AllGames_StatsDialog):
YVIEW = 1
FONT_TYPE = "fixed"
def fillCanvas(self, player, header):
writer = CanvasFormatter(self.app, self.canvas, self,
self.font, self.CHAR_W, self.CHAR_H)
writer.writeFullLog(player)
def initKw(self, kw):
kw = KwStruct(kw,
strings=(_("&OK"), (_("Session &log..."), 104),
(_("&Save to file"), 203)), default=0,
width=76*self.CHAR_W,
)
return AllGames_StatsDialog.initKw(self, kw)
class SessionLog_StatsDialog(FullLog_StatsDialog):
def fillCanvas(self, player, header):
PysolStatsFormatter()
writer = CanvasFormatter(self.app, self.canvas, self,
self.font, self.CHAR_W, self.CHAR_H)
writer.writeSessionLog(player)
def initKw(self, kw):
kw = KwStruct(
kw,
strings=(_("&OK"), (_("&Full log..."), 103),
(_("&Save to file"), 204)),
default=0,)
return FullLog_StatsDialog.initKw(self, kw)
# ************************************************************************
# *
# ************************************************************************
class Status_StatsDialog(MfxMessageDialog):
def __init__(self, parent, game):
stats, gstats = game.stats, game.gstats
w1 = w2 = ""
n = 0
for s in game.s.foundations:
n = n + len(s.cards)
w1 = (_("Highlight piles: ") + str(stats.highlight_piles) + "\n" +
_("Highlight cards: ") + str(stats.highlight_cards) + "\n" +
_("Highlight same rank: ") + str(stats.highlight_samerank) +
"\n")
if game.s.talon:
if game.gameinfo.redeals != 0:
w2 = w2 + _("\nRedeals: ") + str(game.s.talon.round - 1)
w2 = w2 + _("\nCards in Talon: ") + str(len(game.s.talon.cards))
if game.s.waste and game.s.waste not in game.s.foundations:
w2 = w2 + _("\nCards in Waste: ") + str(len(game.s.waste.cards))
if game.s.foundations:
w2 = w2 + _("\nCards in Foundations: ") + str(n)
#
date = time.strftime(
"%Y-%m-%d %H:%M",
time.localtime(game.gstats.start_time))
MfxMessageDialog.__init__(
self, parent, title=_("Game status"),
text=game.getTitleName() + "\n" +
game.getGameNumber(format=1) + "\n" +
_("Playing time: ") + game.getTime() + "\n" +
_("Started at: ") + date + "\n\n" +
_("Moves: ") + str(game.moves.index) + "\n" +
_("Undo moves: ") + str(stats.undo_moves) + "\n" +
_("Bookmark moves: ") + str(gstats.goto_bookmark_moves) + "\n" +
_("Demo moves: ") + str(stats.demo_moves) + "\n" +
_("Total player moves: ") + str(stats.player_moves) + "\n" +
_("Total moves in this game: ") + str(stats.total_moves) + "\n" +
_("Hints: ") + str(stats.hints) + "\n" +
"\n" +
w1 + w2,
strings=(_("&OK"),
(_("&Statistics..."), 101),
(TOP_TITLE+"...", 105), ),
image=game.app.gimages.logos[3],
image_side="left", image_padx=20,
padx=20,
)
# ************************************************************************
# *
# ************************************************************************
class _TopDialog(MfxDialog):
def __init__(self, parent, title, top, **kw):
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
cnf = {'master': top_frame,
'highlightthickness': 1,
'highlightbackground': 'black',
}
frame = tkinter.Frame(**cnf)
frame.pack(expand=True, fill='both', padx=10, pady=10)
frame.columnconfigure(0, weight=1)
cnf['master'] = frame
cnf['text'] = _('N')
label = tkinter.Label(**cnf)
label.grid(row=0, column=0, sticky='ew')
cnf['text'] = _('Game number')
label = tkinter.Label(**cnf)
label.grid(row=0, column=1, sticky='ew')
cnf['text'] = _('Started at')
label = tkinter.Label(**cnf)
label.grid(row=0, column=2, sticky='ew')
cnf['text'] = _('Result')
label = tkinter.Label(**cnf)
label.grid(row=0, column=3, sticky='ew')
row = 1
for i in top:
# N
cnf['text'] = str(row)
label = tkinter.Label(**cnf)
label.grid(row=row, column=0, sticky='ew')
# Game number
cnf['text'] = '#'+str(i.game_number)
label = tkinter.Label(**cnf)
label.grid(row=row, column=1, sticky='ew')
# Start time
t = time.strftime(
'%Y-%m-%d %H:%M', time.localtime(i.game_start_time))
cnf['text'] = t
label = tkinter.Label(**cnf)
label.grid(row=row, column=2, sticky='ew')
# Result
if isinstance(i.value, float):
# time
s = format_time(i.value)
else:
# moves
s = str(i.value)
cnf['text'] = s
label = tkinter.Label(**cnf)
label.grid(row=row, column=3, sticky='ew')
row += 1
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
def initKw(self, kw):
kw = KwStruct(kw, strings=(_('&OK'),), default=0, separator=True)
return MfxDialog.initKw(self, kw)
class Top_StatsDialog(MfxDialog):
def __init__(self, parent, title, app, player, gameid, **kw):
self.app = app
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
frame = tkinter.Frame(top_frame)
frame.pack(expand=True, fill='both', padx=10, pady=10)
frame.columnconfigure(0, weight=1)
if (player in app.stats.games_stats and
gameid in app.stats.games_stats[player] and
app.stats.games_stats[player][gameid].time_result.top):
tkinter.Label(frame, text=_('Minimum')).grid(row=0, column=1)
tkinter.Label(frame, text=_('Maximum')).grid(row=0, column=2)
tkinter.Label(frame, text=_('Average')).grid(row=0, column=3)
# tkinter.Label(frame, text=_('Total')).grid(row=0, column=4)
s = app.stats.games_stats[player][gameid]
row = 1
ll = [
(_('Playing time:'),
format_time(s.time_result.min),
format_time(s.time_result.max),
format_time(s.time_result.average),
format_time(s.time_result.total),
s.time_result.top,
),
(_('Moves:'),
s.moves_result.min,
s.moves_result.max,
round(s.moves_result.average, 2),
s.moves_result.total,
s.moves_result.top,
),
(_('Total moves:'),
s.total_moves_result.min,
s.total_moves_result.max,
round(s.total_moves_result.average, 2),
s.total_moves_result.total,
s.total_moves_result.top,
),
]
# if s.score_result.min:
# ll.append(('Score:',
# s.score_result.min,
# s.score_result.max,
# round(s.score_result.average, 2),
# s.score_result.top,
# ))
# if s.score_casino_result.min:
# ll.append(('Casino Score:',
# s.score_casino_result.min,
# s.score_casino_result.max,
# round(s.score_casino_result.average, 2), ))
for l, min, max, avr, tot, top in ll:
tkinter.Label(frame, text=l).grid(row=row, column=0)
tkinter.Label(frame, text=str(min)).grid(row=row, column=1)
tkinter.Label(frame, text=str(max)).grid(row=row, column=2)
tkinter.Label(frame, text=str(avr)).grid(row=row, column=3)
# tkinter.Label(frame, text=str(tot)).grid(row=row, column=4)
b = tkinter.Button(frame, text=TOP_TITLE+' ...', width=10,
command=lambda top=top: self.showTop(top))
b.grid(row=row, column=5)
row += 1
else:
tkinter.Label(frame, text=_('No TOP for this game')).pack()
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
def showTop(self, top):
# print top
_TopDialog(self.top, TOP_TITLE, top)
def initKw(self, kw):
kw = KwStruct(kw,
strings=(_('&OK'),),
default=0,
image=self.app.gimages.logos[4],
separator=True,
)
return MfxDialog.initKw(self, kw)
# ************************************************************************
# *
# ************************************************************************
class ProgressionDialog(MfxDialog):
def __init__(self, parent, title, app, player, gameid, **kw):
font_name = app.getFont('default')
font = tkinter_font.Font(parent, font_name)
tkfont = tkinter_font.Font(parent, font)
font_metrics = font.metrics()
measure = tkfont.measure
self.text_height = font_metrics['linespace']
self.text_width = measure('XX.XX.XX')
self.items = []
self.formatter = ProgressionFormatter(app, player, gameid)
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
frame = tkinter.Frame(top_frame)
frame.pack(expand=True, fill='both', padx=5, pady=10)
frame.columnconfigure(0, weight=1)
# constants
self.canvas_width, self.canvas_height = 600, 250
if parent.winfo_screenwidth() < 800 or \
parent.winfo_screenheight() < 600:
self.canvas_width, self.canvas_height = 400, 200
self.xmargin, self.ymargin = 10, 10
self.graph_dx, self.graph_dy = 10, 10
self.played_color = '#ff7ee9'
self.won_color = '#00dc28'
self.percent_color = 'blue'
# create canvas
self.canvas = canvas = tkinter.Canvas(frame, bg='#dfe8ff',
highlightthickness=1,
highlightbackground='black',
width=self.canvas_width,
height=self.canvas_height)
canvas.pack(side='left', padx=5)
#
dir = os.path.join('images', 'stats')
try:
fn = app.dataloader.findImage('progression', dir)
self.bg_image = loadImage(fn)
canvas.create_image(0, 0, image=self.bg_image, anchor='nw')
except Exception:
pass
#
tw = max(measure(_('Games/day')),
measure(_('Games/week')),
measure(_('% won')))
self.left_margin = self.xmargin+tw//2
self.right_margin = self.xmargin+tw//2
self.top_margin = 15+self.text_height
self.bottom_margin = 15+self.text_height+10+self.text_height
#
x0, y0 = self.left_margin, self.canvas_height-self.bottom_margin
x1, y1 = self.canvas_width-self.right_margin, self.top_margin
canvas.create_rectangle(x0, y0, x1, y1, fill='white')
# horizontal axis
canvas.create_line(x0, y0, x1, y0, width=3)
# left vertical axis
canvas.create_line(x0, y0, x0, y1, width=3)
t = _('Games/day')
self.games_text_id = canvas.create_text(x0-4, y1-4, anchor='s', text=t)
# right vertical axis
canvas.create_line(x1, y0, x1, y1, width=3)
canvas.create_text(x1+4, y1-4, anchor='s', text=_('% won'))
# caption
d = self.text_height
x, y = self.xmargin, self.canvas_height-self.ymargin
canvas.create_rectangle(x, y, x+d, y-d, outline='black',
fill=self.played_color)
x += d+5
canvas.create_text(x, y, anchor='sw', text=_('Played'))
x += measure(_('Played'))+20
canvas.create_rectangle(x, y, x+d, y-d, outline='black',
fill=self.won_color)
x += d+5
canvas.create_text(x, y, anchor='sw', text=_('Won'))
x += measure(_('Won'))+20
canvas.create_rectangle(x, y, x+d, y-d, outline='black',
fill=self.percent_color)
x += d+5
canvas.create_text(x, y, anchor='sw', text=_('% won'))
# right frame
right_frame = tkinter.Frame(frame)
right_frame.pack(side='left', fill='x', padx=5)
self.all_games_variable = var = tkinter.StringVar()
var.set('all')
b = tkinter.Radiobutton(right_frame, text=_('All games'),
variable=var, value='all',
command=self.updateGraph,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
b = tkinter.Radiobutton(right_frame, text=_('Current game'),
variable=var, value='current',
command=self.updateGraph,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
label_frame = tkinter.LabelFrame(right_frame, text=_('Statistics for'))
label_frame.pack(side='top', fill='x', pady=10)
self.variable = var = tkinter.StringVar()
var.set('week')
for v, t in (
('week', _('Last 7 days')),
('month', _('Last month')),
('year', _('Last year')),
('all', _('All time')),
):
b = tkinter.Radiobutton(label_frame, text=t, variable=var, value=v,
command=self.updateGraph,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
label_frame = tkinter.LabelFrame(right_frame, text=_('Show graphs'))
label_frame.pack(side='top', fill='x')
self.played_graph_var = tkinter.BooleanVar()
self.played_graph_var.set(True)
b = tkinter.Checkbutton(label_frame, text=_('Played'),
command=self.updateGraph,
variable=self.played_graph_var,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
self.won_graph_var = tkinter.BooleanVar()
self.won_graph_var.set(True)
b = tkinter.Checkbutton(label_frame, text=_('Won'),
command=self.updateGraph,
variable=self.won_graph_var,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
self.percent_graph_var = tkinter.BooleanVar()
self.percent_graph_var.set(True)
b = tkinter.Checkbutton(label_frame, text=_('% won'),
command=self.updateGraph,
variable=self.percent_graph_var,
justify='left', anchor='w'
)
b.pack(fill='x', expand=True, padx=3, pady=1)
self.updateGraph()
focus = self.createButtons(bottom_frame, kw)
self.mainloop(focus, kw.timeout)
def initKw(self, kw):
kw = KwStruct(kw, strings=(_('&OK'),), default=0, separator=True)
return MfxDialog.initKw(self, kw)
def updateGraph(self, *args):
interval = self.variable.get()
canvas = self.canvas
if self.items:
canvas.delete(*self.items)
self.items = []
all_games = (self.all_games_variable.get() == 'all')
result = self.formatter.getResults(interval, all_games)
if interval in ('week', 'month'):
t = _('Games/day')
else:
t = _('Games/week')
canvas.itemconfig(self.games_text_id, text=t)
graph_width = self.canvas_width-self.left_margin-self.right_margin
graph_height = self.canvas_height-self.top_margin-self.bottom_margin
dx = (graph_width-2*self.graph_dx)//(len(result)-1)
graph_dx = (graph_width-(len(result)-1)*dx)//2
dy = (graph_height-self.graph_dy)//5
x0, y0 = self.left_margin, self.canvas_height-self.bottom_margin
x1, y1 = self.canvas_width-self.right_margin, self.top_margin
td = self.text_height//2
# vertical scale
x = x0+graph_dx
xx = -100
for res in result:
if res[0] is not None and x > xx+self.text_width+4:
# id = canvas.create_line(x, y0, x, y0-5, width=3)
# self.items.append(id)
id = canvas.create_line(x, y0, x, y1, stipple='gray50')
self.items.append(id)
id = canvas.create_text(x, y0+td, anchor='n', text=res[0])
self.items.append(id)
xx = x
else:
id = canvas.create_line(x, y0, x, y0-3, width=1)
self.items.append(id)
x += dx
# horizontal scale
max_games = max([i[1] for i in result])
games_delta = max_games//5+1
percent = 0
games = 0
for y in range(y0, y1, -dy):
if y != y0:
id = canvas.create_line(x0, y, x1, y, stipple='gray50')
self.items.append(id)
id = canvas.create_text(x0-td, y, anchor='e', text=str(games))
self.items.append(id)
id = canvas.create_text(x1+td, y, anchor='w', text=str(percent))
self.items.append(id)
games += games_delta
percent += 20
# draw result
games_resolution = float(dy)/games_delta
percent_resolution = float(dy)/20
played_coords = []
won_coords = []
percent_coords = []
x = x0+graph_dx
for res in result:
played, won = res[1], res[2]
y = y0 - int(games_resolution*played)
played_coords += [x, y]
y = y0 - int(games_resolution*won)
won_coords += [x, y]
if played > 0:
percent = int(100.*won/played)
else:
percent = 0
y = y0 - int(percent_resolution*percent)
percent_coords += [x, y]
x += dx
if self.played_graph_var.get():
id = canvas.create_line(fill=self.played_color, width=3,
*played_coords)
self.items.append(id)
if self.won_graph_var.get():
id = canvas.create_line(fill=self.won_color, width=3,
*won_coords)
self.items.append(id)
if self.percent_graph_var.get():
id = canvas.create_line(fill=self.percent_color, width=3,
*percent_coords)
self.items.append(id)
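# A standalone sketch (not part of PySolFC) of the coordinate mapping used by
# ProgressionDialog.updateGraph above: one vertical grid step of dy pixels
# stands for games_delta games on the left axis and for 20 percentage points
# on the right axis, so a (played, won) pair maps to canvas y-coordinates as
# follows.
def _graph_y_coords(y0, dy, games_delta, played, won):
    games_resolution = float(dy) / games_delta
    percent_resolution = float(dy) / 20
    percent = int(100.0 * won / played) if played > 0 else 0
    return (y0 - int(games_resolution * played),      # y of the "played" point
            y0 - int(games_resolution * won),         # y of the "won" point
            y0 - int(percent_resolution * percent))   # y of the "% won" point
# e.g. _graph_y_coords(y0=200, dy=30, games_delta=4, played=10, won=5)
#      returns (125, 163, 125)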
|
jimsize/PySolFC
|
pysollib/tk/tkstats.py
|
Python
|
gpl-3.0
| 40,666
|
[
"CASINO"
] |
cdd65bad2d3995e231db3d606437c596baa124f4913c44729eac1885fb99db95
|
"""
Create and put Requests to archive files.
**List of operations**
#. Optionally replicate files to SourceSE
#. ArchiveFiles: Create a tarball from input files, upload tarball to TarballSE
#. ReplicateAndRegister Tarball to TargetSE
#. Optionally: Add LFNs to an ArchiveSE
#. Optionally: Check for Tarball Migration
#. Remove all other replicas for these files, or remove all files
#. Remove original replica of Tarball
Will copy all the respective files and place them into tarballs. The tarballs are then migrated to
another storage element. Once the tarball is migrated to tape, the original files are
removed. Optionally the original files can be registered in a special archive SE, so that their
metadata is preserved.
**Related Options**
This script only works if the ``ArchiveFiles`` and ``CheckMigration`` RequestHandlers are configured.
To prevent submission of broken requests the script needs to be enabled in the Operations section of the CS
* Operations/DataManagement/ArchiveFiles/Enabled=True
Default values for any of the command line options can also be set in the CS
* Operations/DataManagement/ArchiveFiles/ArchiveSE
* Operations/DataManagement/ArchiveFiles/TarballSE
* Operations/DataManagement/ArchiveFiles/SourceSE
* Operations/DataManagement/ArchiveFiles/MaxFiles
* ...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import DIRAC
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
sLog = gLogger.getSubLogger('AddArchive')
__RCSID__ = '$Id$'
MAX_SIZE = 2 * 1024 * 1024 * 1024 # 2 GB
MAX_FILES = 2000
class CreateArchiveRequest(object):
"""Create the request to archive files."""
def __init__(self):
"""Constructor."""
self._fcClient = None
self._reqClient = None
self.switches = {}
self.requests = []
self.lfnList = []
self.metaData = None
self.options = [('A', 'ArchiveSE', 'SE for registering archive files at'),
('I', 'TarballSE', 'SE to initially upload tarball'),
('P', 'Path', 'LFN path to folder, all files in the folder will be archived'),
('N', 'Name', 'Name of the Tarball, if not given: Path_Tars/Path_N.tar'
' will be used to store tarballs'),
('L', 'List', 'File containing list of LFNs to archive, requires Name to be given'),
('', 'MaxFiles', 'Maximum number to put in one tarball: Default %d' % MAX_FILES),
('', 'MaxSize', 'Maximum number of Bytes to put in one tarball: Default %d' % MAX_SIZE),
('S', 'SourceSE', 'Where to remove the LFNs from'),
('T', 'TargetSE', 'Where to move the Tarball to'),
]
self.flags = [('M', 'ReplicateTarball', 'Replicate the tarball'),
('C', 'CheckMigration',
'Ensure the tarball is migrated to tape before removing any files or replicas'),
('D', 'RemoveReplicas', 'Remove Replicas from non-ArchiveSE'),
('U', 'RemoveFiles', 'Remove Archived files completely'),
('R', 'RegisterDescendent', 'Register the Tarball as a descendent of the archived LFNs'),
('', 'AllowReplication', 'Enable first replicating to Source-SE'),
('', 'SourceOnly', 'Only treat files that are already at the Source-SE'),
('X', 'Execute', 'Put Requests, else dryrun'),
]
self.registerSwitchesAndParseCommandLine()
self.switches['MaxSize'] = int(self.switches.setdefault('MaxSize', MAX_SIZE))
self.switches['MaxFiles'] = int(self.switches.setdefault('MaxFiles', MAX_FILES))
self.getLFNList()
self.getLFNMetadata()
self.lfnChunks = []
self.replicaSEs = []
@property
def fcClient(self):
"""Return FileCatalogClient."""
if not self._fcClient:
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
self._fcClient = FileCatalog()
return self._fcClient
@property
def reqClient(self):
"""Return RequestClient."""
if not self._reqClient:
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
self._reqClient = ReqClient()
return self._reqClient
@property
def dryRun(self):
"""Return dry run flag."""
return self.switches['DryRun']
@property
def targetSE(self):
"""Return the list of targetSE."""
return self.switches['TargetSE']
@property
def sourceSEs(self):
"""Return the list of sourceSEs."""
return self.switches['SourceSE']
@property
def name(self):
"""Return the name of the Request."""
return self.switches.get('Name', None)
@property
def lfnFolderPath(self):
"""Return the lfn folder path where to find the files of the request."""
return self.switches.get('Path', None)
def registerSwitchesAndParseCommandLine(self):
"""Register the default plus additional parameters and parse options.
    The options and flags defined in ``self.options`` and ``self.flags`` are
    registered, default values are read from the Operations section of the CS,
    and the parsed switches are stored in ``self.switches``.
"""
for short, longOption, doc in self.options:
Script.registerSwitch(short + ':' if short else '', longOption + '=', doc)
for short, longOption, doc in self.flags:
Script.registerSwitch(short, longOption, doc)
self.switches[longOption] = False
Script.parseCommandLine()
if Script.getPositionalArgs():
Script.showHelp(exitCode=1)
ops = Operations()
if not ops.getValue('DataManagement/ArchiveFiles/Enabled', False):
sLog.error('The "ArchiveFiles" operation is not enabled, contact your administrator!')
DIRAC.exit(1)
for _short, longOption, _doc in self.options:
defaultValue = ops.getValue('DataManagement/ArchiveFiles/%s' % longOption, None)
if defaultValue:
sLog.verbose('Found default value in the CS for %r with value %r' % (longOption, defaultValue))
self.switches[longOption] = defaultValue
for _short, longOption, _doc in self.flags:
defaultValue = ops.getValue('DataManagement/ArchiveFiles/%s' % longOption, False)
if defaultValue:
sLog.verbose('Found default value in the CS for %r with value %r' % (longOption, defaultValue))
self.switches[longOption] = defaultValue
for switch in Script.getUnprocessedSwitches():
for short, longOption, doc in self.options:
if switch[0] == short or switch[0].lower() == longOption.lower():
sLog.verbose('Found switch %r with value %r' % (longOption, switch[1]))
self.switches[longOption] = switch[1]
break
for short, longOption, doc in self.flags:
if switch[0] == short or switch[0].lower() == longOption.lower():
self.switches[longOption] = True
break
self.checkSwitches()
self.switches['DryRun'] = not self.switches.get('Execute', False)
self.switches['SourceSE'] = self.switches.get('SourceSE', '').split(',')
def getLFNList(self):
"""Get list of LFNs.
Either read the provided file, or get the files found beneath the provided folder.
    The resulting LFNs are stored in ``self.lfnList``.
:raises: RuntimeError, ValueError
"""
if self.switches.get('List'):
if os.path.exists(self.switches.get('List')):
self.lfnList = list(set([line.split()[0]
for line in open(self.switches.get('List')).read().splitlines()]))
else:
raise ValueError('%s not a file' % self.switches.get('List'))
elif self.lfnFolderPath:
path = self.lfnFolderPath
sLog.debug('Check if %r is a directory' % path)
isDir = returnSingleResult(self.fcClient.isDirectory(path))
sLog.debug('Result: %r' % isDir)
if not isDir['OK'] or not isDir['Value']:
sLog.error('Path is not a directory', isDir.get('Message', ''))
raise RuntimeError('Path %r is not a directory' % path)
sLog.notice('Looking for files in %r' % path)
metaDict = {'SE': self.sourceSEs[0]} if self.switches.get('SourceOnly') else {}
lfns = self.fcClient.findFilesByMetadata(metaDict=metaDict, path=path)
if not lfns['OK']:
sLog.error('Could not find files')
raise RuntimeError(lfns['Message'])
self.lfnList = lfns['Value']
if self.lfnList:
sLog.notice('Will create request(s) with %d lfns' % len(self.lfnList))
if len(self.lfnList) == 1:
raise RuntimeError('Only 1 file in the list, aborting!')
return
raise ValueError('"Path" or "List" need to be provided!')
def putOrRunRequests(self):
"""Run or put requests."""
requestIDs = []
if self.dryRun:
sLog.notice('Would have created %d requests' % len(self.requests))
for reqID, req in enumerate(self.requests):
sLog.notice('Request %d:' % reqID)
for opID, op in enumerate(req):
sLog.notice(' Operation %d: %s #lfn %d' % (opID, op.Type, len(op)))
return 0
for request in self.requests:
putRequest = self.reqClient.putRequest(request)
if not putRequest['OK']:
sLog.error('unable to put request %r: %s' % (request.RequestName, putRequest['Message']))
continue
requestIDs.append(str(putRequest['Value']))
sLog.always('Request %r has been put to ReqDB for execution.' % request.RequestName)
if requestIDs:
sLog.always('%d requests have been put to ReqDB for execution' % len(requestIDs))
sLog.always('RequestID(s): %s' % ' '.join(requestIDs))
sLog.always('You can monitor the request status using the command: dirac-rms-request <requestName/ID>')
return 0
sLog.error('No requests created')
return 1
def checkSwitches(self):
"""Check the switches, set autoName if needed."""
if not self.switches.get('SourceSE'):
raise RuntimeError('Have to set "SourceSE"')
if not self.switches.get('List') and not self.switches.get('Path'):
raise RuntimeError('Have to set "List" or "Path"')
if not self.name and self.lfnFolderPath:
self.switches['AutoName'] = os.path.join(os.path.dirname(self.lfnFolderPath),
os.path.basename(self.lfnFolderPath) + '.tar')
sLog.notice('Using %r for tarball' % self.switches.get('AutoName'))
if self.switches.get('List') and not self.name:
raise RuntimeError('Have to set "Name" with "List"')
if self.switches.get('RemoveReplicas') and self.switches.get('ArchiveSE') is None:
sLog.error("'RemoveReplicas' does not work without 'ArchiveSE'")
raise RuntimeError('ArchiveSE missing')
if self.switches.get('RemoveReplicas') and self.switches.get('RemoveFiles'):
sLog.error("Use either 'RemoveReplicas' or 'RemoveFiles', not both!")
raise RuntimeError('Too many removal flags')
if self.switches.get('ReplicateTarball') and not self.switches.get('TargetSE'):
sLog.error("Have to set 'TargetSE' with 'ReplicateTarball'")
raise RuntimeError('ReplicateTarball missing TargetSE')
def splitLFNsBySize(self):
"""Split LFNs into MAX_SIZE chunks of at most MAX_FILES length.
:return: list of list of lfns
"""
sLog.notice('Splitting files by Size')
lfnChunk = []
totalSize = 0
for lfn, info in self.metaData['Successful'].items():
if (totalSize > self.switches['MaxSize'] or len(lfnChunk) >= self.switches['MaxFiles']):
self.lfnChunks.append(lfnChunk)
sLog.notice('Created Chunk of %s lfns with %s bytes' % (len(lfnChunk), totalSize))
lfnChunk = []
totalSize = 0
lfnChunk.append(lfn)
totalSize += info['Size']
self.lfnChunks.append(lfnChunk)
sLog.notice('Created Chunk of %s lfns with %s bytes' % (len(lfnChunk), totalSize))
self.replicaSEs = set([seItem for se in self.fcClient.getReplicas(self.lfnList)['Value']['Successful'].values()
for seItem in se.keys()])
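  # A self-contained sketch (not part of DIRAC) of the chunking policy used by
  # splitLFNsBySize above: a new tarball chunk is started once the accumulated
  # size or the number of collected files exceeds the configured limits.
  @staticmethod
  def _splitBySizeSketch(files, maxSize, maxFiles):
    """Split (lfn, size) pairs into chunks; return a list of lists of LFNs."""
    chunks, chunk, total = [], [], 0
    for lfn, size in files:
      if total > maxSize or len(chunk) >= maxFiles:
        chunks.append(chunk)
        chunk, total = [], 0
      chunk.append(lfn)
      total += size
    chunks.append(chunk)
    return chunks
  # e.g. _splitBySizeSketch([('a', 3), ('b', 3), ('c', 3)], maxSize=5, maxFiles=10)
  # returns [['a', 'b'], ['c']]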
def run(self):
"""Perform checks and create the request."""
if self.switches.get('AutoName'):
baseArchiveLFN = archiveLFN = self.switches['AutoName']
tarballName = os.path.basename(archiveLFN)
else:
baseArchiveLFN = archiveLFN = self.name
tarballName = os.path.basename(archiveLFN)
baseRequestName = requestName = 'Archive_%s' % tarballName.rsplit('.', 1)[0]
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
self.splitLFNsBySize()
for count, lfnChunk in enumerate(self.lfnChunks):
if not lfnChunk:
sLog.error('LFN list is empty!!!')
return 1
if len(self.lfnChunks) > 1:
requestName = '%s_%d' % (baseRequestName, count)
baseName = os.path.split(baseArchiveLFN.rsplit('.', 1)[0])
archiveLFN = '%s/%s_Tars/%s_%d.tar' % (baseName[0], baseName[1], baseName[1], count)
self.checkArchive(archiveLFN)
request = self.createRequest(requestName, archiveLFN, lfnChunk)
valid = RequestValidator().validate(request)
if not valid['OK']:
sLog.error('putRequest: request not valid', '%s' % valid['Message'])
return 1
else:
self.requests.append(request)
self.putOrRunRequests()
return 0
def addLFNs(self, operation, lfns, addPFN=False):
"""Add lfns to operation.
:param operation: the operation instance to which the files will be added
:param list lfns: list of lfns
:param bool addPFN: if true adds PFN to each File
"""
if not self.metaData:
self.getLFNMetadata()
for lfn in lfns:
metaDict = self.metaData['Successful'][lfn]
opFile = File()
opFile.LFN = lfn
if addPFN:
opFile.PFN = lfn
opFile.Size = metaDict['Size']
if 'Checksum' in metaDict:
# should check checksum type, now assuming Adler32 (metaDict['ChecksumType'] = 'AD')
opFile.Checksum = metaDict['Checksum']
opFile.ChecksumType = 'ADLER32'
operation.addFile(opFile)
def getLFNMetadata(self):
"""Get the metadata for all the LFNs."""
metaData = self.fcClient.getFileMetadata(self.lfnList)
error = False
if not metaData['OK']:
sLog.error('Unable to read metadata for lfns: %s' % metaData['Message'])
raise RuntimeError('Could not read metadata: %s' % metaData['Message'])
self.metaData = metaData['Value']
for failedLFN, reason in self.metaData['Failed'].items():
sLog.error('skipping %s: %s' % (failedLFN, reason))
error = True
if error:
raise RuntimeError('Could not read all metadata')
for lfn in self.metaData['Successful'].keys():
sLog.verbose('found %s' % lfn)
def createRequest(self, requestName, archiveLFN, lfnChunk):
"""Create the Request."""
request = Request()
request.RequestName = requestName
self._checkReplicaSites(request, lfnChunk)
archiveFiles = Operation()
archiveFiles.Type = 'ArchiveFiles'
archiveFiles.Arguments = DEncode.encode({'SourceSE': self.sourceSEs[0],
'TarballSE': self.switches['TarballSE'],
'RegisterDescendent': self.switches['RegisterDescendent'],
'ArchiveLFN': archiveLFN})
self.addLFNs(archiveFiles, lfnChunk)
request.addOperation(archiveFiles)
# Replicate the Tarball, ArchiveFiles will upload it
if self.switches.get('ReplicateTarball'):
replicateAndRegisterTarBall = Operation()
replicateAndRegisterTarBall.Type = 'ReplicateAndRegister'
replicateAndRegisterTarBall.TargetSE = self.targetSE
opFile = File()
opFile.LFN = archiveLFN
replicateAndRegisterTarBall.addFile(opFile)
request.addOperation(replicateAndRegisterTarBall)
if self.switches.get('CheckMigration'):
checkMigrationTarBall = Operation()
checkMigrationTarBall.Type = 'CheckMigration'
migrationTarget = self.targetSE if self.switches.get('ReplicateTarball') else self.switches['TarballSE']
checkMigrationTarBall.TargetSE = migrationTarget
opFile = File()
opFile.LFN = archiveLFN
checkMigrationTarBall.addFile(opFile)
request.addOperation(checkMigrationTarBall)
# Register Archive Replica for LFNs
if self.switches.get('ArchiveSE'):
registerArchived = Operation()
registerArchived.Type = 'RegisterReplica'
registerArchived.TargetSE = self.switches.get('ArchiveSE')
self.addLFNs(registerArchived, lfnChunk, addPFN=True)
request.addOperation(registerArchived)
# Remove all Other Replicas for LFNs
if self.switches.get('RemoveReplicas'):
removeArchiveReplicas = Operation()
removeArchiveReplicas.Type = 'RemoveReplica'
removeArchiveReplicas.TargetSE = ','.join(self.replicaSEs)
self.addLFNs(removeArchiveReplicas, lfnChunk)
request.addOperation(removeArchiveReplicas)
# Remove all Replicas for LFNs
if self.switches.get('RemoveFiles'):
removeArchiveFiles = Operation()
removeArchiveFiles.Type = 'RemoveFile'
self.addLFNs(removeArchiveFiles, lfnChunk)
request.addOperation(removeArchiveFiles)
# Remove Original tarball replica
if self.switches.get('ReplicateTarball'):
removeTarballOrg = Operation()
removeTarballOrg.Type = 'RemoveReplica'
removeTarballOrg.TargetSE = self.sourceSEs[0]
opFile = File()
opFile.LFN = archiveLFN
removeTarballOrg.addFile(opFile)
request.addOperation(removeTarballOrg)
return request
def checkArchive(self, archiveLFN):
"""Check that archiveLFN does not exist yet."""
sLog.notice('Using Tarball: %s' % archiveLFN)
exists = returnSingleResult(self.fcClient.isFile(archiveLFN))
sLog.debug('Checking for Tarball existence %r' % exists)
if exists['OK'] and exists['Value']:
raise RuntimeError('Tarball %r already exists' % archiveLFN)
sLog.debug('Checking permissions for %r' % archiveLFN)
hasAccess = returnSingleResult(self.fcClient.hasAccess(archiveLFN, 'addFile'))
if not archiveLFN or not hasAccess['OK'] or not hasAccess['Value']:
sLog.error('Error checking tarball location: %r' % hasAccess)
raise ValueError('%s is not a valid path, parameter "Name" must be correct' % archiveLFN)
def _checkReplicaSites(self, request, lfnChunk):
"""Ensure that all lfns can be found at the SourceSE, otherwise add replication operation to request.
    If SourceOnly is set, just reject those LFNs.
"""
resReplica = self.fcClient.getReplicas(lfnChunk)
if not resReplica['OK']:
sLog.error('Failed to get replica information:', resReplica['Message'])
raise RuntimeError('Failed to get replica information')
atSource = []
notAt = []
failed = []
sourceSE = self.sourceSEs[0]
for lfn, replInfo in resReplica['Value']['Successful'].items():
if sourceSE in replInfo:
atSource.append(lfn)
else:
sLog.notice('WARN: LFN %r not found at source, only at: %s' % (lfn, ','.join(replInfo.keys())))
notAt.append(lfn)
for lfn, errorMessage in resReplica['Value']['Failed'].items():
sLog.error('Failed to get replica info', '%s: %s' % (lfn, errorMessage))
failed.append(lfn)
if failed:
raise RuntimeError('Failed to get replica information')
sLog.notice('Found %d files to replicate' % len(notAt))
if not notAt:
return
if notAt and self.switches.get('AllowReplication'):
self._replicateSourceFiles(request, notAt)
else:
raise RuntimeError('Not all files are at the Source, exiting')
def _replicateSourceFiles(self, request, lfns):
"""Create the replicateAndRegisterRequest.
:param request: The request to add the operation to
:param lfns: list of LFNs
"""
registerSource = Operation()
registerSource.Type = 'ReplicateAndRegister'
registerSource.TargetSE = self.sourceSEs[0]
self.addLFNs(registerSource, lfns, addPFN=True)
request.addOperation(registerSource)
@DIRACScript()
def main():
try:
CAR = CreateArchiveRequest()
CAR.run()
except Exception as e:
if LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE:
sLog.exception('Failed to create Archive Request')
else:
sLog.error('ERROR: Failed to create Archive Request:', str(e))
exit(1)
exit(0)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_create_archive_request.py
|
Python
|
gpl-3.0
| 21,052
|
[
"DIRAC"
] |
96751abfa6b42e66dd1efddd0cf04b8e92aae33e32459ad17f5860ca0e67e7a1
|
"""Base class for SOM and Neural Gas."""
import json
import logging
import time
import types
from collections import Counter, defaultdict
from typing import Callable, Dict, List, Optional, Tuple, TypeVar
import numpy as np
from tqdm import tqdm
from somber.components.utilities import shuffle, Scaler
from somber.components.initializers import range_initialization
from somber.distance import euclidean
# For class methods
_T = TypeVar("_T")
logger = logging.getLogger(__name__)
class Base(object):
"""
This is a base class for the Neural gas and SOM.
"""
# Static property names
param_names = {
"num_neurons",
"weights",
"data_dimensionality",
"params",
"valfunc",
"argfunc",
}
def __init__(
self,
num_neurons: Tuple[int],
data_dimensionality: Optional[int],
params: Dict[str, float],
argfunc: str = "argmin",
valfunc: str = "min",
initializer: Callable = range_initialization,
scaler: Optional[Scaler] = None,
) -> None:
"""
Organize nothing.
:param num_neurons: The number of neurons to create.
:param data_dimensionality: The dimensionality of the input data. Set to None
to infer it from the data.
:param params: A dictionary describing the parameters which need to be reduced
over time. Each parameter is denoted by two fields: "value" and
"factor", which denote the current value, and the constant factor
with which the value is multiplied in each update step.
:param argfunc: The name of the function which is used for calculating the index of
the BMU.
:param valfunc: The name of the function which is used for calculating the value of the
BMU.
:param initializer: A function which takes in the input data and weight matrix and returns
an initialized weight matrix. The initializers are defined in
somber.components.initializers. Can be set to None.
:param scaler: An initialized instance of Scaler which is used to scale the data
to have mean 0 and stdev 1. If this is set to None, the SOM will
create a scaler.
"""
        self.num_neurons = int(num_neurons)
self.data_dimensionality = data_dimensionality
if self.data_dimensionality:
self.weights = np.zeros((num_neurons, data_dimensionality))
else:
self.weights = None
self.argfunc = argfunc
self.valfunc = valfunc
self.trained = False
        # Fall back to a fresh Scaler when none was supplied.
        if scaler is None:
            scaler = Scaler()
        self.initializer = initializer
        self.params = params
        self.scaler = scaler
def fit(
self,
X: np.ndarray,
num_epochs: int = 10,
updates_epoch: Optional[int] = None,
stop_param_updates: Optional[Dict[str, int]] = None,
batch_size: int = 32,
show_progressbar: bool = False,
refit: bool = True,
) -> _T:
"""
Fit the learner to some data.
:param X: The input data.
:param num_epochs: The number of epochs to train for.
:param updates_epoch: The number of updates to perform on the learning rate and
neighborhood per epoch. 10 suffices for most problems.
:param stop_param_updates: The epoch at which to stop updating each param.
This means that the specified parameter will be reduced to 0 at the
specified epoch. If this is None, all params become 0 at the end.
:param batch_size: The batch size to use. Warning: batching can change your
performance dramatically, depending on the task.
:param show_progressbar: Whether to show a progressbar during training.
"""
if self.data_dimensionality is None:
self.data_dimensionality = X.shape[-1]
self.weights = np.zeros((self.num_neurons, self.data_dimensionality))
X = self._check_input(X)
if not self.trained or refit:
X = self._init_weights(X)
else:
if self.scaler is not None:
self.weights = self.scaler.transform(self.weights)
if updates_epoch is None:
X_len = X.shape[0]
updates_epoch = np.min([50, X_len // batch_size])
stop_param_updates = stop_param_updates or {}
constants = self._pre_train(stop_param_updates, num_epochs, updates_epoch)
start = time.time()
progressbar = tqdm(total=len(X) * num_epochs) if show_progressbar else None
for epoch in range(num_epochs):
logger.info(f"Epoch {epoch+1} of {num_epochs}")
self._epoch(X, batch_size, updates_epoch, constants, progressbar)
if progressbar is not None:
progressbar.close()
self.trained = True
if self.scaler is not None:
self.weights = self.scaler.inverse_transform(self.weights)
logger.info(f"Total train time: {time.time() - start}")
return self
def _init_weights(self, X: np.ndarray) -> np.ndarray:
"""Set the weights and normalize data before starting training."""
X = np.asarray(X, dtype=np.float64)
if self.scaler is not None:
X = self.scaler.fit_transform(X)
if self.initializer is not None:
self.weights = self.initializer(X, self.num_neurons)
for v in self.params.values():
v["value"] = v["orig"]
return X
def _pre_train(
self, stop_param_updates: Dict[str, int], num_epochs: int, updates_epoch: int
) -> Dict[str, float]:
"""Set parameters and constants before training."""
# Calculate the total number of updates given early stopping.
updates = {
k: stop_param_updates.get(k, num_epochs) * updates_epoch
for k, v in self.params.items()
}
# Calculate the value of a single step given the number of allowed
# updates.
single_steps = {
k: np.exp(-((1.0 - (1.0 / v))) * self.params[k]["factor"])
for k, v in updates.items()
}
# Calculate the factor given the true factor and the value of a
# single step.
constants = {
k: np.exp(-self.params[k]["factor"]) / v for k, v in single_steps.items()
}
return constants
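    # Worked example (an illustration, not part of somber): for a parameter
    # with decay factor f that receives n = num_epochs * updates_epoch
    # updates, the three expressions above reduce to a per-update constant of
    # exp(-f / n), so after all n multiplicative updates the parameter has
    # decayed by exactly exp(-f):
    #
    #   f, n = 1.0, 100
    #   constant = np.exp(-f) / np.exp(-(1.0 - 1.0 / n) * f)  # == exp(-f / n)
    #   value = 0.5
    #   for _ in range(n):
    #       value *= constant
    #   # value is now 0.5 * exp(-1), roughly 0.184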
def fit_predict(
self,
X: np.ndarray,
num_epochs: int = 10,
updates_epoch: int = 10,
stop_param_updates: Optional[Dict[str, int]] = None,
batch_size: int = 1,
show_progressbar: bool = False,
) -> np.ndarray:
"""First fit, then predict."""
self.fit(
X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar,
)
return self.predict(X, batch_size=batch_size)
def fit_transform(
self,
X: np.ndarray,
num_epochs: int = 10,
updates_epoch: int = 10,
stop_param_updates: Optional[Dict[str, int]] = None,
batch_size: int = 1,
show_progressbar: bool = False,
show_epoch: bool = False,
) -> np.ndarray:
"""First fit, then transform."""
self.fit(
X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar,
)
return self.transform(X, batch_size=batch_size)
def _epoch(
self,
X: np.ndarray,
batch_size: int,
updates_epoch: int,
constants: Dict[str, float],
progressbar: tqdm,
) -> None:
"""
Run a single epoch.
:param X: The training data.
:param batch_size: The batch size
:param updates_epoch: The number of updates to perform per epoch
:param constants: A dictionary containing the constants with which to update the
parameters in self.parameters.
:param progressbar: The progressbar instance to show and update during training
"""
# Create batches
X_ = self._create_batches(X, batch_size)
X_len = np.prod(X.shape[:-1])
update_step = np.ceil(X_.shape[0] / updates_epoch)
# Initialize the previous activation
prev = self._init_prev(X_)
influences = self._update_params(constants)
# Iterate over the training data
for idx, x in enumerate(X_):
# Our batches are padded, so we need to
# make sure we know when we hit the padding
# so we don't inadvertently learn zeroes.
diff = X_len - (idx * batch_size)
if diff and diff < batch_size:
x = x[:diff]
# Prev_activation may be None
if prev is not None:
prev = prev[:diff]
# If we hit an update step, perform an update.
if idx % update_step == 0:
influences = self._update_params(constants)
logger.info(self.params)
prev = self._propagate(x, influences, prev_activation=prev)
if progressbar is not None:
progressbar.update(batch_size)
def _update_params(self, constants: Dict[str, float]) -> np.ndarray:
"""Update params and return new influence."""
for k, v in constants.items():
self.params[k]["value"] *= v
influence = self._calculate_influence(self.params["infl"]["value"])
return influence * self.params["lr"]["value"]
def _init_prev(self, x: np.ndarray) -> Optional[np.ndarray]:
"""Initialize recurrent SOMs."""
return None
def _get_bmu(self, activations: np.ndarray) -> np.ndarray:
"""Get bmu based on activations."""
return activations.__getattribute__(self.argfunc)(1)
def _create_batches(
self, X: np.ndarray, batch_size: int, shuffle_data: bool = True
) -> np.ndarray:
"""
Create batches out of a sequence of data.
        This function pads the end of your data so that all batches are
        even-sized (np.resize recycles the input to fill the last batch).
        The padded tail is masked out during training.
"""
if shuffle_data:
X = shuffle(X)
if batch_size > X.shape[0]:
batch_size = X.shape[0]
max_x = int(np.ceil(X.shape[0] / batch_size))
X = np.resize(X, (max_x, batch_size, X.shape[-1]))
return X
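    # A small illustration (not part of somber): with 5 samples of
    # dimensionality 3 and batch_size=2, ceil(5 / 2) = 3 batches are produced;
    # np.resize recycles the input to fill the last batch, and the recycled
    # tail is masked out again in _epoch.
    #
    #   X = np.arange(15).reshape(5, 3)
    #   batched = np.resize(X, (3, 2, 3))   # shape: (n_batches, batch_size, dim)
    #   # batched[2] == [[12, 13, 14], [0, 1, 2]]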
def _propagate(self, x: np.ndarray, influences: np.ndarray, **kwargs) -> np.ndarray:
"""Propagate a single batch of examples through the network."""
activation, difference_x = self.forward(x)
update = self.backward(difference_x, influences, activation)
self.weights += update.mean(0)
return activation
def forward(self, x: np.ndarray, **kwargs) -> np.ndarray:
"""
Get the best matching neurons, and the difference between inputs.
Note: it might seem like this function can be replaced by a call to
distance function. This is only true for the regular SOM. other
SOMs, like the recurrent SOM need more complicated forward pass
functions.
:param x: The input vectors.
:return: A tuple containing the activations and differences between neurons
and input, respectively.
"""
return self.distance_function(x, self.weights)
def backward(
self,
diff_x: np.ndarray,
influences: np.ndarray,
activations: np.ndarray,
**kwargs,
) -> np.ndarray:
"""
Backward pass through the network, including update.
:param diff_x: A matrix containing the differences between the input and
neurons.
:param influences: A matrix containing the influence each neuron has on each
other neuron. This is used to calculate the updates.
:param activations: The activations each neuron has to each data point. This is
used to calculate the BMU.
:return: The updates to be applied.
"""
bmu = self._get_bmu(activations)
influence = influences[bmu]
update = influence[:, :, None] * diff_x
return update
def distance_function(
self, x: np.ndarray, weights: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Calculate euclidean distance between a batch of input data and weights.
:param x: The input data.
:param weights: The weights
:return: A tuple of distances from each input to each weight, and the
feature-wise differences between each input and each weight
"""
distances, differences = euclidean(x, weights)
# For clarity
return distances, differences
def _check_input(self, X: np.ndarray) -> np.ndarray:
"""
Check the input for validity.
Ensures that the input data, X, is a 2-dimensional matrix, and that
the second dimension of this matrix has the same dimensionality as
the weight matrix.
"""
if np.ndim(X) == 1:
X = np.reshape(X, (1, -1))
if X.ndim != 2:
raise ValueError(f"Your data is not a 2D matrix. Actual size: {X.shape}")
if X.shape[1] != self.data_dimensionality:
raise ValueError(
f"Your data size != weight dim: {X.shape[1]}, "
f"expected {self.data_dimensionality}"
)
return X
def transform(
self, X: np.ndarray, batch_size: int = 100, show_progressbar: bool = False
) -> np.ndarray:
"""
Transform input to a distance matrix by measuring the L2 distance.
:param X: The input data.
:param batch_size: The batch size to use in transformation. This may affect the
transformation in stateful, i.e. sequential SOMs.
:param show_progressbar: Whether to show a progressbar during transformation.
:return: An array of activations for each input item.
"""
X = self._check_input(X)
batched = self._create_batches(X, batch_size, shuffle_data=False)
activations = []
prev = self._init_prev(batched)
for x in tqdm(batched, disable=not show_progressbar):
prev = self.forward(x, prev_activation=prev)[0]
activations.extend(prev)
activations = np.asarray(activations, dtype=np.float64)
activations = activations[: X.shape[0]]
return activations.reshape(X.shape[0], self.num_neurons)
def predict(
self, X: np.ndarray, batch_size: int = 32, show_progressbar: bool = False
) -> np.ndarray:
"""
Predict the BMU for each input data.
:param X: The input data.
:param batch_size: The batch size to use in prediction. This may affect prediction
in stateful, i.e. sequential SOMs.
:param show_progressbar : Whether to show a progressbar during prediction.
:return: An array containing the BMU for each input data point.
"""
dist = self.transform(X, batch_size, show_progressbar)
res = dist.__getattribute__(self.argfunc)(1)
return res
def quantization_error(self, X, batch_size=1):
"""
Calculate the quantization error.
        Find the minimum euclidean distance between the units and
some input.
:param X: The input data.
:param batch_size: The batch size to use for processing.
:return: The error for each data point.
"""
dist = self.transform(X, batch_size)
res = dist.__getattribute__(self.valfunc)(1)
return res
def receptive_field(
self, X: np.ndarray, identities: List, max_len=10, threshold=0.9, batch_size=1
) -> Dict[int, List[List[str]]]:
"""
Calculate the receptive field of the SOM on some data.
The receptive field is the common ending of all sequences which
lead to the activation of a given BMU. If a SOM is well-tuned to
specific sequences, it will have longer receptive fields, and therefore
gives a better description of the dynamics of a given system.
:param X: Input data.
:param identities: A list of symbolic identities associated with each input.
            We expect this list to be as long as the input data.
:param max_len: The maximum length to attempt to find. Raising this increases
memory use.
:param threshold: The threshold at which we consider a receptive field valid.
If at least this proportion of the sequences of a neuron have the same
suffix, that suffix is counted as acquired by the SOM.
:param batch_size: The batch size to use in prediction
:return: A dictionary mapping from the neuron id to the found sequences for
that neuron. The sequences are represented as lists of symbols of identities.
"""
receptive_fields = defaultdict(list)
predictions = self.predict(X)
if len(predictions) != len(identities):
raise ValueError(
"X and identities are not the same length: "
f"{len(predictions)} and {len(identities)}"
)
for idx, p in enumerate(predictions.tolist()):
receptive_fields[p].append(identities[idx + 1 - max_len : idx + 1])
rec = {}
for k, v in receptive_fields.items():
            # if there's only one sequence, we don't know
            # anything about how salient it is.
seq = []
if len(v) <= 1:
continue
else:
for x in reversed(list(zip(*v))):
x = Counter(x)
if x.most_common(1)[0][1] / sum(x.values()) > threshold:
seq.append(x.most_common(1)[0][0])
else:
rec[k] = seq
break
return rec
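    # A toy illustration (not part of somber) of the suffix-majority rule used
    # above: for a neuron whose aligned BMU histories are the three sequences
    # below, positions are inspected from the end and a symbol is kept only
    # while it covers more than `threshold` of the sequences.
    #
    #   from collections import Counter
    #   v = [['x', 'b', 'a'], ['y', 'b', 'a'], ['z', 'b', 'a']]
    #   seq = []
    #   for column in reversed(list(zip(*v))):   # ('a','a','a'), ('b','b','b'), ...
    #       symbol, count = Counter(column).most_common(1)[0]
    #       if count / len(column) > 0.9:
    #           seq.append(symbol)
    #       else:
    #           break
    #   # seq == ['a', 'b'], i.e. the shared suffix ('b', 'a') read from the end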
@classmethod
def load(cls, path: str) -> _T:
"""
Load a SOM from a JSON file saved with this package.
:param path: The path to the JSON file.
:return: A som of the specified class.
"""
data = json.load(open(path))
weights = data["weights"]
weights = np.asarray(weights, dtype=np.float64)
s = cls(
data["num_neurons"],
data["data_dimensionality"],
data["params"]["lr"]["orig"],
neighborhood=data["params"]["infl"]["orig"],
valfunc=data["valfunc"],
argfunc=data["argfunc"],
lr_lambda=data["params"]["lr"]["factor"],
nb_lambda=data["params"]["nb"]["factor"],
)
s.weights = weights
s.trained = True
return s
def save(self, path: str) -> None:
"""Save a SOM to a JSON file."""
to_save = {}
for x in self.param_names:
attr = self.__getattribute__(x)
if type(attr) == np.ndarray:
attr = [[float(x) for x in row] for row in attr]
elif isinstance(attr, types.FunctionType):
attr = attr.__name__
to_save[x] = attr
json.dump(to_save, open(path, "w"))
|
stephantul/somber
|
somber/base.py
|
Python
|
mit
| 19,473
|
[
"NEURON"
] |
169dbd04dcd042390cddf31f56c167ec11913b5aea7b5cd2fb91546da1555018
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the `California housing dataset
<http://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html>`_ have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties to visualize the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed, many estimators are designed with the assumption that each feature takes
values close to zero or, more importantly, that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception are decision tree-based estimators that are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way to estimate the parameters used to shift and scale each
feature.
``QuantileTransformer`` provides non-linear transformations in which distances
between marginal outliers and inliers are shrunk. ``PowerTransformer`` provides
non-linear transformations in which data is mapped to a normal distribution to
stabilize variance and minimize skewness.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose, feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Thomas Unterthiner
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature 0 has a long-tailed distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after power transformation (Yeo-Johnson)',
PowerTransformer(method='yeo-johnson').fit_transform(X)),
('Data after power transformation (Box-Cox)',
PowerTransformer(method='box-cox').fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X)),
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
# plasma does not exist in matplotlib < 1.5
cmap = getattr(cm, 'plasma_r', cm.hot_r)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cmap(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
###############################################################################
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will only show 99 % of the data set, excluding the marginal
# outliers. In addition, the marginal distributions for each
# feature will be shown on the side of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cmap,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
########################################################################
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending on the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
#######################################################################
# StandardScaler
# --------------
#
# ``StandardScaler`` removes the mean and scales the data to unit variance.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation, which shrinks the range of the feature values as shown in
# the left figure below. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# ``StandardScaler`` therefore cannot guarantee balanced feature scales in the
# presence of outliers.
make_plot(1)
##########################################################################
# MinMaxScaler
# ------------
#
# ``MinMaxScaler`` rescales the data set such that all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# Like ``StandardScaler``, ``MinMaxScaler`` is very sensitive to the presence of
# outliers.
make_plot(2)
#############################################################################
# MaxAbsScaler
# ------------
#
# ``MaxAbsScaler`` differs from the previous scalers in that the absolute
# values are mapped to the range [0, 1]. On positive-only data, this scaler
# behaves similarly to ``MinMaxScaler`` and therefore also suffers from the
# presence of large outliers.
make_plot(3)
##############################################################################
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of this
# scaler are based on percentiles and are therefore not influenced by a small
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, approximately the same for both features: most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
make_plot(4)
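##############################################################################
# Added illustration (not part of the original example; a minimal sketch on a
# made-up toy array): the median/IQR statistics used by ``RobustScaler``
# barely move when a single huge outlier is present, while the mean and
# standard deviation used by ``StandardScaler`` change substantially.
toy = np.array([[1.0], [2.0], [3.0], [4.0], [1000.0]])  # one extreme outlier
std_toy = StandardScaler().fit(toy)
rob_toy = RobustScaler().fit(toy)
print("StandardScaler mean / scale:", std_toy.mean_, std_toy.scale_)
print("RobustScaler center / scale:", rob_toy.center_, rob_toy.scale_)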
##############################################################################
# PowerTransformer
# ----------------
#
# ``PowerTransformer`` applies a power transformation to each feature to make
# the data more Gaussian-like. Currently, ``PowerTransformer`` implements the
# Yeo-Johnson and Box-Cox transforms. The power transform finds the optimal
# scaling factor to stabilize variance and minimize skewness through maximum
# likelihood estimation. By default, ``PowerTransformer`` also applies
# zero-mean, unit variance normalization to the transformed output. Note that
# Box-Cox can only be applied to strictly positive data. Income and number of
# households happen to be strictly positive, but if negative values are present
# the Yeo-Johnson transform is to be preferred.
make_plot(5)
make_plot(6)
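##############################################################################
# Added check (not part of the original example; a small sketch on a made-up
# array): Box-Cox refuses data containing zero or negative values, whereas
# Yeo-Johnson accepts them.
X_toy = np.array([[1.0], [-2.0], [3.0]])
print(PowerTransformer(method='yeo-johnson').fit_transform(X_toy).ravel())
try:
    PowerTransformer(method='box-cox').fit_transform(X_toy)
except ValueError as exc:
    print("Box-Cox rejected non-positive data:", exc)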
##############################################################################
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# ``QuantileTransformer`` has an additional ``output_distribution`` parameter
# that allows mapping to a Gaussian distribution instead of a uniform
# distribution. Note that this non-parametric transformer introduces saturation
# artifacts for extreme values.
make_plot(7)
###################################################################
# QuantileTransformer (uniform output)
# ------------------------------------
#
# ``QuantileTransformer`` applies a non-linear transformation such that the
# probability density function of each feature will be mapped to a uniform
# distribution. In this case, all the data will be mapped to the range [0, 1],
# even the outliers, which can no longer be distinguished from the inliers.
#
# Like ``RobustScaler``, ``QuantileTransformer`` is robust to outliers in the
# sense that adding or removing outliers in the training set will yield
# approximately the same transformation on held out data. But contrary to
# ``RobustScaler``, ``QuantileTransformer`` will also automatically collapse
# any outlier by setting them to the a priori defined range boundaries (0 and
# 1).
make_plot(8)
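##############################################################################
# Added illustration of the boundary clipping described above (a minimal
# sketch, not part of the original example, using made-up extreme points):
# values far outside the training range are collapsed to the [0, 1] boundaries
# on held-out data.
qt = QuantileTransformer(output_distribution='uniform').fit(X)
extreme = np.array([[1e6, 1e6], [-1e6, -1e6]])
print(qt.transform(extreme))  # expected to be clipped to ~[1, 1] and ~[0, 0]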
##############################################################################
# Normalizer
# ----------
#
# The ``Normalizer`` rescales the vector for each sample to have unit norm,
# independently of the distribution of the samples. This can be seen in both
# figures below, where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(9)
plt.show()
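##############################################################################
# Added sanity check (not part of the original example): after ``Normalizer``,
# every sample should have (approximately) unit L2 norm.
norms = np.linalg.norm(Normalizer().fit_transform(X), axis=1)
print("min/max sample norm:", norms.min(), norms.max())  # both expected ~1.0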
|
vortex-ape/scikit-learn
|
examples/preprocessing/plot_all_scaling.py
|
Python
|
bsd-3-clause
| 14,019
|
[
"Gaussian"
] |
295b73f161fd2c84c297602cb17d3b9c18af53782e9112df499d6e72baf593a2
|
import cv2
import numpy as np
import sys
def order_points(pts):
    # initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype="float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
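# (Added usage sketch, not part of the original script; the corner coordinates
# below are made up for illustration only.)
#     pts = np.array([[10, 10], [200, 15], [205, 180], [5, 175]], dtype="float32")
#     top_down = four_point_transform(image, pts)
# Given an image and the four corner points of a quadrilateral, the result is a
# rectified, top-down view of that region.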
def check_include(centre_list, x_centre, y_centre):
for point in centre_list:
x_difference = point[0] - x_centre
y_difference = point[1] - y_centre
if abs(x_difference) < 10 and abs(y_difference) < 10:
return False
return True
def find_centre(cnts):
# x_axis is a list, store all the x_axis data of one contour
# y_axis is a list, store all the y_axis data of same contour
# cnts[0] is a list of point, which is one rectangle
centre_list = []
for cnt in cnts:
x_axis = []
y_axis = []
for point in cnt:
x_axis.append(point[0][0])
y_axis.append(point[0][1])
# print cnts[0][0][0][0]
x_axis = sorted(x_axis)
y_axis = sorted(y_axis)
x_centre = int((x_axis[0] + x_axis[-1]) / 2)
y_centre = int((y_axis[0] + y_axis[-1]) / 2)
# print "The smallest x coordinate is",x_axis[0]
# print "The smallest y coordinate is",y_axis[0]
# print "The biggest x coordinate is",x_axis[-1]
# print "The biggest y coordinate is",y_axis[-1]
# print "The centre of this rectangle is (%d,%d)" %(x_centre, y_centre)
if (check_include(centre_list, x_centre, y_centre)):
centre_list.append((x_centre, y_centre))
# print "The centre of this rectangle is (%d,%d)" %(x_centre, y_centre)
return centre_list
def process_centre_list(centre_list):
    # This function groups the answer-area rectangles that belong to the same row
    # into one list, and collects all rows in an outer list, so the result is a 2D list.
    # centre_list is ordered by y coordinate from small to large.
    # In this particular case, every row has three questions and each question has 4 rectangles.
    # Within one line the y coordinates are almost the same, so we can use the difference
    # between consecutive y coordinates to decide whether two rectangles are in the same line.
    # current_total_delta is the total difference of y coordinates within one row.
    # current_total_delta_copy stores the previous value inside the for loop.
    # current_average_number is the number of rectangles used for the average.
current_total_delta = 0
current_total_delta_copy = 0
current_average_number = 1
# current_average_delta = current_total_delta/current_average_number
# current_average_delta_copy tries to store the old data.
current_average_delta = 0
current_average_delta_copy = 0
# row_list is a list of column_list
# column_list is a list of point of every line of answer area
row_list = []
column_list = []
for i in range(len(centre_list) - 1):
delta_y1 = (centre_list[i + 1][1] - centre_list[i][1])
# print delta_y1
current_total_delta_copy = current_total_delta
current_total_delta += delta_y1
current_average_delta = 1.0 * current_total_delta / current_average_number
current_average_number += 1
if current_average_delta > current_average_delta_copy * 3 and current_average_delta_copy != 0:
# print "this is average number ",current_average_number
# print "This is current_average_delta " , current_average_delta
# print "This is current_average_delta_copy " , current_average_delta_copy
current_total_delta = current_total_delta_copy # restore total delta from copy
column_list.append(centre_list[i])
row_list.append(column_list)
column_list = []
current_total_delta = 0
current_total_delta_copy = 0
current_average_number = 1
continue
column_list.append(centre_list[i])
current_average_delta_copy = current_average_delta
return row_list
# This function finds the answers the student chose.
# centre_list: list holding the coordinates of the centres of the rectangles.
# thresh1: image object, the image after thresholding.
def find_answer(centre_list, thresh1):
    # Each point is the centre of a rectangle.
    # We look at an 80x80 square around it to detect whether it contains black pixels.
for point in centre_list:
px = 0
x_start, x_end = point[0] - 40, point[0] + 40
y_start, y_end = point[1] - 40, point[1] + 40
for x in range(x_start, x_end):
for y in range(y_start, y_end):
px += thresh1[y, x]
# print "this is pixel " , px
        # 1532000 is a threshold: a pixel sum below 1532000 means the student has
        # handwriting in this region.
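        # (Added note: for an 80x80 window of pure white pixels the sum would be
        # 80 * 80 * 255 = 1,632,000, so 1,532,000 corresponds to roughly 94% white;
        # the exact cutoff was presumably tuned empirically.)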
if px < 1532000:
cv2.circle(thresh1, (x - 40, y - 40), 40, (0, 0, 0))
# This function finds the answer rectangles that were not detected by the
# findContours function.
def find_missing_rectangle(centre_list, centre_list_col, x_uncertainty, y_uncertainty):
row_list = []
total_list = []
# print centre_list_col
base = centre_list_col[0][1] # use column point as the base
y_max = base + y_uncertainty # add base and y_uncertainty
for i in range(len(centre_list_col)):
if centre_list_col[i][1] < y_max:
row_list.append(centre_list_col[i])
else:
# in this case, we end up one line, and change to another line
# so I set a new base.
y_max = centre_list_col[i][1] + y_uncertainty
total_list.append(row_list)
row_list = [] # renew the row_list
# add the first element of next line into new row_list
row_list.append(centre_list_col[i])
# add final row list into total list.
total_list.append(row_list)
# ============================================================
# for test
# ============================================================
# sum = 0
# for i in range(len(total_list)):
# # pass
# print sorted(total_list[i])
# print "length is ", len(total_list[i])
# sum += len(total_list[i])
# print("\n")
# # print "\n"
# # print(total_list)
# print sum
# ============================================================
# end test
# ============================================================
# to get the max_length of a row of question.
# and then get a base_list of row_list
max_length = len(total_list[0])
base_list = []
for row_list in total_list:
if len(row_list) > max_length:
max_length = len(row_list)
base_list = row_list
# print "length of half rectangle is ", x_uncertainty
total_list_copy = []
# sort base list
base_list = sorted(base_list)
for row_list in total_list:
# print "this is row_list" , row_list
# print '\n'
row_list = sorted(row_list)
if len(row_list) == max_length:
total_list_copy.append(row_list)
continue
for i in range(max_length):
try:
base = base_list[i][0] - x_uncertainty
if row_list[i][0] > base:
x_axis = base_list[i][0]
y_axis = row_list[0][1]
row_list.insert(i, (x_axis, y_axis))
centre_list.append((x_axis, y_axis))
# print "length of row list is ", len(row_list)
if len(row_list) == max_length:
total_list_copy.append(row_list)
break
except:
x_axis = base_list[i][0]
y_axis = row_list[0][1]
row_list.insert(i, (x_axis, y_axis))
centre_list.append((x_axis, y_axis))
if len(row_list) == max_length:
total_list_copy.append(row_list)
break
return total_list_copy
# answer_list is a list with one element per row of the answer sheet; each
# element is itself a list of the centre points of the rectangles in that row.
def find_answer2(answer_list,number_of_choice,thresh1,pixel=40, number_of_question=40):
column = len(answer_list[0])/number_of_choice
assert(column == 3)
answer = []
number_of_question = 0
number_of_answer = 0
for i in range(column):
# print number_of_question
if number_of_answer==40:
break
for j in range(len(answer_list)):
boundary = 1532000
number_of_answer = 0
while(True):
# print boundary
# print number_of_answer
# print "i j k" , i ,j
for k in range(i*4,i*4+number_of_choice):
point = answer_list[j][k]
px = 0
x_start, x_end = point[0] - pixel, point[0] + pixel
y_start, y_end = point[1] - pixel, point[1] + pixel
for x in range(x_start, x_end):
for y in range(y_start, y_end):
px += thresh1[y, x]
# print "this is pixel " , px
# 1532000 is a threshold. The value under the 1532000 means student has handwriting
# in this region.
# print px
if px < boundary:
cv2.circle(thresh1, (x - pixel, y - pixel), 40, (0, 0, 0))
number_of_answer += 1
choice = str(k)
if number_of_answer == 1:
number_of_question += 1
answer.append(choice)
break
if number_of_question==40:
break
if number_of_answer == 0:
boundary = boundary * (1.01)
number_of_answer = 0
else:
boundary = boundary / 1.01
number_of_answer = 0
if number_of_answer==40:
break
return answer
# answers is a string containing all of the student's choices.
# num_of_choice is an integer giving the number of choices per question.
# This function converts the numbers in answers into the letters A/B/C/D.
def change_num_into_choice(answers, num_of_choice):
# this is return value
new_answer = ""
for answer in answers:
# the answer is the column number of the answer sheet.
# so first mod the number of choice to get 0~3
answer = str(int(answer) % num_of_choice )
answer = ord(answer) # get the ascii number of answer
answer += 17 # 17 is difference from 0 to A, 1 to B, 2 to C, 3 to D
answer = chr(answer) # to change the ascii number into char.
new_answer += answer
return new_answer
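# (Added illustration, not part of the original script: with num_of_choice=4,
# change_num_into_choice("0567", 4) returns "ABCD" -- each column number is
# taken modulo 4 and then shifted from '0'..'3' to 'A'..'D'.)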
def grade_answer(correct_answer,answer):
temp = ""
result = []
for letter in correct_answer:
if letter.isalpha()==True :
temp += letter
correct_answer = temp
print len(correct_answer)
print len(answer)
if len(correct_answer) != len(answer):
print "The number of answer is inconsistent with correct answer."
return None
for i in range(len(answer)):
temp = []
if answer[i] != correct_answer[i]:
temp.append(i)
temp.append(answer[i])
temp.append(correct_answer[i])
# temp += (answer[i] + correct_answer[i])
result.append(temp)
return result
def grading(image1, answer_file_name):
name = "upload/" + image1
image = cv2.imread(name)
f = open("upload/"+answer_file_name)
correct_answer = f.read()
image = cv2.resize(image,None,fx=2, fy=2, interpolation = cv2.INTER_LANCZOS4)
# ratio = 1000.0 / image.shape[1]
# # new dimension for image
# dim = (1000, int(image.shape[0] * ratio))
# # perform the actual resizing of the image and show it
# # interpolation = cv2.INTER_AREA this is the algorithm we used. Do worry now
# image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
# ratio = image.shape[0] / 500.0
# orig = image.copy()
# convert image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) #is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
# two threshold method.
# The first one is normal threshold method
# The second one is use Gaussian method which has better effect.
# ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY)
thresh1 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# cv2.imshow("gray image", thresh1)
# cv2.imwrite('thresh1.png',thresh1)
# cv2.waitKey(15000)
# find contours in the edged image, keep only the largest ones, and initialize
# our screen contour
    # findContours takes three parameters:
    # First parameter: the image in which we want to find contours. We pass a copy since
    # this method may modify the image.
    # Second parameter: cv2.RETR_TREE tells OpenCV to compute the hierarchy (relationship)
    # between contours.
    # Third parameter: compress the contours to save space using cv2.CHAIN_APPROX_SIMPLE.
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# the number of returned parameter is different depending on the version of openCV
# for 2.x it is (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for 3.x it is (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# sort the counter. The reference is the countourArea. And we only get largest 10
# countour.
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1000]
# cnts = sorted(cnts, key = cv2.contourArea,reverse=True)[:500]
# a new list to store all the rectangle counter
cnts_rect = []
# initialize the screenCnt.
screenCnt = None
# loop over our contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
# This function gives the number of vertices of the figure
# For example, approx returns 4 if the shape is rectangle and 5 if the shape is pentagon
# k is constant, it can be changing from 0.005 to 0.1
# k = 0.005
k = 0.01
approx = cv2.approxPolyDP(c, k * peri, True)
# if our approximated contour has four points, then
# we can assume that we have found our screen
if len(approx) == 4 and cv2.contourArea(c) > 1200:
screenCnt = approx
cnts_rect.append(approx)
# print "this is coutour area ", cv2.contourArea(c)
# the print is for test
# print screenCnt[0][0]
# to draw the contours in the original image.
# print len(cnts_rect)
cv2.drawContours(image, cnts_rect, -1, (0, 255, 0), 3)
cv2.imshow("Game Boy Screen", image)
cv2.imwrite('contours.png', image)
cv2.waitKey(10000)
# to find height and length of the rectangle
'''
height = cnts_rect[0][2][0][1] - cnts_rect[0][0][0][1]
length = cnts_rect[0][2][0][0] - cnts_rect[0][0][0][0]
# x_axis is a list, store all the x_axis data of one contour
# y_axis is a list, store all the y_axis data of same contour
# cnts[0] is a list of point, which is one rectangle
centre_list = find_centre(cnts_rect)
# print len(centre_list)
# print "this length of centre_list is ", len(centre_list)
centre_list_col = sorted(centre_list, key=lambda point: point[1])
# answer_list is a list. It contains x elements, x is rows of the answer sheet. It is also list
# every row_list contains also list which are centre points of rectangle.
answer_list = find_missing_rectangle(centre_list, centre_list_col, length // 2, height // 2)
# ============================================================
# for test print point in centre list
# ============================================================
# print len(answer_list)
# for list1 in answer_list:
# print("the length of list1 is ", len(list1))
# for element in list1:
# print element
# print len(answer_list)
# ============================================================
# end test
# ============================================================
number_of_choice = 4
answer = find_answer2(answer_list,number_of_choice,thresh1,pixel=40,number_of_question=40)
answer = change_num_into_choice(answer,number_of_choice)
# print "length is " ,len(answer)
# print answer
result = grade_answer(correct_answer,answer)
print result
return result
# i = 0
# print len(centre_list_col)
# for i in range(150):
# print centre_list_col[i]
centre_list = sorted(centre_list, key=lambda point: point[0])
# print "The number of centre point " , len(centre_list)
# # for test.
# i = 0
# print len(centre_list)
# for i in range(138):
# print centre_list[i]
# cv2.circle(image,centre_list[i],20,(0,0,0))
# row_list = process_centre_list(centre_list)
# find_answer(centre_list, thresh1)
# cv2.imshow("Game Boy Screen", image)
# cv2.imshow("gray image", thresh1)
cv2.imwrite('contours.png', image)
cv2.imwrite('thresh1.png',thresh1)
# cv2.waitKey(15000)
# apply the four point transform to obtain a top-down
# view of the original image
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
ret, thresh1 = cv2.threshold(warped, 80, 85, cv2.THRESH_BINARY)
# cv2.imshow("Binary",thresh1 )
warped = warped.astype("uint8") * 255
# cv2.waitKey(10000)
cv2.imwrite('messigray.png', image)
'''
def this_is_also_test():
pass
# test for different comment
def test1():
pass
if __name__ == '__main__':
image_file = "wrap.png"
answer_file = "answer.txt"
answer = grading(image_file, answer_file)
print "This is the output of the main function ", answer
|
HuimingCheng/AutoGrading
|
Test_code/differentVersion/v1.py
|
Python
|
mit
| 20,872
|
[
"Gaussian"
] |
3bb880271deccb220880508f04c9271c305478409da39988386b567dbd8e4c8f
|
from setuptools import setup, find_packages
setup(name='pipeable',
version='1.0.3',
description='A utility for making Python scripts compatible with the | operator on the command line.',
author='Brian Budge',
author_email='budgebrian21@gmail.com',
url='https://github.com/budgebi/pipeable',
license='MIT',
packages= find_packages(exclude=['test']),
python_requires='>=3',
keywords='pipeline, pipe, pipeable',
      install_requires=[],
      extras_require={}
)
|
budgebi/pipeable
|
setup.py
|
Python
|
mit
| 520
|
[
"Brian"
] |
c587eb1094851ab6cc389feb8d821d991ce39d17823948eb2c8a79cbbc27a0db
|
import decimal
import json
import networkx as nx
import re
from operator import itemgetter
from datetime import datetime, timedelta
from collections import defaultdict
from itertools import chain
from functools import partial
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.db import connection
from django.db.models import Q
from django.views.decorators.cache import never_cache
from rest_framework.decorators import api_view
from catmaid.models import Project, UserRole, Class, ClassInstance, Review, \
ClassInstanceClassInstance, Relation, Treenode, TreenodeConnector
from catmaid.objects import Skeleton, SkeletonGroup, \
compartmentalize_skeletongroup_by_edgecount, \
compartmentalize_skeletongroup_by_confidence
from catmaid.control.authentication import requires_user_role, \
can_edit_class_instance_or_fail, can_edit_or_fail
from catmaid.control.common import insert_into_log, get_class_to_id_map, \
get_relation_to_id_map, _create_relation
from catmaid.control.neuron import _delete_if_empty
from catmaid.control.neuron_annotations import create_annotation_query, \
_annotate_entities, _update_neuron_annotations
from catmaid.control.review import get_treenodes_to_reviews, get_review_status
from catmaid.control.tree_util import find_root, reroot, edge_count_to_root
def get_skeleton_permissions(request, project_id, skeleton_id):
""" Tests editing permissions of a user on a skeleton and returns the
result as JSON object."""
try:
nn = _get_neuronname_from_skeletonid( project_id, skeleton_id )
can_edit = can_edit_class_instance_or_fail(request.user,
nn['neuronid'])
except:
can_edit = False
permissions = {
'can_edit': can_edit,
}
return HttpResponse(json.dumps(permissions))
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def open_leaves(request, project_id=None, skeleton_id=None):
"""List open leaf nodes in a skeleton.
Return a list of the ID and location of open leaf nodes in a skeleton,
their path length distance to the specified treenode, and their creation
time.
Leaves are considered open if they are not tagged with a tag matching
a particular regex.
.. note:: This endpoint is used interactively by the client so performance
is critical.
---
parameters:
- name: treenode_id
description: ID of the origin treenode for path length distances
required: true
type: integer
paramType: form
models:
open_leaf_node:
id: open_leaf_node
properties:
- description: ID of an open leaf treenode
type: integer
required: true
- description: Node location
type: array
items:
type: number
format: double
required: true
- description: Distance from the query node
type: number
format: double
required: true
- description: Node creation time
type: string
format: date-time
required: true
type:
- type: array
items:
$ref: open_leaf_node
required: true
"""
tnid = int(request.POST['treenode_id'])
cursor = connection.cursor()
cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
labeled_as = cursor.fetchone()[0]
# Select all nodes and their tags
cursor.execute('''
SELECT t.id, t.parent_id, t.location_x, t.location_y, t.location_z, t.creation_time, ci.name
FROM treenode t LEFT OUTER JOIN (treenode_class_instance tci INNER JOIN class_instance ci ON tci.class_instance_id = ci.id AND tci.relation_id = %s) ON t.id = tci.treenode_id
WHERE t.skeleton_id = %s
''' % (labeled_as, int(skeleton_id)))
# Some entries repeated, when a node has more than one tag
# Create a graph with edges from parent to child, and accumulate parents
tree = nx.DiGraph()
for row in cursor.fetchall():
nodeID = row[0]
if row[1]:
# It is ok to add edges that already exist: DiGraph doesn't keep duplicates
tree.add_edge(row[1], nodeID)
else:
tree.add_node(nodeID)
tree.node[nodeID]['loc'] = (row[2], row[3], row[4])
tree.node[nodeID]['ct'] = row[5]
if row[6]:
props = tree.node[nodeID]
tags = props.get('tags')
if tags:
tags.append(row[6])
else:
props['tags'] = [row[6]]
if tnid not in tree:
raise Exception("Could not find %s in skeleton %s" % (tnid, int(skeleton_id)))
reroot(tree, tnid)
distances = edge_count_to_root(tree, root_node=tnid)
# Iterate end nodes to find which are open.
nearest = []
end_tags = ['uncertain continuation', 'not a branch', 'soma',
'^(?i)(really|uncertain|anterior|posterior)?\s?ends?$']
end_regex = re.compile('(?:' + ')|(?:'.join(end_tags) + ')')
for nodeID, out_degree in tree.out_degree_iter():
if 0 == out_degree or nodeID == tnid and 1 == out_degree:
# Found an end node
props = tree.node[nodeID]
# Check if not tagged with a tag containing 'end'
if not 'tags' in props or not any(end_regex.match(s) for s in props['tags']):
# Found an open end
d = distances[nodeID]
nearest.append([nodeID, props['loc'], d, props['ct']])
return HttpResponse(json.dumps(nearest, cls=DjangoJSONEncoder))
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_labels(request, project_id=None, skeleton_id=None):
"""List nodes in a skeleton with labels matching a query.
Find all nodes in this skeleton with labels (front-end node tags) matching
a regular expression, sort them by ascending path distance from a treenode
in the skeleton, and return the result.
---
parameters:
- name: treenode_id
description: ID of the origin treenode for path length distances
required: true
type: integer
paramType: form
- name: label_regex
description: Regular expression query to match labels
required: true
type: string
paramType: form
models:
find_labels_node:
id: find_labels_node
properties:
- description: ID of a node with a matching label
type: integer
required: true
- description: Node location
type: array
items:
type: number
format: double
required: true
- description: Path distance from the origin treenode
type: number
format: double
required: true
- description: Labels on this node matching the query
type: array
items:
type: string
required: true
type:
- type: array
items:
$ref: find_labels_node
      required: true
"""
tnid = int(request.POST['treenode_id'])
label_regex = str(request.POST['label_regex'])
cursor = connection.cursor()
cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
labeled_as = cursor.fetchone()[0]
# Select all nodes in the skeleton and any matching labels
cursor.execute('''
SELECT
t.id,
t.parent_id,
t.location_x,
t.location_y,
t.location_z,
ci.name
FROM treenode t
LEFT OUTER JOIN (
treenode_class_instance tci
INNER JOIN class_instance ci
ON (tci.class_instance_id = ci.id AND tci.relation_id = %s AND ci.name ~ %s))
ON t.id = tci.treenode_id
WHERE t.skeleton_id = %s
''', (labeled_as, label_regex, int(skeleton_id)))
# Some entries repeated, when a node has more than one matching label
# Create a graph with edges from parent to child, and accumulate parents
tree = nx.DiGraph()
for row in cursor.fetchall():
nodeID = row[0]
if row[1]:
# It is ok to add edges that already exist: DiGraph doesn't keep duplicates
tree.add_edge(row[1], nodeID)
else:
tree.add_node(nodeID)
tree.node[nodeID]['loc'] = (row[2], row[3], row[4])
if row[5]:
props = tree.node[nodeID]
tags = props.get('tags')
if tags:
tags.append(row[5])
else:
props['tags'] = [row[5]]
if tnid not in tree:
raise Exception("Could not find %s in skeleton %s" % (tnid, int(skeleton_id)))
reroot(tree, tnid)
distances = edge_count_to_root(tree, root_node=tnid)
nearest = []
for nodeID, props in tree.nodes_iter(data=True):
if 'tags' in props:
# Found a node with a matching label
d = distances[nodeID]
nearest.append([nodeID, props['loc'], d, props['tags']])
nearest.sort(key=lambda n: n[2])
return HttpResponse(json.dumps(nearest))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_statistics(request, project_id=None, skeleton_id=None):
p = get_object_or_404(Project, pk=project_id)
skel = Skeleton( skeleton_id = skeleton_id, project_id = project_id )
const_time = skel.measure_construction_time()
construction_time = '{0} minutes {1} seconds'.format( const_time / 60, const_time % 60)
return HttpResponse(json.dumps({
'node_count': skel.node_count(),
'input_count': skel.input_count(),
'output_count': skel.output_count(),
'presynaptic_sites': skel.presynaptic_sites_count(),
'postsynaptic_sites': skel.postsynaptic_sites_count(),
'cable_length': int(skel.cable_length()),
'measure_construction_time': construction_time,
'percentage_reviewed': "%.2f" % skel.percentage_reviewed() }), content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def contributor_statistics(request, project_id=None, skeleton_id=None):
return contributor_statistics_multiple(request, project_id=project_id, skeleton_ids=[int(skeleton_id)])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def contributor_statistics_multiple(request, project_id=None, skeleton_ids=None):
contributors = defaultdict(int)
n_nodes = 0
# Count the total number of 20-second intervals with at least one treenode in them
n_time_bins = 0
n_review_bins = 0
n_multi_review_bins = 0
epoch = datetime.utcfromtimestamp(0)
if not skeleton_ids:
skeleton_ids = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
# Count time bins separately for each skeleton
time_bins = None
last_skeleton_id = None
for row in Treenode.objects.filter(skeleton_id__in=skeleton_ids).order_by('skeleton').values_list('skeleton_id', 'user_id', 'creation_time').iterator():
if last_skeleton_id != row[0]:
if time_bins:
n_time_bins += len(time_bins)
time_bins = set()
last_skeleton_id = row[0]
n_nodes += 1
contributors[row[1]] += 1
time_bins.add(int((row[2] - epoch).total_seconds() / 20))
# Process last one
if time_bins:
n_time_bins += len(time_bins)
# Take into account that multiple people may have reviewed the same nodes
# Therefore measure the time for the user that has the most nodes reviewed,
# then add the nodes not reviewed by that user but reviewed by the rest
def process_reviews(rev):
seen = set()
min_review_bins = set()
multi_review_bins = 0
for reviewer, treenodes in sorted(rev.iteritems(), key=itemgetter(1), reverse=True):
reviewer_bins = set()
for treenode, timestamp in treenodes.iteritems():
time_bin = int((timestamp - epoch).total_seconds() / 20)
reviewer_bins.add(time_bin)
if not (treenode in seen):
seen.add(treenode)
min_review_bins.add(time_bin)
multi_review_bins += len(reviewer_bins)
#
return len(min_review_bins), multi_review_bins
rev = None
last_skeleton_id = None
for row in Review.objects.filter(skeleton_id__in=skeleton_ids).order_by('skeleton').values_list('reviewer', 'treenode', 'review_time', 'skeleton_id').iterator():
if last_skeleton_id != row[3]:
if rev:
a, b = process_reviews(rev)
n_review_bins += a
n_multi_review_bins += b
# Reset for next skeleton
rev = defaultdict(dict)
last_skeleton_id = row[3]
#
rev[row[0]][row[1]] = row[2]
# Process last one
if rev:
a, b = process_reviews(rev)
n_review_bins += a
n_multi_review_bins += b
relations = {row[0]: row[1] for row in Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id').iterator()}
pre = relations['presynaptic_to']
post = relations['postsynaptic_to']
synapses = {}
synapses[pre] = defaultdict(int)
synapses[post] = defaultdict(int)
    # This may be succinct, but unless one knows SQL it makes no sense at all
for row in TreenodeConnector.objects.filter(
Q(relation_id=pre) | Q(relation_id=post),
skeleton_id__in=skeleton_ids
).values_list('user_id', 'relation_id').iterator():
synapses[row[1]][row[0]] += 1
return HttpResponse(json.dumps({
'construction_minutes': int(n_time_bins / 3.0),
'min_review_minutes': int(n_review_bins / 3.0),
'multiuser_review_minutes': int(n_multi_review_bins / 3.0),
'n_nodes': n_nodes,
'node_contributors': contributors,
'n_pre': sum(synapses[relations['presynaptic_to']].itervalues()),
'n_post': sum(synapses[relations['postsynaptic_to']].itervalues()),
'pre_contributors': synapses[relations['presynaptic_to']],
'post_contributors': synapses[relations['postsynaptic_to']]}))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def node_count(request, project_id=None, skeleton_id=None, treenode_id=None):
# Works with either the skeleton_id or the treenode_id
p = get_object_or_404(Project, pk=project_id)
if not skeleton_id:
skeleton_id = Treenode.objects.get(pk=treenode_id).skeleton_id
return HttpResponse(json.dumps({
'count': Treenode.objects.filter(skeleton_id=skeleton_id).count(),
'skeleton_id': skeleton_id}), content_type='application/json')
def _get_neuronname_from_skeletonid( project_id, skeleton_id ):
p = get_object_or_404(Project, pk=project_id)
qs = ClassInstanceClassInstance.objects.filter(
relation__relation_name='model_of',
project=p,
class_instance_a=int(skeleton_id)).select_related("class_instance_b")
try:
return {'neuronname': qs[0].class_instance_b.name,
'neuronid': qs[0].class_instance_b.id }
except IndexError:
raise Exception("Couldn't find a neuron linking to a skeleton with " \
"ID %s" % skeleton_id)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def neuronname(request, project_id=None, skeleton_id=None):
return HttpResponse(json.dumps(_get_neuronname_from_skeletonid(project_id, skeleton_id)), content_type='application/json')
def _neuronnames(skeleton_ids, project_id):
qs = ClassInstanceClassInstance.objects.filter(
relation__relation_name='model_of',
project=project_id,
class_instance_a__in=skeleton_ids).select_related("class_instance_b").values_list("class_instance_a", "class_instance_b__name")
return dict(qs)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def neuronnames(request, project_id=None):
""" Returns a JSON object with skeleton IDs as keys and neuron names as values. """
skeleton_ids = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
return HttpResponse(json.dumps(_neuronnames(skeleton_ids, project_id)))
def check_annotations_on_split(project_id, skeleton_id, over_annotation_set,
under_annotation_set):
""" With respect to annotations, a split is only correct if one part keeps
the whole set of annotations.
"""
# Get current annotation set
annotation_query = create_annotation_query(project_id,
{'skeleton_id': skeleton_id})
# Check if current set is equal to under or over set
current_annotation_set = frozenset(a.name for a in annotation_query)
if not current_annotation_set.difference(over_annotation_set):
return True
if not current_annotation_set.difference(under_annotation_set):
return True
return False
def check_new_annotations(project_id, user, entity_id, annotation_set):
""" With respect to annotations, the new annotation set is only valid if the
user doesn't remove annotations for which (s)he has no permissions.
"""
# Get current annotation links
annotation_links = ClassInstanceClassInstance.objects.filter(
project_id=project_id,
class_instance_b__class_column__class_name='annotation',
relation__relation_name='annotated_with',
class_instance_a_id=entity_id).values_list(
'class_instance_b__name', 'id', 'user')
# Build annotation name indexed dict to the link's id and user
annotations = {l[0]:(l[1], l[2]) for l in annotation_links}
current_annotation_set = frozenset(annotations.keys())
# If the current annotation set is not included completely in the new
# set, we have to check if the user has permissions to edit the missing
# annotations.
removed_annotations = current_annotation_set - annotation_set
for rl in removed_annotations:
try:
can_edit_or_fail(user, annotations[rl][0],
'class_instance_class_instance')
except:
return False
# Otherwise, everything is fine
return True
def check_annotations_on_join(project_id, user, from_neuron_id, to_neuron_id,
ann_set):
""" With respect to annotations, a join is only correct if the user doesn't
remove annotations for which (s)he has no permissions.
"""
return check_new_annotations(project_id, user, from_neuron_id, ann_set) and \
check_new_annotations(project_id, user, to_neuron_id, ann_set)
@requires_user_role(UserRole.Annotate)
def split_skeleton(request, project_id=None):
""" The split is only possible if the neuron is not locked or if it is
locked by the current user or if the current user belongs to the group
of the user who locked it. Of course, the split is also possible if
the current user is a super-user. Also, all reviews of the treenodes in the
new neuron are updated to refer to the new skeleton.
"""
treenode_id = int(request.POST['treenode_id'])
treenode = Treenode.objects.get(pk=treenode_id)
skeleton_id = treenode.skeleton_id
project_id = int(project_id)
upstream_annotation_map = json.loads(request.POST.get('upstream_annotation_map'))
downstream_annotation_map = json.loads(request.POST.get('downstream_annotation_map'))
cursor = connection.cursor()
# Check if the treenode is root!
if not treenode.parent:
return HttpResponse(json.dumps({'error': 'Can\'t split at the root node: it doesn\'t have a parent.'}))
# Check if annotations are valid
if not check_annotations_on_split(project_id, skeleton_id,
frozenset(upstream_annotation_map.keys()),
frozenset(downstream_annotation_map.keys())):
raise Exception("Annotation distribution is not valid for splitting. " \
"One part has to keep the whole set of annotations!")
skeleton = ClassInstance.objects.select_related('user').get(pk=skeleton_id)
# retrieve neuron of this skeleton
neuron = ClassInstance.objects.get(
cici_via_b__relation__relation_name='model_of',
cici_via_b__class_instance_a_id=skeleton_id)
# Make sure the user has permissions to edit
can_edit_class_instance_or_fail(request.user, neuron.id, 'neuron')
# Retrieve the id, parent_id of all nodes in the skeleton. Also
# pre-emptively lock all treenodes and connectors in the skeleton to prevent
# race conditions resulting in inconsistent skeleton IDs from, e.g., node
# creation or update.
cursor.execute('''
SELECT 1 FROM treenode_connector tc WHERE tc.skeleton_id = %s
ORDER BY tc.id
FOR NO KEY UPDATE OF tc;
SELECT t.id, t.parent_id FROM treenode t WHERE t.skeleton_id = %s
ORDER BY t.id
FOR NO KEY UPDATE OF t
''', (skeleton_id, skeleton_id)) # no need to sanitize
# build the networkx graph from it
graph = nx.DiGraph()
for row in cursor.fetchall():
graph.add_node( row[0] )
if row[1]:
# edge from parent_id to id
graph.add_edge( row[1], row[0] )
# find downstream nodes starting from target treenode_id
# and generate the list of IDs to change, starting at treenode_id (inclusive)
change_list = nx.bfs_tree(graph, treenode_id).nodes()
if not change_list:
# When splitting an end node, the bfs_tree doesn't return any nodes,
        # which is surprising, because when the split-off tree has 2 or more nodes
# the node at which the split is made is included in the list.
change_list.append(treenode_id)
# create a new skeleton
new_skeleton = ClassInstance()
new_skeleton.name = 'Skeleton'
new_skeleton.project_id = project_id
new_skeleton.user = skeleton.user # The same user that owned the skeleton to split
new_skeleton.class_column = Class.objects.get(class_name='skeleton', project_id=project_id)
new_skeleton.save()
new_skeleton.name = 'Skeleton {0}'.format( new_skeleton.id ) # This could be done with a trigger in the database
new_skeleton.save()
# Create new neuron
new_neuron = ClassInstance()
new_neuron.name = 'Neuron'
new_neuron.project_id = project_id
new_neuron.user = skeleton.user
new_neuron.class_column = Class.objects.get(class_name='neuron',
project_id=project_id)
new_neuron.save()
new_neuron.name = 'Neuron %s' % str(new_neuron.id)
new_neuron.save()
# Assign the skeleton to new neuron
cici = ClassInstanceClassInstance()
cici.class_instance_a = new_skeleton
cici.class_instance_b = new_neuron
cici.relation = Relation.objects.get(relation_name='model_of', project_id=project_id)
cici.user = skeleton.user # The same user that owned the skeleton to split
cici.project_id = project_id
cici.save()
# update skeleton_id of list in treenode table
    # Filtering creates a lazy QuerySet; calling update() on it executes a single
    # SQL UPDATE covering all matching rows and returns the number of rows updated.
    # It does NOT create an update SQL query for every treenode.
tns = Treenode.objects.filter(id__in=change_list).update(skeleton=new_skeleton)
# update the skeleton_id value of the treenode_connector table
tc = TreenodeConnector.objects.filter(
relation__relation_name__endswith = 'synaptic_to',
treenode__in=change_list,
).update(skeleton=new_skeleton)
# setting new root treenode's parent to null
Treenode.objects.filter(id=treenode_id).update(parent=None, editor=request.user)
# Update annotations of existing neuron to have only over set
_update_neuron_annotations(project_id, request.user, neuron.id,
upstream_annotation_map)
# Update all reviews of the treenodes that are moved to a new neuron to
# refer to the new skeleton.
Review.objects.filter(treenode_id__in=change_list).update(skeleton=new_skeleton)
# Update annotations of under skeleton
_annotate_entities(project_id, [new_neuron.id], downstream_annotation_map)
# Log the location of the node at which the split was done
location = (treenode.location_x, treenode.location_y, treenode.location_z)
insert_into_log(project_id, request.user.id, "split_skeleton", location,
"Split skeleton with ID {0} (neuron: {1})".format( skeleton_id, neuron.name ) )
return HttpResponse(json.dumps({'skeleton_id': new_skeleton.id}), content_type='application/json')
@api_view(['GET'])
@never_cache
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def root_for_skeleton(request, project_id=None, skeleton_id=None):
"""Retrieve ID and location of the skeleton's root treenode.
---
type:
root_id:
type: integer
required: true
x:
type: number
format: double
required: true
y:
type: number
format: double
required: true
z:
type: number
format: double
required: true
"""
tn = Treenode.objects.get(
project=project_id,
parent__isnull=True,
skeleton_id=skeleton_id)
return HttpResponse(json.dumps({
'root_id': tn.id,
'x': tn.location_x,
'y': tn.location_y,
'z': tn.location_z}),
content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_ancestry(request, project_id=None):
# All of the values() things in this function can be replaced by
# prefetch_related when we upgrade to Django 1.4 or above
skeleton_id = int(request.POST.get('skeleton_id', None))
if skeleton_id is None:
raise Exception('A skeleton id has not been provided!')
relation_map = get_relation_to_id_map(project_id)
for rel in ['model_of', 'part_of']:
if rel not in relation_map:
            raise Exception('Failed to find the required relation %s' % rel)
response_on_error = ''
try:
response_on_error = 'The search query failed.'
neuron_rows = ClassInstanceClassInstance.objects.filter(
class_instance_a=skeleton_id,
relation=relation_map['model_of']).values(
'class_instance_b',
'class_instance_b__name')
neuron_count = neuron_rows.count()
if neuron_count == 0:
raise Exception('No neuron was found that the skeleton %s models' % skeleton_id)
elif neuron_count > 1:
raise Exception('More than one neuron was found that the skeleton %s models' % skeleton_id)
parent_neuron = neuron_rows[0]
ancestry = []
ancestry.append({
'name': parent_neuron['class_instance_b__name'],
'id': parent_neuron['class_instance_b'],
'class': 'neuron'})
# Doing this query in a loop is horrible, but it should be very rare
# for the hierarchy to be more than 4 deep or so. (This is a classic
# problem of not being able to do recursive joins in pure SQL.)
# Detects erroneous cyclic hierarchy.
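        # (Added note: on PostgreSQL a WITH RECURSIVE common table expression could
        # fetch the whole part_of chain in one query; the loop is kept for clarity
        # and because the hierarchy is shallow.)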
current_ci = parent_neuron['class_instance_b']
seen = set([current_ci])
while True:
response_on_error = 'Could not retrieve parent of class instance %s' % current_ci
parents = ClassInstanceClassInstance.objects.filter(
class_instance_a=current_ci,
relation=relation_map['part_of']).values(
'class_instance_b__name',
'class_instance_b',
'class_instance_b__class_column__class_name')
parent_count = parents.count()
if parent_count == 0:
break # We've reached the top of the hierarchy.
elif parent_count > 1:
raise Exception('More than one class_instance was found that the class_instance %s is part_of.' % current_ci)
else:
parent = parents[0]
ancestry.append({
'name': parent['class_instance_b__name'],
'id': parent['class_instance_b'],
'class': parent['class_instance_b__class_column__class_name']
})
current_ci = parent['class_instance_b']
if current_ci in seen:
raise Exception('Cyclic hierarchy detected for skeleton #%s' % skeleton_id)
return HttpResponse(json.dumps(ancestry))
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
def _connected_skeletons(skeleton_ids, op, relation_id_1, relation_id_2, model_of_id, cursor):
def newSynapseCounts():
return [0, 0, 0, 0, 0]
class Partner:
def __init__(self):
self.num_nodes = 0
self.skids = defaultdict(newSynapseCounts) # skid vs synapse count
# Dictionary of partner skeleton ID vs Partner
def newPartner():
return Partner()
partners = defaultdict(newPartner)
# Obtain the synapses made by all skeleton_ids considering the desired direction of the synapse, as specified by relation_id_1 and relation_id_2:
cursor.execute('''
SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
FROM treenode_connector t1,
treenode_connector t2
WHERE t1.skeleton_id IN (%s)
AND t1.relation_id = %s
AND t1.connector_id = t2.connector_id
AND t2.relation_id = %s
''' % (','.join(map(str, skeleton_ids)), int(relation_id_1), int(relation_id_2)))
# Sum the number of synapses
for srcID, partnerID, confidence in cursor.fetchall():
partners[partnerID].skids[srcID][confidence - 1] += 1
# There may not be any synapses
if not partners:
return partners, []
# If op is AND, discard entries where only one of the skids has synapses
if len(skeleton_ids) > 1 and 'AND' == op:
for partnerID in partners.keys(): # keys() is a copy of the keys
if len(skeleton_ids) != len(partners[partnerID].skids):
del partners[partnerID]
# With AND it is possible that no common partners exist
if not partners:
return partners, []
# Obtain a string with unique skeletons
skids_string = '),('.join(map(str, partners.iterkeys()))
# Count nodes of each partner skeleton
cursor.execute('''
SELECT skeleton_id, count(skeleton_id)
FROM treenode,
(VALUES (%s)) skeletons(skid)
WHERE skeleton_id = skid
GROUP BY skeleton_id
''' % skids_string) # no need to sanitize
for row in cursor.fetchall():
partners[row[0]].num_nodes = row[1]
# Find which reviewers have reviewed any partner skeletons
cursor.execute('''
SELECT DISTINCT reviewer_id
FROM review,
(VALUES (%s)) skeletons(skid)
WHERE skeleton_id = skid
''' % skids_string) # no need to sanitize
reviewers = [row[0] for row in cursor]
return partners, reviewers
def _skeleton_info_raw(project_id, skeletons, op):
cursor = connection.cursor()
# Obtain the IDs of the 'presynaptic_to', 'postsynaptic_to' and 'model_of' relations
cursor.execute('''
SELECT relation_name,
id
FROM relation
WHERE project_id=%s
AND (relation_name='presynaptic_to'
OR relation_name='postsynaptic_to'
OR relation_name='model_of')''' % project_id)
relation_ids = dict(cursor.fetchall())
# Obtain partner skeletons and their info
incoming, incoming_reviewers = _connected_skeletons(skeletons, op, relation_ids['postsynaptic_to'], relation_ids['presynaptic_to'], relation_ids['model_of'], cursor)
outgoing, outgoing_reviewers = _connected_skeletons(skeletons, op, relation_ids['presynaptic_to'], relation_ids['postsynaptic_to'], relation_ids['model_of'], cursor)
def prepare(partners):
for partnerID in partners.keys():
partner = partners[partnerID]
skids = partner.skids
# jsonize: swap class instance by its dict of members vs values
if partner.skids:
partners[partnerID] = partner.__dict__
else:
del partners[partnerID]
prepare(incoming)
prepare(outgoing)
return incoming, outgoing, incoming_reviewers, outgoing_reviewers
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_info_raw(request, project_id=None):
"""Retrieve a list of down/up-stream partners of a set of skeletons.
From a queried set of source skeletons, find all upstream and downstream
partners, the number of synapses between each source and each partner,
and a list of reviewers for each partner set. Confidence distributions for
each synapse count are included. Optionally find only those partners
that are common between the source skeleton set.
---
parameters:
- name: source[]
description: IDs of the skeletons whose partners to find
required: true
type: array
items:
type: integer
paramType: form
- name: boolean_op
description: |
Whether to find partners of any source skeleton ("OR") or partners
common to all source skeletons ("AND")
required: true
type: string
paramType: form
models:
skeleton_info_raw_partners:
id: skeleton_info_raw_partners
properties:
'{skeleton_id}':
$ref: skeleton_info_raw_partner
description: Map from partners' skeleton IDs to their information
required: true
skeleton_info_raw_partner:
id: skeleton_info_raw_partner
properties:
skids:
$ref: skeleton_info_raw_partner_counts
required: true
num_nodes:
description: The number of treenodes in this skeleton
required: true
type: integer
skeleton_info_raw_partner_counts:
id: skeleton_info_raw_partner_counts
properties:
'{skeleton_id}':
$ref: skeleton_info_raw_partner_count
description: |
Synapse counts between the partner and the source skeleton with
this ID
required: true
skeleton_info_raw_partner_count:
id: skeleton_info_raw_partner_count
properties:
- description: Number of synapses with confidence 1
type: integer
required: true
- description: Number of synapses with confidence 2
type: integer
required: true
- description: Number of synapses with confidence 3
type: integer
required: true
- description: Number of synapses with confidence 4
type: integer
required: true
- description: Number of synapses with confidence 5
type: integer
required: true
type:
incoming:
$ref: skeleton_info_raw_partners
description: Upstream synaptic partners
required: true
outgoing:
$ref: skeleton_info_raw_partners
description: Downstream synaptic partners
required: true
incoming_reviewers:
description: IDs of reviewers who have reviewed any upstream partners.
required: true
type: array
items:
type: integer
outgoing_reviewers:
description: IDs of reviewers who have reviewed any downstream partners.
required: true
type: array
items:
type: integer
"""
# sanitize arguments
project_id = int(project_id)
skeletons = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('source_skeleton_ids['))
op = str(request.POST.get('boolean_op')) # values: AND, OR
op = {'AND': 'AND', 'OR': 'OR'}[op] # sanitize
incoming, outgoing, incoming_reviewers, outgoing_reviewers = _skeleton_info_raw(project_id, skeletons, op)
return HttpResponse(json.dumps({
'incoming': incoming,
'outgoing': outgoing,
'incoming_reviewers': incoming_reviewers,
'outgoing_reviewers': outgoing_reviewers}),
content_type='application/json')
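# A standalone client-side sketch of calling the view above. The server URL and
# endpoint path are hypothetical placeholders; the parameter names mirror what
# skeleton_info_raw() reads from request.POST, and the skeleton IDs are made up.
import requests

CATMAID_BASE = 'https://example.org/catmaid'  # hypothetical server
project_id = 1
payload = {
    'source_skeleton_ids[0]': 235,  # hypothetical source skeletons
    'source_skeleton_ids[1]': 361,
    'boolean_op': 'OR',             # 'AND' restricts to partners shared by all sources
}
resp = requests.post('%s/%d/skeletons/connectivity' % (CATMAID_BASE, project_id),
                     data=payload)
info = resp.json()
# info['incoming'] and info['outgoing'] map partner skeleton IDs to per-source
# synapse counts and node counts; the *_reviewers lists hold reviewer user IDs.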
@requires_user_role(UserRole.Browse)
def connectivity_matrix(request, project_id=None):
# sanitize arguments
project_id = int(project_id)
rows = tuple(int(v) for k, v in request.POST.iteritems() if k.startswith('rows['))
cols = tuple(int(v) for k, v in request.POST.iteritems() if k.startswith('columns['))
matrix = get_connectivity_matrix(project_id, rows, cols)
return HttpResponse(json.dumps(matrix), content_type='application/json')
def get_connectivity_matrix(project_id, row_skeleton_ids, col_skeleton_ids):
"""
Return a sparse connectivity matrix representation for the given skeleton
IDS. The returned dictionary has a key for each row skeleton having
outgoing connections to one or more column skeletons. Each entry stores a
dictionary that maps the connection partners to the individual outgoing
synapse counts.
"""
cursor = connection.cursor()
relation_map = get_relation_to_id_map(project_id)
post_rel_id = relation_map['postsynaptic_to']
pre_rel_id = relation_map['presynaptic_to']
# Obtain all synapses made between row skeletons and column skeletons.
cursor.execute('''
SELECT t1.skeleton_id, t2.skeleton_id
FROM treenode_connector t1,
treenode_connector t2
WHERE t1.skeleton_id IN (%s)
AND t2.skeleton_id IN (%s)
AND t1.connector_id = t2.connector_id
AND t1.relation_id = %s
AND t2.relation_id = %s
''' % (','.join(map(str, row_skeleton_ids)),
','.join(map(str, col_skeleton_ids)),
pre_rel_id, post_rel_id))
# Build a sparse connectivity representation. For all skeletons requested
# map a dictionary of partner skeletons and the number of synapses
# connecting to each partner.
outgoing = defaultdict(dict)
for r in cursor.fetchall():
source, target = r[0], r[1]
mapping = outgoing[source]
count = mapping.get(target, 0)
mapping[target] = count + 1
return outgoing
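# A self-contained sketch of the sparse-matrix assembly performed above: every
# (presynaptic, postsynaptic) row returned by the synapse query increments one
# counter, so skeleton pairs without synapses simply never appear. The skeleton
# IDs below are made up.
from collections import defaultdict

sample_rows = [(101, 202), (101, 202), (101, 203), (104, 202)]
outgoing = defaultdict(dict)
for source, target in sample_rows:
    mapping = outgoing[source]
    mapping[target] = mapping.get(target, 0) + 1

assert outgoing[101] == {202: 2, 203: 1}
assert outgoing[104] == {202: 1}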
@api_view(['POST'])
@requires_user_role([UserRole.Browse, UserRole.Annotate])
def review_status(request, project_id=None):
"""Retrieve the review status for a collection of skeletons.
The review status for each skeleton in the request is a tuple of total
nodes and number of reviewed nodes (integers). The reviews of only
certain users or a reviewer team may be counted instead of all reviews.
---
parameters:
- name: skeleton_ids[]
description: IDs of the skeletons to retrieve.
required: true
type: array
items:
type: integer
paramType: form
- name: whitelist
description: |
ID of the user whose reviewer team to use to filter reviews
(exclusive to user_ids)
type: integer
paramType: form
- name: user_ids[]
description: |
IDs of the users whose reviews should be counted (exclusive
to whitelist)
type: array
items:
type: integer
paramType: form
models:
review_status_tuple:
id: review_status_tuple
properties:
- description: Total number of treenodes in the skeleton
type: integer
required: true
- description: |
Number of reviewed treenodes in the skeleton matching filters
(if any)
type: integer
required: true
type:
'{skeleton_id}':
$ref: review_status_tuple
required: true
"""
skeleton_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('skeleton_ids['))
whitelist = bool(json.loads(request.POST.get('whitelist', 'false')))
whitelist_id = None
user_ids = None
if whitelist:
whitelist_id = request.user.id
else:
user_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('user_ids['))
status = get_review_status(skeleton_ids, project_id=project_id,
whitelist_id=whitelist_id, user_ids=user_ids)
return HttpResponse(json.dumps(status))
@requires_user_role(UserRole.Annotate)
def reroot_skeleton(request, project_id=None):
""" Any user with an Annotate role can reroot any skeleton.
"""
treenode_id = request.POST.get('treenode_id', None)
treenode = _reroot_skeleton(treenode_id, project_id)
response_on_error = ''
try:
if treenode:
response_on_error = 'Failed to log reroot.'
location = (treenode.location_x, treenode.location_y, treenode.location_z)
insert_into_log(project_id, request.user.id, 'reroot_skeleton',
location, 'Rerooted skeleton for '
'treenode with ID %s' % treenode.id)
return HttpResponse(json.dumps({'newroot': treenode.id}))
# Else, already root
return HttpResponse(json.dumps({'error': 'Node #%s is already '
'root!' % treenode_id}))
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
def _reroot_skeleton(treenode_id, project_id):
""" Returns the treenode instance that is now root,
or False if the treenode was root already. """
if treenode_id is None:
raise Exception('A treenode id has not been provided!')
response_on_error = ''
try:
response_on_error = 'Failed to select treenode with id %s.' % treenode_id
rootnode = Treenode.objects.get(id=treenode_id, project=project_id)
# Obtain the treenode from the response
first_parent = rootnode.parent_id
# If no parent found it is assumed this node is already root
if first_parent is None:
return False
        response_on_error = 'An error occurred while rerooting.'
q_treenode = Treenode.objects.filter(
skeleton_id=rootnode.skeleton_id,
project=project_id).values_list('id', 'parent_id', 'confidence')
nodes = {tnid: (parent_id, confidence) for (tnid, parent_id, confidence) in list(q_treenode)}
# Traverse up the chain of parents, reversing the parent relationships so
# that the selected treenode (with ID treenode_id) becomes the root.
new_parents = []
new_parent = rootnode.id
new_confidence = rootnode.confidence
node = first_parent
while True:
# Store current values to be used in next iteration
parent, confidence = nodes[node]
# Set new values
new_parents.append((node, new_parent, new_confidence))
if parent is None:
# Root has been reached
break
else:
# Prepare next iteration
new_parent = node
new_confidence = confidence
node = parent
# Finally make treenode root
new_parents.append((rootnode.id, 'NULL', 5)) # Reset to maximum confidence.
cursor = connection.cursor()
cursor.execute('''
UPDATE treenode
SET parent_id = v.parent_id,
confidence = v.confidence
FROM (VALUES %s) v(id, parent_id, confidence)
WHERE treenode.id = v.id
''' % ','.join(['(%s,%s,%s)' % node for node in new_parents]))
return rootnode
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
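# A pure-Python sketch of the parent-reversal loop in _reroot_skeleton() on a
# toy skeleton 1 -> 2 -> 3 -> 4 (node 4 is the current root). Rerooting at
# node 1 flips every edge on the path to the old root while carrying each
# edge's confidence along with it. Node IDs and confidences are made up.
toy_nodes = {1: (2, 3), 2: (3, 4), 3: (4, 5), 4: (None, 5)}  # id: (parent, confidence)
new_root = 1
new_parents = []
new_parent, new_confidence = new_root, toy_nodes[new_root][1]
node = toy_nodes[new_root][0]
while True:
    parent, confidence = toy_nodes[node]
    new_parents.append((node, new_parent, new_confidence))
    if parent is None:
        break
    new_parent, new_confidence, node = node, confidence, parent
new_parents.append((new_root, 'NULL', 5))  # the new root has no parent
# 2 now points at 1, 3 at 2, 4 at 3, and node 1 becomes the root.
assert new_parents == [(2, 1, 3), (3, 2, 4), (4, 3, 5), (1, 'NULL', 5)]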
def _root_as_parent(oid):
""" Returns True if the parent group of the given element ID is the root group. """
cursor = connection.cursor()
# Try to select the parent group of the parent group;
# if none, then the parent group is the root group.
cursor.execute('''
SELECT count(*)
FROM class_instance_class_instance cici1,
class_instance_class_instance cici2,
relation r
WHERE cici1.class_instance_a = %s
AND cici1.class_instance_b = cici2.class_instance_a
AND cici1.relation_id = r.id
AND r.relation_name = 'part_of'
AND cici2.class_instance_a = cici1.class_instance_b
AND cici2.relation_id = r.id
''' % int(oid))
return 0 == cursor.fetchone()[0]
@requires_user_role(UserRole.Annotate)
def join_skeleton(request, project_id=None):
""" An user with an Annotate role can join two skeletons if the neurons
modeled by these skeletons are not locked by another user or if the current
user belongs to the group of the user who locked the neurons. A super-user
can join any skeletons.
"""
response_on_error = 'Failed to join'
try:
from_treenode_id = int(request.POST.get('from_id', None))
to_treenode_id = int(request.POST.get('to_id', None))
annotation_set = request.POST.get('annotation_set', None)
if annotation_set:
annotation_set = json.loads(annotation_set)
_join_skeleton(request.user, from_treenode_id, to_treenode_id,
project_id, annotation_set)
response_on_error = 'Could not log actions.'
return HttpResponse(json.dumps({
'message': 'success',
'fromid': from_treenode_id,
'toid': to_treenode_id}))
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
def _join_skeleton(user, from_treenode_id, to_treenode_id, project_id,
annotation_map):
""" Take the IDs of two nodes, each belonging to a different skeleton, and
make to_treenode be a child of from_treenode, and join the nodes of the
skeleton of to_treenode into the skeleton of from_treenode, and delete the
former skeleton of to_treenode. All annotations in annotation_set will be
linked to the skeleton of to_treenode. It is expected that <annotation_map>
is a dictionary, mapping an annotation to an annotator ID. Also, all
reviews of the skeleton that changes ID are changed to refer to the new
skeleton ID. If annotation_map is None, the resulting skeleton will have
all annotations available on both skeletons combined.
"""
if from_treenode_id is None or to_treenode_id is None:
raise Exception('Missing arguments to _join_skeleton')
response_on_error = ''
try:
from_treenode_id = int(from_treenode_id)
to_treenode_id = int(to_treenode_id)
try:
from_treenode = Treenode.objects.get(pk=from_treenode_id)
except Treenode.DoesNotExist:
raise Exception("Could not find a skeleton for treenode #%s" % from_treenode_id)
try:
to_treenode = Treenode.objects.get(pk=to_treenode_id)
except Treenode.DoesNotExist:
raise Exception("Could not find a skeleton for treenode #%s" % to_treenode_id)
from_skid = from_treenode.skeleton_id
from_neuron = _get_neuronname_from_skeletonid( project_id, from_skid )
to_skid = to_treenode.skeleton_id
to_neuron = _get_neuronname_from_skeletonid( project_id, to_skid )
if from_skid == to_skid:
raise Exception('Cannot join treenodes of the same skeleton, this would introduce a loop.')
# Make sure the user has permissions to edit both neurons
can_edit_class_instance_or_fail(
user, from_neuron['neuronid'], 'neuron')
can_edit_class_instance_or_fail(
user, to_neuron['neuronid'], 'neuron')
# Check if annotations are valid, if there is a particular selection
if annotation_map is None:
# Get all current annotations of both skeletons and merge them for
# a complete result set.
from_annotation_info = get_annotation_info(project_id, (from_skid,),
annotations=True, metaannotations=False, neuronnames=False)
to_annotation_info = get_annotation_info(project_id, (to_skid,),
annotations=True, metaannotations=False, neuronnames=False)
# Create a new annotation map with the expected structure of
# 'annotationname' vs. 'annotator id'.
def merge_into_annotation_map(source, skid, target):
skeletons = source['skeletons']
if skeletons and skid in skeletons:
for a in skeletons[skid]['annotations']:
annotation = source['annotations'][a['id']]
target[annotation] = a['uid']
# Merge from after to, so that it overrides entries from the merged
# in skeleton.
annotation_map = dict()
merge_into_annotation_map(to_annotation_info, to_skid, annotation_map)
merge_into_annotation_map(from_annotation_info, from_skid, annotation_map)
else:
if not check_annotations_on_join(project_id, user,
from_neuron['neuronid'], to_neuron['neuronid'],
frozenset(annotation_map.keys())):
raise Exception("Annotation distribution is not valid for joining. " \
"Annotations for which you don't have permissions have to be kept!")
# Reroot to_skid at to_treenode if necessary
response_on_error = 'Could not reroot at treenode %s' % to_treenode_id
_reroot_skeleton(to_treenode_id, project_id)
        # The target skeleton is removed and its treenodes assume
# the skeleton id of the from-skeleton.
response_on_error = 'Could not update Treenode table with new skeleton id for joined treenodes.'
Treenode.objects.filter(skeleton=to_skid).update(skeleton=from_skid)
response_on_error = 'Could not update TreenodeConnector table.'
TreenodeConnector.objects.filter(
skeleton=to_skid).update(skeleton=from_skid)
# Update reviews from 'losing' neuron to now belong to the new neuron
        response_on_error = 'Could not update reviews with new skeleton IDs for joined treenodes.'
Review.objects.filter(skeleton_id=to_skid).update(skeleton=from_skid)
        # Remove skeleton of to_id (deletes the cici part_of link to the neuron by cascade,
# leaving the parent neuron dangling in the object tree).
response_on_error = 'Could not delete skeleton with ID %s.' % to_skid
ClassInstance.objects.filter(pk=to_skid).delete()
# Remove the 'losing' neuron if it is empty
_delete_if_empty(to_neuron['neuronid'])
# Update the parent of to_treenode.
response_on_error = 'Could not update parent of treenode with ID %s' % to_treenode_id
Treenode.objects.filter(id=to_treenode_id).update(parent=from_treenode_id, editor=user)
# Update linked annotations of neuron
response_on_error = 'Could not update annotations of neuron ' \
'with ID %s' % from_neuron['neuronid']
_update_neuron_annotations(project_id, user, from_neuron['neuronid'],
annotation_map)
from_location = (from_treenode.location_x, from_treenode.location_y,
from_treenode.location_z)
insert_into_log(project_id, user.id, 'join_skeleton',
from_location, 'Joined skeleton with ID %s (neuron: ' \
'%s) into skeleton with ID %s (neuron: %s, annotations: %s)' % \
(to_skid, to_neuron['neuronname'], from_skid,
from_neuron['neuronname'], ', '.join(annotation_map.keys())))
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
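# A tiny sketch of the annotation merge performed in _join_skeleton() when no
# explicit annotation_map is given: both skeletons contribute their
# annotation-name -> annotator-ID pairs, and because the winning ('from')
# skeleton is merged last, its annotator wins on conflicts. The annotation
# names and user IDs below are made up.
to_annotations = {'uncertain': 7, 'left lobe': 7}    # annotations of the merged-in skeleton
from_annotations = {'uncertain': 3}                  # annotations of the winning skeleton
annotation_map = dict()
annotation_map.update(to_annotations)
annotation_map.update(from_annotations)
assert annotation_map == {'uncertain': 3, 'left lobe': 7}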
def _import_skeleton(request, project_id, arborescence, neuron_id=None, name=None):
"""Create a skeleton from a networkx directed tree.
Associate the skeleton to the specified neuron, or a new one if none is
provided. Returns a dictionary of the neuron and skeleton IDs, and the
original arborescence with attributes added for treenode IDs.
"""
# TODO: There is significant reuse here of code from create_treenode that
# could be DRYed up.
relation_map = get_relation_to_id_map(project_id)
class_map = get_class_to_id_map(project_id)
new_skeleton = ClassInstance()
new_skeleton.user = request.user
new_skeleton.project_id = project_id
new_skeleton.class_column_id = class_map['skeleton']
if name is not None:
new_skeleton.name = name
else:
new_skeleton.name = 'skeleton'
new_skeleton.save()
new_skeleton.name = 'skeleton %d' % new_skeleton.id
new_skeleton.save()
def relate_neuron_to_skeleton(neuron, skeleton):
return _create_relation(request.user, project_id,
relation_map['model_of'], skeleton, neuron)
if neuron_id is not None:
# Check that the neuron to use exists
if 0 == ClassInstance.objects.filter(pk=neuron_id).count():
neuron_id = None
if neuron_id is not None:
# Raise an Exception if the user doesn't have permission to
# edit the existing neuron.
can_edit_class_instance_or_fail(request.user, neuron_id, 'neuron')
else:
# A neuron does not exist, therefore we put the new skeleton
# into a new neuron.
new_neuron = ClassInstance()
new_neuron.user = request.user
new_neuron.project_id = project_id
new_neuron.class_column_id = class_map['neuron']
if name is not None:
new_neuron.name = name
else:
new_neuron.name = 'neuron'
new_neuron.save()
new_neuron.name = 'neuron %d' % new_neuron.id
new_neuron.save()
neuron_id = new_neuron.id
relate_neuron_to_skeleton(neuron_id, new_skeleton.id)
# For pathological networks this can error, so do it before inserting
# treenodes.
root = find_root(arborescence)
if root is None:
raise Exception('No root, provided graph is malformed!')
# Bulk create the required number of treenodes. This must be done in two
# steps because treenode IDs are not known.
cursor = connection.cursor()
cursor.execute("""
INSERT INTO treenode (project_id, location_x, location_y, location_z,
editor_id, user_id, skeleton_id)
SELECT t.project_id, t.x, t.x, t.x, t.user_id, t.user_id, t.skeleton_id
FROM (VALUES (%(project_id)s, 0, %(user_id)s, %(skeleton_id)s))
AS t (project_id, x, user_id, skeleton_id),
generate_series(1, %(num_treenodes)s)
RETURNING treenode.id;
""" % {
'project_id': project_id,
'user_id': request.user.id,
'skeleton_id': new_skeleton.id,
'num_treenodes': arborescence.number_of_nodes()})
treenode_ids = cursor.fetchall()
# Flatten IDs
treenode_ids = list(chain.from_iterable(treenode_ids))
nx.set_node_attributes(arborescence, 'id', dict(zip(arborescence.nodes(), treenode_ids)))
# Set parent node ID
for n, nbrs in arborescence.adjacency_iter():
for nbr in nbrs:
arborescence.node[nbr]['parent_id'] = arborescence.node[n]['id']
arborescence.node[root]['parent_id'] = 'NULL'
new_location = tuple([arborescence.node[root][k] for k in ('x', 'y', 'z')])
treenode_values = \
'),('.join([','.join(map(str, [n[1][k] for k in ('id', 'x', 'y', 'z', 'parent_id')])) \
for n in arborescence.nodes_iter(data=True)])
cursor.execute("""
UPDATE treenode SET
location_x = v.x,
location_y = v.y,
location_z = v.z,
parent_id = v.parent_id
FROM (VALUES (%s)) AS v(id, x, y, z, parent_id)
WHERE treenode.id = v.id AND treenode.skeleton_id = %s
""" % (treenode_values, new_skeleton.id)) # Include skeleton ID for index performance.
# Log import.
insert_into_log(project_id, request.user.id, 'create_neuron',
new_location, 'Create neuron %d and skeleton '
'%d via import' % (new_neuron.id, new_skeleton.id))
return {'neuron_id': neuron_id, 'skeleton_id': new_skeleton.id, 'graph': arborescence}
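# A minimal sketch of the kind of input _import_skeleton() expects: a networkx
# directed tree whose edges point from parent to child and whose nodes carry
# x/y/z coordinates. It uses the networkx 1.x attribute access seen above, and
# finding the root as the only node without incoming edges mirrors what the
# find_root() helper is assumed to do. Node names and coordinates are made up.
import networkx as nx

arborescence = nx.DiGraph()
arborescence.add_edge('a', 'b')  # 'a' is the parent of 'b'
arborescence.add_edge('b', 'c')
for n, coords in {'a': (0, 0, 0), 'b': (10, 0, 0), 'c': (10, 5, 0)}.items():
    arborescence.node[n].update(zip(('x', 'y', 'z'), coords))

roots = [n for n in arborescence.nodes() if arborescence.in_degree(n) == 0]
assert roots == ['a']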
@requires_user_role(UserRole.Annotate)
def reset_own_reviewer_ids(request, project_id=None, skeleton_id=None):
""" Remove all reviews done by the requsting user in the skeleten with ID
<skeleton_id>.
"""
skeleton_id = int(skeleton_id) # sanitize
Review.objects.filter(skeleton_id=skeleton_id, reviewer=request.user).delete()
insert_into_log(project_id, request.user.id, 'reset_reviews',
None, 'Reset reviews for skeleton %s' % skeleton_id)
return HttpResponse(json.dumps({'status': 'success'}), content_type='application/json')
@requires_user_role(UserRole.Annotate)
def fetch_treenodes(request, project_id=None, skeleton_id=None, with_reviewers=None):
""" Fetch the topology only, optionally with the reviewer IDs. """
skeleton_id = int(skeleton_id)
cursor = connection.cursor()
cursor.execute('''
SELECT id, parent_id
FROM treenode
WHERE skeleton_id = %s
''' % skeleton_id)
if with_reviewers:
reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])
treenode_data = tuple([r[0], r[1], reviews.get(r[0], [])] \
for r in cursor.fetchall())
else:
treenode_data = tuple(cursor.fetchall())
return HttpResponse(json.dumps(treenode_data))
@requires_user_role(UserRole.Browse)
def annotation_list(request, project_id=None):
""" Returns a JSON serialized object that contains information about the
given skeletons.
"""
skeleton_ids = tuple(int(v) for k,v in request.POST.iteritems()
if k.startswith('skeleton_ids['))
annotations = bool(int(request.POST.get("annotations", 0)))
metaannotations = bool(int(request.POST.get("metaannotations", 0)))
neuronnames = bool(int(request.POST.get("neuronnames", 0)))
response = get_annotation_info(project_id, skeleton_ids, annotations,
metaannotations, neuronnames)
return HttpResponse(json.dumps(response), content_type="application/json")
def get_annotation_info(project_id, skeleton_ids, annotations, metaannotations,
neuronnames):
if not skeleton_ids:
raise ValueError("No skeleton IDs provided")
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
cursor = connection.cursor()
# Create a map of skeleton IDs to neuron IDs
cursor.execute("""
SELECT cici.class_instance_a, cici.class_instance_b
FROM class_instance_class_instance cici
WHERE cici.project_id = %s AND
cici.relation_id = %s AND
cici.class_instance_a IN %s
""", (project_id, relations['model_of'], skeleton_ids))
n_to_sk_ids = {n:s for s,n in cursor.fetchall()}
neuron_ids = n_to_sk_ids.keys()
if not neuron_ids:
raise Http404('No skeleton or neuron found')
# Query for annotations of the given skeletons, specifically
# neuron_id, auid, aid and aname.
cursor.execute("""
SELECT cici.class_instance_a AS neuron_id, cici.user_id AS auid,
cici.class_instance_b AS aid, ci.name AS aname
FROM class_instance_class_instance cici INNER JOIN
class_instance ci ON cici.class_instance_b = ci.id
WHERE cici.relation_id = %s AND
cici.class_instance_a IN (%s) AND
ci.class_id = %s
""" % (relations['annotated_with'],
','.join(map(str, neuron_ids)),
classes['annotation']))
# Build result dictionaries: one that maps annotation IDs to annotation
# names and another one that lists annotation IDs and annotator IDs for
# each skeleton ID.
annotations = {}
skeletons = {}
for row in cursor.fetchall():
skid, auid, aid, aname = n_to_sk_ids[row[0]], row[1], row[2], row[3]
if aid not in annotations:
annotations[aid] = aname
skeleton = skeletons.get(skid)
if not skeleton:
skeleton = {'annotations': []}
skeletons[skid] = skeleton
skeleton['annotations'].append({
'uid': auid,
'id': aid,
})
# Assemble response
response = {
'annotations': annotations,
'skeletons': skeletons,
}
# If wanted, get the neuron name of each skeleton
if neuronnames:
cursor.execute("""
SELECT ci.id, ci.name
FROM class_instance ci
WHERE ci.id IN (%s)
""" % (','.join(map(str, neuron_ids))))
response['neuronnames'] = {n_to_sk_ids[n]:name for n,name in cursor.fetchall()}
# If wanted, get the meta annotations for each annotation
if metaannotations and len(annotations):
# Request only ID of annotated annotations, annotator ID, meta
# annotation ID, meta annotation Name
cursor.execute("""
SELECT cici.class_instance_a AS aid, cici.user_id AS auid,
cici.class_instance_b AS maid, ci.name AS maname
FROM class_instance_class_instance cici INNER JOIN
class_instance ci ON cici.class_instance_b = ci.id
WHERE cici.project_id = %s AND
cici.relation_id = %s AND
cici.class_instance_a IN (%s) AND
ci.class_id = %s
""" % (project_id, relations['annotated_with'],
','.join(map(str, annotations.keys())),
classes['annotation']))
# Add this to the response
metaannotations = {}
for row in cursor.fetchall():
aaid, auid, maid, maname = row[0], row[1], row[2], row[3]
if maid not in annotations:
annotations[maid] = maname
annotation = metaannotations.get(aaid)
if not annotation:
annotation = {'annotations': []}
metaannotations[aaid] = annotation
annotation['annotations'].append({
'uid': auid,
'id': maid,
})
response['metaannotations'] = metaannotations
return response
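# A standalone sketch of how the two dictionaries assembled above fit together,
# with made-up IDs: 'annotations' resolves annotation IDs to names once, while
# each skeleton entry only lists (annotation ID, annotator ID) pairs.
sample_rows = [  # (skeleton_id, annotator_id, annotation_id, annotation_name)
    (235, 3, 17, 'uncertain'),
    (235, 7, 18, 'left lobe'),
    (361, 3, 17, 'uncertain'),
]
annotations, skeletons = {}, {}
for skid, auid, aid, aname in sample_rows:
    annotations.setdefault(aid, aname)
    skeletons.setdefault(skid, {'annotations': []})['annotations'].append(
        {'uid': auid, 'id': aid})

assert annotations == {17: 'uncertain', 18: 'left lobe'}
assert skeletons[361] == {'annotations': [{'uid': 3, 'id': 17}]}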
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def list_skeletons(request, project_id):
"""List skeletons matching filtering criteria.
The result set is the intersection of skeletons matching criteria (the
criteria are conjunctive) unless stated otherwise.
---
parameters:
- name: created_by
description: Filter for user ID of the skeletons' creator.
type: integer
paramType: query
- name: reviewed_by
description: Filter for user ID of the skeletons' reviewer.
type: integer
paramType: query
- name: from_date
description: Filter for skeletons with nodes created after this date.
type: string
format: date
paramType: query
- name: to_date
description: Filter for skeletons with nodes created before this date.
type: string
format: date
paramType: query
- name: nodecount_gt
description: |
Filter for skeletons with more nodes than this threshold. Removes
all other criteria.
type: integer
paramType: query
type:
- type: array
items:
type: integer
description: ID of skeleton matching the criteria.
required: true
"""
created_by = request.GET.get('created_by', None)
reviewed_by = request.GET.get('reviewed_by', None)
from_date = request.GET.get('from', None)
to_date = request.GET.get('to', None)
nodecount_gt = int(request.GET.get('nodecount_gt', 0))
# Sanitize
if reviewed_by:
reviewed_by = int(reviewed_by)
if created_by:
created_by = int(created_by)
if from_date:
from_date = datetime.strptime(from_date, '%Y%m%d')
if to_date:
to_date = datetime.strptime(to_date, '%Y%m%d')
response = _list_skeletons(project_id, created_by, reviewed_by, from_date, to_date, nodecount_gt)
return HttpResponse(json.dumps(response), content_type="application/json")
def _list_skeletons(project_id, created_by=None, reviewed_by=None, from_date=None,
to_date=None, nodecount_gt=0):
""" Returns a list of skeleton IDs of which nodes exist that fulfill the
given constraints (if any). It can be constrained who created nodes in this
skeleton during a given period of time. Having nodes that are reviewed by
a certain user is another constraint. And so is the node count that one can
specify which each result node must exceed.
"""
if created_by and reviewed_by:
raise ValueError("Please specify node creator or node reviewer")
if reviewed_by:
params = [project_id, reviewed_by]
query = '''
SELECT DISTINCT r.skeleton_id
FROM review r
WHERE r.project_id=%s AND r.reviewer_id=%s
'''
if from_date:
params.append(from_date.isoformat())
query += " AND r.review_time >= %s"
if to_date:
to_date = to_date + timedelta(days=1)
params.append(to_date.isoformat())
query += " AND r.review_time < %s"
else:
params = [project_id]
query = '''
SELECT DISTINCT skeleton_id
FROM treenode t
WHERE t.project_id=%s
'''
if created_by:
params.append(created_by)
query += " AND t.user_id=%s"
if from_date:
params.append(from_date.isoformat())
query += " AND t.creation_time >= %s"
if to_date:
to_date = to_date + timedelta(days=1)
params.append(to_date.isoformat())
query += " AND t.creation_time < %s"
if nodecount_gt > 0:
params.append(nodecount_gt)
query = '''
SELECT sub.skeleton_id
FROM (
SELECT t.skeleton_id AS skeleton_id, COUNT(*) AS count
FROM (%s) q JOIN treenode t ON q.skeleton_id = t.skeleton_id
GROUP BY t.skeleton_id
) AS sub WHERE sub.count > %%s
''' % query
cursor = connection.cursor()
cursor.execute(query, params)
return [r[0] for r in cursor.fetchall()]
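# A self-contained sketch of the parameterised query composition used in
# _list_skeletons(): each filter appends both an SQL fragment and its value,
# and the node-count constraint wraps whatever query has been built so far.
# The project ID, user ID and threshold are made up; nothing is executed here.
params = [1]  # project_id
query = 'SELECT DISTINCT skeleton_id FROM treenode t WHERE t.project_id=%s'

created_by = 42
if created_by:
    params.append(created_by)
    query += ' AND t.user_id=%s'

nodecount_gt = 500
if nodecount_gt > 0:
    params.append(nodecount_gt)
    query = ('SELECT sub.skeleton_id FROM ('
             'SELECT t.skeleton_id AS skeleton_id, COUNT(*) AS count '
             'FROM (%s) q JOIN treenode t ON q.skeleton_id = t.skeleton_id '
             'GROUP BY t.skeleton_id) AS sub WHERE sub.count > %%s' % query)

# One placeholder per accumulated parameter, ready for cursor.execute(query, params).
assert params == [1, 42, 500]
assert query.count('%s') == len(params)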
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def adjacency_matrix(request, project_id=None):
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
nodeslist = [ {'group': 1,
'id': k,
'name': d['neuronname']} for k,d in skelgroup.graph.nodes_iter(data=True) ]
nodesid_list = [ele['id'] for ele in nodeslist]
data = {
'nodes': nodeslist,
'links': [ {'id': '%i_%i' % (u,v),
'source': nodesid_list.index(u),
'target': nodesid_list.index(v),
'value': d['count']} for u,v,d in skelgroup.graph.edges_iter(data=True) ]
}
return HttpResponse(json.dumps(data, sort_keys=True, indent=4), content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_subgraph(request, project_id=None):
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
data = {
'nodes': [ {'id': str(k),
'label': str(d['baseName']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']
} for k,d in skelgroup.graph.nodes_iter(data=True) ],
'edges': [ {'id': '%i_%i' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True} for u,v,d in skelgroup.graph.edges_iter(data=True) ]
}
return HttpResponse(json.dumps(data, sort_keys=True, indent=4), content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_confidence_compartment_subgraph(request, project_id=None):
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
confidence = int(request.POST.get('confidence_threshold', 5))
p = get_object_or_404(Project, pk=project_id)
# skelgroup = SkeletonGroup( skeletonlist, p.id )
    # Split the skeleton group into compartments using the confidence threshold
resultgraph = compartmentalize_skeletongroup_by_confidence( skeletonlist, p.id, confidence )
data = {
'nodes': [ { 'data': {'id': str(k),
'label': str(d['neuronname']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']} } for k,d in resultgraph.nodes_iter(data=True) ],
'edges': [ { 'data': {'id': '%s_%s' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True}} for u,v,d in resultgraph.edges_iter(data=True) ]
}
return HttpResponse(json.dumps(data, sort_keys=True, indent=4), content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_edgecount_compartment_subgraph(request, project_id=None):
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
edgecount = int(request.POST.get('edgecount', 10))
p = get_object_or_404(Project, pk=project_id)
# skelgroup = SkeletonGroup( skeletonlist, p.id )
    # Split the skeleton group into compartments using the edge-count threshold
resultgraph = compartmentalize_skeletongroup_by_edgecount( skeletonlist, p.id, edgecount )
data = {
'nodes': [ { 'data': {'id': str(k),
'label': str(d['neuronname']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']} } for k,d in resultgraph.nodes_iter(data=True) ],
'edges': [ { 'data': {'id': '%s_%s' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True}} for u,v,d in resultgraph.edges_iter(data=True) ]
}
return HttpResponse(json.dumps(data, sort_keys=True, indent=4), content_type='application/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def all_shared_connectors(request, project_id=None):
skeletonlist = request.POST.getlist('skeletonlist[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
return HttpResponse(json.dumps(dict.fromkeys(skelgroup.all_shared_connectors()) ), content_type='application/json')
|
aschampion/CATMAID
|
django/applications/catmaid/control/skeleton.py
|
Python
|
gpl-3.0
| 71,881
|
[
"NEURON"
] |
c0acc1eaaf210f6a3d62d1b80726d26f3d4a557bb487c15b42c77777a66c8c01
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from pysces.version import __version__
__doc__ = '''PySCeS ModelMap module: useful for exploring model component relations'''
class ModelMapBase(object):
name = None
def getName(self):
return self.name
def setName(self,name):
self.name = name
def get(self, attr):
"""Return an attribute whose name is str(attr)"""
try:
return getattr(self, attr)
except:
print "%s is not an attribute of this instance" % attr
return None
class MapList(list):
def __init__(self, *args):
list.__init__(self,*args)
def asSet(self):
return set(self.__getslice__(0, self.__len__()))
class ModelMap(ModelMapBase):
__nDict__ = None
reactions = None
species = None
species_fixed = None
compartments = None
__model__ = None
__InitStrings__ = None
__InitDict__ = None
__not_inited__ = None
global_parameters = None
__parameter_store__ = None
def __init__(self, model):
self.setName(model.ModelFile[:-4])
self.__nDict__ = model.__nDict__
self.__model__ = model
self.__InitDict__ = self.__model__.__InitDict__.copy()
self.__compartments__ = self.__model__.__compartments__.copy()
for k in self.__InitDict__.keys():
self.__InitDict__[k] = getattr(self.__model__, k)
self.global_parameters = []
self.__parameter_store__ = []
self.__not_inited__ = []
#operational shortcuts
self.addSpecies()
self.addReactions()
self.generateMappings()
self.addCompartments()
def __cleanString__(self,s):
s = s.lstrip()
s = s.rstrip()
return s
def addOneSpecies(self, species, fix=False):
s = Species(species)
s.setValue(self.__InitDict__[s.name])
if fix: s.fixed = True
setattr(self, species, s)
self.species.append(s)
if fix: self.species_fixed.append(s)
def addCompartments(self):
self.compartments = []
for c in self.__model__.__compartments__:
co = Compartment(self.__model__.__compartments__[c]['name'], self.__model__.__compartments__[c]['size'])
self.compartments.append(co)
setattr(self, c, co)
cname = [c.name for c in self.compartments]
for s in self.__model__.__sDict__.keys():
if self.__model__.__sDict__[s]['compartment'] in cname:
getattr(self, self.__model__.__sDict__[s]['compartment']).setComponent(getattr(self, s))
getattr(self, s).compartment = getattr(self, self.__model__.__sDict__[s]['compartment'])
for r in self.__model__.__nDict__.keys():
if self.__model__.__nDict__[r]['compartment'] in cname:
getattr(self, self.__model__.__nDict__[r]['compartment']).setComponent(getattr(self, r))
getattr(self, r).compartment = getattr(self, self.__model__.__nDict__[r]['compartment'])
def addOneReaction(self, reaction):
r = Reaction(reaction)
r.addFormula(self.__nDict__[r.name]['RateEq'].replace('self.',''))
if self.__nDict__[r.name]['Type'] == 'Irrev': r.reversible = False
fxnames = self.hasFixedSpecies()
for p in self.__nDict__[r.name]['Params']:
p = p.replace('self.','')
if p not in self.hasGlobalParameters() and p not in fxnames and not self.__compartments__.has_key(p):
if self.__InitDict__.has_key(p):
par = Parameter(p, self.__InitDict__[p])
else:
par = Parameter(p)
if p not in self.__not_inited__: self.__not_inited__.append(p)
par.setAssociation(r)
self.global_parameters.append(par)
setattr(self, p, par)
r.addParameter(par)
elif p not in fxnames and not self.__compartments__.has_key(p):
pidx = self.hasGlobalParameters().index(p)
self.global_parameters[pidx].setAssociation(r)
r.addParameter(self.global_parameters[pidx])
setattr(self, reaction, r)
self.reactions.append(r)
def addSpecies(self):
self.species = []
self.species_fixed = []
for s in self.__model__.species:
self.addOneSpecies(s, fix=False)
for s in self.__model__.fixed_species:
self.addOneSpecies(s, fix=True)
def addReactions(self):
self.reactions = []
for r in self.__model__.reactions:
self.addOneReaction(r)
def generateMappings(self):
for reac in self.reactions:
for reag in self.__nDict__[reac.name]['Reagents']:
if self.__nDict__[reac.name]['Reagents'][reag] < 0.0:
reac.addSubstrate(getattr(self, reag.replace('self.','')))
getattr(self, reag.replace('self.','')).setSubstrate(getattr(self, reac.name))
else:
reac.addProduct(getattr(self, reag.replace('self.','')))
getattr(self, reag.replace('self.','')).setProduct(getattr(self, reac.name))
reac.stoichiometry.setdefault(reag.replace('self.',''), self.__nDict__[reac.name]['Reagents'][reag])
for mod in self.__nDict__[reac.name]['Modifiers']:
reac.addModifier(getattr(self, mod.replace('self.','')))
getattr(self, mod.replace('self.','')).setModifier(getattr(self, reac.name))
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def hasSpecies(self):
return MapList([s.name for s in self.species])
def hasFixedSpecies(self):
return MapList([s.name for s in self.species_fixed])
def findReactionsThatIncludeAllSpecifiedReagents(self, *args):
assert len(args) > 1, '\nNeed two or more species for this one!'
setlist = [getattr(self, s).isReagentOf().asSet() for s in args]
isect = setlist[0]
for s in setlist:
isect.intersection_update(s)
return MapList(isect)
def hasGlobalParameters(self):
return MapList(p.name for p in self.global_parameters)
class Reaction(ModelMapBase):
modifiers = None
substrates = None
products = None
stoichiometry = None
parameters = None
reversible = True
formula = None
compartment = None
def __init__(self, name):
self.setName(name)
self.modifiers = []
self.substrates = []
self.products = []
self.stoichiometry = {}
self.parameters = []
def addSubstrate(self, species):
setattr(self, species.name, species)
self.substrates.append(species)
def addProduct(self, species):
setattr(self, species.name, species)
self.products.append(species)
def addModifier(self, species):
setattr(self, species.name, species)
self.modifiers.append(species)
def addFormula(self, formula):
self.formula = formula
def addParameter(self, par):
setattr(self, par.name, par)
self.parameters.append(par)
    def hasProducts(self):
return MapList([p.name for p in self.products])
def hasSubstrates(self):
return MapList([s.name for s in self.substrates])
def hasModifiers(self):
return MapList([m.name for m in self.modifiers])
def hasParameters(self):
return MapList([p.name for p in self.parameters])
def hasReagents(self):
return MapList(self.hasSubstrates() + self.hasProducts())
class NumberBase(ModelMapBase):
value = None
def __call__(self):
return self.value
def getValue(self):
return self.value
def setValue(self, v):
self.value = v
class Species(NumberBase):
subs = None
prods = None
mods = None
fixed = False
compartment = None
def __init__(self, name):
self.setName(name)
self.subs = []
self.prods = []
self.mods = []
def setSubstrate(self, reaction):
setattr(self, reaction.name, reaction)
self.subs.append(reaction)
def setProduct(self, reaction):
setattr(self, reaction.name, reaction)
self.prods.append(reaction)
def setModifier(self, reaction):
setattr(self, reaction.name, reaction)
self.mods.append(reaction)
def isSubstrateOf(self):
return MapList([r.name for r in self.subs])
def isProductOf(self):
return MapList([r.name for r in self.prods])
def isModifierOf(self):
return MapList([r.name for r in self.mods])
def isReagentOf(self):
return MapList(self.isSubstrateOf() + self.isProductOf())
class Parameter(NumberBase):
association = None
formula = None
def __init__(self, name, value=None):
self.name = name
self.value = value
self.association = []
def setAssociation(self, reac):
self.association.append(reac)
setattr(self, reac.name, reac)
def isParameterOf(self):
return MapList([a.name for a in self.association])
def setFormula(self, formula):
self.formula = formula
class Compartment(NumberBase):
components = None
def __init__(self, name, value=None):
self.name = name
self.value = value
self.components = []
def setComponent(self, comp):
self.components.append(comp)
setattr(self, comp.name, comp)
def hasComponents(self):
return MapList([a.name for a in self.components])
if __name__ == '__main__':
import pysces
M = pysces.model('pysces_model_linear1')
M.doLoad()
print '\nModel', M.ModelFile
print '============='
modmap = ModelMap(M)
print 'Reactions\n', modmap.hasReactions()
print 'Species\n', modmap.hasSpecies()
print 'FixedSpecies\n', modmap.hasFixedSpecies()
print ' '
print 'R1 has reagents\n', modmap.R1.hasReagents()
print 'R1 has sub\n', modmap.R1.hasSubstrates()
print 'R1 has prod\n', modmap.R1.hasProducts()
print 'R1 has mod\n', modmap.R1.hasModifiers()
print ' '
print 's2 is reagent\n', modmap.s2.isReagentOf()
print 's2 is sub\n', modmap.s2.isSubstrateOf()
print 's2 is prod\n', modmap.s2.isProductOf()
print 's2 is mod\n', modmap.s2.isModifierOf()
print ' '
print 'R2 stoich\n', modmap.R2.stoichiometry
print ' '
    print 'findReactionsThatIncludeAllSpecifiedReagents(s1, s2):', modmap.findReactionsThatIncludeAllSpecifiedReagents('s1','s2')
print '\nmodmap.hasGlobalParameters\n', modmap.hasGlobalParameters()
print '\nParameter associations'
for p in modmap.global_parameters:
print '%s.isParameterOf() %s' % (p.name, p.isParameterOf())
|
asttra/pysces
|
pysces/PyscesModelMap.py
|
Python
|
bsd-3-clause
| 11,732
|
[
"PySCeS"
] |
1031f7eaaa528e231082045a7862c400f6f9a47cdbc5dbe5a21fe9041df1043e
|
# -*- coding: utf-8 -*-
"""ANTS Apply Transforms interface
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range, str
import os
from .base import ANTSCommand, ANTSCommandInputSpec
from ..base import TraitedSpec, File, traits, isdefined, InputMultiPath
from ...utils.filemanip import split_filename
class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(4, 3, argstr='%d', usedefault=True,
desc='image dimension (3 or 4)', position=1)
input_image = File(argstr='%s', mandatory=True, copyfile=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'))
out_postfix = traits.Str('_wtsimt', argstr='%s', usedefault=True,
                             desc=('Postfix that is appended to all output '
'files (default = _wtsimt)'))
reference_image = File(argstr='-R %s', xor=['tightest_box'],
desc='reference image space that you wish to warp INTO')
tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overridden by '
'reference_image if given)'),
xor=['reference_image'])
reslice_by_header = traits.Bool(argstr='--reslice-by-header',
desc=('Uses orientation matrix and origin encoded in '
'reference image file header. Not typically used '
'with additional transforms'))
use_nearest = traits.Bool(argstr='--use-NN',
desc='Use nearest neighbor interpolation')
use_bspline = traits.Bool(argstr='--use-Bspline',
desc='Use 3rd order B-Spline interpolation')
transformation_series = InputMultiPath(File(exists=True), argstr='%s',
desc='transformation file(s) to be applied',
mandatory=True, copyfile=False)
invert_affine = traits.List(traits.Int,
                                desc=('List of Affine transformations to invert. '
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
'found in transformation_series. Note that indexing '
'starts with 1 and does not include warp fields. Affine '
'transformations are distinguished '
'from warp fields by the word "affine" included in their filenames.'))
class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class WarpTimeSeriesImageMultiTransform(ANTSCommand):
"""Warps a time-series from one space to another
Examples
--------
>>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform
>>> wtsimt = WarpTimeSeriesImageMultiTransform()
>>> wtsimt.inputs.input_image = 'resting.nii'
>>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz'
>>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
>>> wtsimt.cmdline # doctest: +ALLOW_UNICODE
'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \
ants_Affine.txt'
>>> wtsimt = WarpTimeSeriesImageMultiTransform()
>>> wtsimt.inputs.input_image = 'resting.nii'
>>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz'
>>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
    >>> wtsimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: ants_Affine.txt
>>> wtsimt.cmdline # doctest: +ALLOW_UNICODE
'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \
-i ants_Affine.txt'
"""
_cmd = 'WarpTimeSeriesImageMultiTransform'
input_spec = WarpTimeSeriesImageMultiTransformInputSpec
output_spec = WarpTimeSeriesImageMultiTransformOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'out_postfix':
_, name, ext = split_filename(
os.path.abspath(self.inputs.input_image))
return name + val + ext
if opt == 'transformation_series':
series = []
affine_counter = 0
affine_invert = []
for transformation in val:
if 'Affine' in transformation and \
isdefined(self.inputs.invert_affine):
affine_counter += 1
if affine_counter in self.inputs.invert_affine:
series += ['-i']
affine_invert.append(affine_counter)
series += [transformation]
if isdefined(self.inputs.invert_affine):
diff_inv = set(self.inputs.invert_affine) - set(affine_invert)
if diff_inv:
raise Exceptions("Review invert_affine, not all indexes from invert_affine were used, "
"check the description for the full definition")
return ' '.join(series)
return super(WarpTimeSeriesImageMultiTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.input_image))
outputs['output_image'] = os.path.join(os.getcwd(),
''.join((name,
self.inputs.out_postfix,
ext)))
return outputs
def _run_interface(self, runtime, correct_return_codes=[0]):
runtime = super(WarpTimeSeriesImageMultiTransform, self)._run_interface(runtime, correct_return_codes=[0, 1])
if "100 % complete" not in runtime.stdout:
self.raise_exception(runtime)
return runtime
class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=True,
desc='image dimension (2 or 3)', position=1)
input_image = File(argstr='%s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'), position=2)
output_image = File(genfile=True, hash_files=False, argstr='%s',
desc='name of the output warped image', position=3, xor=['out_postfix'])
out_postfix = File("_wimt", usedefault=True, hash_files=False,
                       desc=('Postfix that is appended to all output '
'files (default = _wimt)'), xor=['output_image'])
reference_image = File(argstr='-R %s', xor=['tightest_box'],
desc='reference image space that you wish to warp INTO')
tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overridden by '
'reference_image if given)'),
xor=['reference_image'])
reslice_by_header = traits.Bool(argstr='--reslice-by-header',
desc=('Uses orientation matrix and origin encoded in '
'reference image file header. Not typically used '
'with additional transforms'))
use_nearest = traits.Bool(argstr='--use-NN',
desc='Use nearest neighbor interpolation')
use_bspline = traits.Bool(argstr='--use-BSpline',
desc='Use 3rd order B-Spline interpolation')
transformation_series = InputMultiPath(File(exists=True), argstr='%s',
desc='transformation file(s) to be applied',
mandatory=True, position=-1)
invert_affine = traits.List(traits.Int,
                                desc=('List of Affine transformations to invert. '
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
'found in transformation_series. Note that indexing '
'starts with 1 and does not include warp fields. Affine '
'transformations are distinguished '
'from warp fields by the word "affine" included in their filenames.'))
class WarpImageMultiTransformOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class WarpImageMultiTransform(ANTSCommand):
"""Warps an image from one space to another
Examples
--------
>>> from nipype.interfaces.ants import WarpImageMultiTransform
>>> wimt = WarpImageMultiTransform()
>>> wimt.inputs.input_image = 'structural.nii'
>>> wimt.inputs.reference_image = 'ants_deformed.nii.gz'
>>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
>>> wimt.cmdline # doctest: +ALLOW_UNICODE
'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \
ants_Affine.txt'
>>> wimt = WarpImageMultiTransform()
>>> wimt.inputs.input_image = 'diffusion_weighted.nii'
>>> wimt.inputs.reference_image = 'functional.nii'
>>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', \
'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt']
>>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt'
>>> wimt.cmdline # doctest: +ALLOW_UNICODE
'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \
-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt'
"""
_cmd = 'WarpImageMultiTransform'
input_spec = WarpImageMultiTransformInputSpec
output_spec = WarpImageMultiTransformOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
_, name, ext = split_filename(
os.path.abspath(self.inputs.input_image))
return ''.join((name, self.inputs.out_postfix, ext))
return None
def _format_arg(self, opt, spec, val):
if opt == 'transformation_series':
series = []
affine_counter = 0
affine_invert = []
for transformation in val:
if "affine" in transformation.lower() and \
isdefined(self.inputs.invert_affine):
affine_counter += 1
if affine_counter in self.inputs.invert_affine:
series += ['-i']
affine_invert.append(affine_counter)
series += [transformation]
if isdefined(self.inputs.invert_affine):
diff_inv = set(self.inputs.invert_affine) - set(affine_invert)
if diff_inv:
raise Exceptions("Review invert_affine, not all indexes from invert_affine were used, "
"check the description for the full definition")
return ' '.join(series)
return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.output_image):
outputs['output_image'] = os.path.abspath(self.inputs.output_image)
else:
outputs['output_image'] = os.path.abspath(
self._gen_filename('output_image'))
return outputs
class ApplyTransformsInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d',
desc=('This option forces the image to be treated '
'as a specified-dimensional image. If not '
'specified, antsWarp tries to infer the '
'dimensionality from the input image.'))
input_image_type = traits.Enum(0, 1, 2, 3,
argstr='--input-image-type %d',
desc=('Option specifying the input image '
'type of scalar (default), vector, '
'tensor, or time series.'))
input_image = File(argstr='--input %s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'),
exists=True)
output_image = traits.Str(argstr='--output %s', desc='output file name',
genfile=True, hash_files=False)
out_postfix = traits.Str("_trans", usedefault=True,
desc=('Postfix that is appended to all output '
'files (default = _trans)'))
reference_image = File(argstr='--reference-image %s', mandatory=True,
desc='reference image space that you wish to warp INTO',
exists=True)
interpolation = traits.Enum('Linear',
'NearestNeighbor',
'CosineWindowedSinc',
'WelchWindowedSinc',
'HammingWindowedSinc',
'LanczosWindowedSinc',
'MultiLabel',
'Gaussian',
'BSpline',
argstr='%s', usedefault=True)
interpolation_parameters = traits.Either(
traits.Tuple(traits.Int()), # BSpline (order)
traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha)
traits.Float())
)
transforms = traits.Either(
InputMultiPath(File(exists=True)), 'identity', argstr='%s', mandatory=True,
desc='transform files: will be applied in reverse order. For '
'example, the last specified transform will be applied first.')
invert_transform_flags = InputMultiPath(traits.Bool())
default_value = traits.Float(0.0, argstr='--default-value %g', usedefault=True)
print_out_composite_warp_file = traits.Bool(False, requires=["output_image"],
desc='output a composite warp file instead of a transformed image')
float = traits.Bool(argstr='--float %d', default=False, desc='Use float instead of double for computations.')
class ApplyTransformsOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class ApplyTransforms(ANTSCommand):
"""ApplyTransforms, applied to an input image, transforms it according to a
reference image and a transform (or a set of transforms).
Examples
--------
>>> from nipype.interfaces.ants import ApplyTransforms
>>> at = ApplyTransforms()
>>> at.inputs.input_image = 'moving1.nii'
>>> at.inputs.reference_image = 'fixed1.nii'
>>> at.inputs.transforms = 'identity'
>>> at.cmdline # doctest: +ALLOW_UNICODE
'antsApplyTransforms --default-value 0 --input moving1.nii \
--interpolation Linear --output moving1_trans.nii \
--reference-image fixed1.nii -t identity'
>>> at = ApplyTransforms()
>>> at.inputs.dimension = 3
>>> at.inputs.input_image = 'moving1.nii'
>>> at.inputs.reference_image = 'fixed1.nii'
>>> at.inputs.output_image = 'deformed_moving1.nii'
>>> at.inputs.interpolation = 'Linear'
>>> at.inputs.default_value = 0
>>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
>>> at.inputs.invert_transform_flags = [False, False]
>>> at.cmdline # doctest: +ALLOW_UNICODE
'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation Linear \
--output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \
--transform [ trans.mat, 0 ]'
>>> at1 = ApplyTransforms()
>>> at1.inputs.dimension = 3
>>> at1.inputs.input_image = 'moving1.nii'
>>> at1.inputs.reference_image = 'fixed1.nii'
>>> at1.inputs.output_image = 'deformed_moving1.nii'
>>> at1.inputs.interpolation = 'BSpline'
>>> at1.inputs.interpolation_parameters = (5,)
>>> at1.inputs.default_value = 0
>>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
>>> at1.inputs.invert_transform_flags = [False, False]
>>> at1.cmdline # doctest: +ALLOW_UNICODE
'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation BSpline[ 5 ] \
--output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \
--transform [ trans.mat, 0 ]'
"""
_cmd = 'antsApplyTransforms'
input_spec = ApplyTransformsInputSpec
output_spec = ApplyTransformsOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
output = self.inputs.output_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + self.inputs.out_postfix + ext
return output
return None
def _get_transform_filenames(self):
retval = []
for ii in range(len(self.inputs.transforms)):
if isdefined(self.inputs.invert_transform_flags):
if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags):
invert_code = 1 if self.inputs.invert_transform_flags[
ii] else 0
retval.append("--transform [ %s, %d ]" %
(self.inputs.transforms[ii], invert_code))
else:
raise Exception(("ERROR: The useInverse list must have the same number "
"of entries as the transformsFileName list."))
else:
retval.append("--transform %s" % self.inputs.transforms[ii])
return " ".join(retval)
def _get_output_warped_filename(self):
if isdefined(self.inputs.print_out_composite_warp_file):
return "--output [ %s, %d ]" % (self._gen_filename("output_image"),
int(self.inputs.print_out_composite_warp_file))
else:
return "--output %s" % (self._gen_filename("output_image"))
def _format_arg(self, opt, spec, val):
if opt == "output_image":
return self._get_output_warped_filename()
elif opt == "transforms":
if val == 'identity':
return '-t identity'
return self._get_transform_filenames()
elif opt == 'interpolation':
if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \
isdefined(self.inputs.interpolation_parameters):
return '--interpolation %s[ %s ]' % (self.inputs.interpolation,
', '.join([str(param)
for param in self.inputs.interpolation_parameters]))
else:
return '--interpolation %s' % self.inputs.interpolation
return super(ApplyTransforms, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.abspath(
self._gen_filename('output_image'))
return outputs
class ApplyTransformsToPointsInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d',
desc=('This option forces the image to be treated '
'as a specified-dimensional image. If not '
'specified, antsWarp tries to infer the '
'dimensionality from the input image.'))
input_file = File(argstr='--input %s', mandatory=True,
desc=("Currently, the only input supported is a csv file with "
"columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers."
"The points should be defined in physical space."
"If in doubt how to convert coordinates from your files to the space"
"required by antsApplyTransformsToPoints try creating/drawing a simple"
"label volume with only one voxel set to 1 and all others set to 0."
"Write down the voxel coordinates. Then use ImageMaths LabelStats to find"
"out what coordinates for this voxel antsApplyTransformsToPoints is"
"expecting."),
exists=True)
output_file = traits.Str(argstr='--output %s',
desc='Name of the output CSV file', name_source=['input_file'],
hash_files=False, name_template='%s_transformed.csv')
transforms = traits.List(File(exists=True), argstr='%s', mandatory=True,
desc='transforms that will be applied to the points')
invert_transform_flags = traits.List(traits.Bool(),
desc='list indicating if a transform should be reversed')
class ApplyTransformsToPointsOutputSpec(TraitedSpec):
output_file = File(exists=True, desc='csv file with transformed coordinates')
class ApplyTransformsToPoints(ANTSCommand):
"""ApplyTransformsToPoints, applied to an CSV file, transforms coordinates
using provided transform (or a set of transforms).
Examples
--------
    >>> from nipype.interfaces.ants import ApplyTransformsToPoints
>>> at = ApplyTransformsToPoints()
>>> at.inputs.dimension = 3
>>> at.inputs.input_file = 'moving.csv'
>>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz']
>>> at.inputs.invert_transform_flags = [False, False]
>>> at.cmdline # doctest: +ALLOW_UNICODE
'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv \
--transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]'
"""
_cmd = 'antsApplyTransformsToPoints'
input_spec = ApplyTransformsToPointsInputSpec
output_spec = ApplyTransformsToPointsOutputSpec
def _get_transform_filenames(self):
retval = []
for ii in range(len(self.inputs.transforms)):
if isdefined(self.inputs.invert_transform_flags):
if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags):
                    invert_code = 1 if self.inputs.invert_transform_flags[ii] else 0
retval.append("--transform [ %s, %d ]" %
(self.inputs.transforms[ii], invert_code))
else:
raise Exception(("ERROR: The useInverse list must have the same number "
"of entries as the transformsFileName list."))
else:
retval.append("--transform %s" % self.inputs.transforms[ii])
return " ".join(retval)
def _format_arg(self, opt, spec, val):
if opt == "transforms":
return self._get_transform_filenames()
return super(ApplyTransformsToPoints, self)._format_arg(opt, spec, val)
|
mick-d/nipype
|
nipype/interfaces/ants/resampling.py
|
Python
|
bsd-3-clause
| 24,240
|
[
"Gaussian"
] |
dae0ea53939442b45a4ad13ab13dfca8807760e1d5c0d3124558c067053655d4
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds several text ads to a given ad group. To get ad_group_id,
run get_ad_groups.py.
Tags: AdGroupAdService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetAdGroupAdService(
'https://adwords-sandbox.google.com', 'v201109_1')
# Construct operations and add ads.
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
},
# Optional fields.
'status': 'PAUSED'
}
# If needed, you could specify an exemption request here.
# 'exemptionRequests': [{
# # This comes back in a PolicyViolationError.
          #     'key': {
# 'policyName': '...',
# 'violatingText': '...'
# }
# }]
},
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Enjoy your stay at Red Planet.',
'description2': 'Buy your tickets now!',
'headline': 'Luxury Cruise to Mars'
}
}
}
]
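  # Mutate() returns a list whose first element holds the mutate result; its
  # 'value' field lists the ads that were actually created.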
ads = ad_group_ad_service.Mutate(operations)[0]
# Display results.
for ad in ads['value']:
print ('Ad with id \'%s\' and of type \'%s\' was added.'
% (ad['ad']['id'], ad['ad']['Ad_Type']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id)
|
nearlyfreeapps/python-googleadwords
|
examples/adspygoogle/adwords/v201109_1/basic_operations/add_text_ads.py
|
Python
|
apache-2.0
| 3,212
|
[
"VisIt"
] |
6efb7eeb7247121a4ff7f9e4f1f1104c1ef44d04b99efba7134f5fe1ecf51bed
|
import pytest
import sys
sys.path.append("../src/")
import cryspy
from cryspy.fromstr import fromstr as fs
import numpy as np
approxdelta = 0.00000001
def approx(x1, x2):
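    # Compare two Mixed numbers: exact comparison for exact (rational) values,
    # and an absolute-tolerance comparison as soon as either value is a float.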
x1 = cryspy.numbers.Mixed(x1)
x2 = cryspy.numbers.Mixed(x2)
if isinstance(x1.value, float) or isinstance(x2.value, float):
return abs(x1.value - x2.value) < approxdelta
else:
return x1 == x2
def test_Karussell():
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 90).to_Metric()
k = cryspy.utils.Karussell(metric, fs("d 1 0 0"), fs("d 0 1 0"))
d1 = k.direction(0)
assert float(metric.length(d1 - fs("d 1.0 0.0 0"))) < 1e-9
d2 = k.direction(np.pi / 2)
assert float(metric.length(d2 - fs("d 0 1 0"))) < 1e-9
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 45).to_Metric()
k = cryspy.utils.Karussell(metric, fs("d 1 0 0"), fs("d 0 1 0"))
d1 = k.direction(0)
assert float(metric.length(d1 - fs("d 1.0 0.0 0"))) < 1e-9
d2 = k.direction(np.pi / 4)
assert float(metric.length(d2 - fs("d 0 1 0"))) < 1e-9
def test_fill():
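    # fill() appears to replicate atoms into neighbouring cells so that every
    # image within the given tolerance of the unit cell is present: an atom at
    # (1/2, 1/2, 1/2) with tolerance 0.6 gains 3 images per axis (27 total),
    # one at the origin with tolerance 0.1 gains 2 per axis (8 total).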
atomset = cryspy.crystal.Atomset({cryspy.crystal.Atom("Fe1", "Fe", fs("p 1/2 1/2 1/2"))})
atomset = cryspy.utils.fill(atomset, [0.6, 0.6, 0.6])
assert len(atomset.menge) == 27
atomset = cryspy.crystal.Atomset({cryspy.crystal.Atom("Fe1", "Fe", fs("p 0 0 0"))})
atomset = cryspy.utils.fill(atomset, [0.1, 0.1, 0.1])
assert len(atomset.menge) == 8
def test_ldu_decomposition():
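    # ldu_decomposition() is expected to return lower-triangular, diagonal and
    # upper-triangular factors [L, D, U] whose product equals the input matrix.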
A = fs("< 5 >")
assert cryspy.utils.ldu_decomposition(A) == \
[fs("<1>"), fs("<5>"), fs("<1>")]
A = fs(" 2 1 1 0 \n 4 3 3 1 \n 8 7 9 5 \n 6 7 9 8")
[L, D, U] = cryspy.utils.ldu_decomposition(A)
print(L)
print(D)
print(U)
assert L == fs("1 0 0 0 \n 2 1 0 0 \n 4 3 1 0 \n 3 4 1 1")
assert D == fs("2 0 0 0 \n 0 1 0 0 \n 0 0 2 0 \n 0 0 0 2")
assert U == fs("1 1/2 1/2 0 \n 0 1 1 1 \n 0 0 1 1 \n 0 0 0 1")
def test_calculate_transformation_for_normalized_axes():
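    # The transformation T should preserve the directions of the basis vectors
    # and the angles between them, while the pre-images of the unit axes under T
    # have length 1 in the original metric (i.e. the axes are normalized).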
metrics = [cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 90).to_Metric(),
cryspy.geo.Cellparameters(2, 1, 3, 90, 90, 40).to_Metric(),
cryspy.geo.Cellparameters(3.0, 2.7, 3, 60.8, 67.9, 60).to_Metric(),
cryspy.geo.Cellparameters(1.23, 3.8, 8.4, 60, 70, 80).to_Metric()
]
for metric1 in metrics:
T = cryspy.utils.calculate_transformation_for_normalized_axes(metric1)
metric2 = T ** metric1
print(fs("d 1 0 0"))
print(T ** fs("d 1 0 0"))
assert approx(metric2.dangle(fs("d 1 0 0"), T ** fs("d 1 0 0")), 0)
assert approx(metric2.dangle(fs("d 0 1 0"), T ** fs("d 0 1 0")), 0)
assert approx(metric2.dangle(fs("d 0 0 1"), T ** fs("d 0 0 1")), 0)
assert approx(metric2.dangle(fs("d 0 1 0"), fs("d 0 0 1")),
metric1.dangle(fs("d 0 1 0"), fs("d 0 0 1")))
assert approx(metric2.dangle(fs("d 0 0 1"), fs("d 1 0 0")),
metric1.dangle(fs("d 0 0 1"), fs("d 1 0 0")))
assert approx(metric2.dangle(fs("d 1 0 0"), fs("d 0 1 0")),
metric1.dangle(fs("d 1 0 0"), fs("d 0 1 0")))
assert approx(metric1.length(T.inv() ** fs("d 1 0 0")), 1)
assert approx(metric1.length(T.inv() ** fs("d 0 1 0")), 1)
assert approx(metric1.length(T.inv() ** fs("d 0 0 1")), 1)
def test_autotetraeder():
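    # auto_tetrahedron() presumably builds the tetrahedron from the nearest four
    # atoms of the listed element(s) around the given centre; it should match a
    # tetrahedron constructed explicitly from the same four hydrogen positions.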
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 90).to_Metric()
Atom = cryspy.crystal.Atom
facecolor = (1, 1, 1)
faceopacity = 0.5
plotedges = True
edgecolor = (0, 0, 0)
edgewidth = 0.02
atomset = cryspy.crystal.Atomset({
Atom("Fe1", "Fe", fs("p 1/2 1/2 1/2")),
Atom("H1", "H", fs("p 0 0 0")),
Atom("H2", "H", fs("p 1 1 0")),
Atom("H3", "H", fs("p 0 1 1")),
Atom("H4", "H", fs("p 1 0 1"))
})
tetra1 = cryspy.utils.auto_tetrahedron(
"Tetra",
atomset,
metric,
atomset.get_atom("Fe1").pos,
["H"],
facecolor, faceopacity, plotedges, edgecolor, edgewidth
)
tetra2 = cryspy.utils.tetrahedron(
"Tetra",
atomset.get_atom("H1").pos,
atomset.get_atom("H2").pos,
atomset.get_atom("H4").pos,
atomset.get_atom("H3").pos,
facecolor, faceopacity, plotedges, edgecolor, edgewidth
)
assert tetra1 == tetra2
def test_auto_octahedron():
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 90).to_Metric()
Atom = cryspy.crystal.Atom
facecolor = (1, 1, 1)
faceopacity = 0.5
plotedges = True
edgecolor = (0, 0, 0)
edgewidth = 0.02
atomset = cryspy.crystal.Atomset({
Atom("Fe1", "Fe", fs("p 1/2 1/2 1/2")),
Atom("H1", "H", fs("p 1/2 1/2 0")),
Atom("H2", "H", fs("p 1/2 1/2 1")),
Atom("H3", "H", fs("p 1/2 0 1/2")),
Atom("H4", "H", fs("p 1/2 1 1/2")),
Atom("H5", "H", fs("p 0 1/2 1/2")),
Atom("H6", "H", fs("p 1 1/2 1/2"))
})
octa1 = cryspy.utils.auto_octahedron(
"Octa",
atomset,
metric,
atomset.get_atom("Fe1").pos,
["H"],
facecolor, faceopacity, plotedges, edgecolor, edgewidth
)
octa2 = cryspy.utils.octahedron(
"Octa",
atomset.get_atom("H1").pos,
atomset.get_atom("H3").pos,
atomset.get_atom("H5").pos,
atomset.get_atom("H4").pos,
atomset.get_atom("H6").pos,
atomset.get_atom("H2").pos,
facecolor, faceopacity, plotedges, edgecolor, edgewidth
)
assert octa1 == octa2
|
tobias-froehlich/cryspy
|
tests/test_utils.py
|
Python
|
gpl-3.0
| 5,536
|
[
"CRYSTAL"
] |
e81c94a1a95434e98d44930102715bf44cff9a81f41ea101471654170fa2b412
|