text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import os
from gpaw import GPAW
from ase import *
from gpaw.wannier import Wannier
from gpaw.utilities import equal

# Build a periodic hydrogen wire along x: one H atom per unit cell,
# repeated natoms times; 8x8 Angstrom transverse cell provides vacuum.
natoms = 1
hhbondlength = 0.9  # H-H spacing in Angstrom (= cell length along x)
atoms = Atoms([Atom('H', (0, 4.0, 4.0))],
              cell=(hhbondlength, 8., 8.),
              pbc=True).repeat((natoms, 1, 1))
# Displace kpoints slightly, so that the symmetry program does
# not use inversion symmetry to reduce kpoints.
assert natoms < 5
# k-point count and occupation energy are tabulated per repetition count.
kpts = [21, 11, 7, 1][natoms - 1]
occupationenergy = [30., 0., 0., 0.][natoms - 1]
kpts = monkhorst_pack((kpts, 1, 1)) + 2e-5
if 1:
    # GPAW calculator: run the ground state and save it with wavefunctions.
    calc = GPAW(nbands=natoms // 2 + 4,
                kpts=kpts,
                width=.1,
                spinpol=False,
                convergence={'eigenstates': 1e-7})
    atoms.set_calculator(calc)
    atoms.get_potential_energy()
    calc.write('hwire%s.gpw' % natoms, 'all')
else:
    # Restart branch: reuse a previously converged ground state file.
    calc = GPAW('hwire%s.gpw' % natoms, txt=None)
wannier = Wannier(numberofwannier=natoms,
                  calculator=calc,
                  occupationenergy=occupationenergy,)
# initialwannier=[[[1.* i / natoms, .5, .5], [0,], .5]
#                 for i in range(natoms)])
wannier.localize()
wannier.translate_all_wannier_functions_to_cell([1, 0, 0])
centers = wannier.get_centers()
for i in wannier.get_sorted_indices():
    center = centers[i]['pos']
    print center  # NOTE: Python 2 print statement
    # Each Wannier center should sit on an H site (an integer multiple of
    # the bond length along x) and on the wire axis (y = z = 4.0).
    quotient = round(center[0] / hhbondlength)
    equal(hhbondlength*quotient - center[0], 0., 2e-3)
    equal(center[1], 4., 2e-3)
    equal(center[2], 4., 2e-3)
for i in range(natoms):
    wannier.write_cube(i, 'hwire%s.cube' % i, real=True)
# Clean up the ground-state file and the cube dumps.
os.system('rm hwire1.gpw hwire*.cube')
| qsnake/gpaw | oldtest/wannier-hwire.py | Python | gpl-3.0 | 1,691 | [
"ASE",
"GPAW"
] | 42821eeb6699a07e804e2d3c91623d2c02f43949dba7b76f3d654a4d3e865eaa |
""" DIRAC Transformation DB
Transformation database is used to collect and serve the necessary information
in order to automate the task of job preparation for high level transformations.
This class is typically used as a base class for more specific data processing
databases
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import re
import time
import threading
from errno import ENOENT
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.DErrno import cmpError
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.DataManagementSystem.Client.MetaQuery import MetaQuery
__RCSID__ = "$Id$"
MAX_ERROR_COUNT = 10
#############################################################################
class TransformationDB(DB):
""" TransformationDB class
"""
  def __init__(self, dbname=None, dbconfig=None, dbIn=None):
    """ The standard constructor takes the database name (dbname) and the name of the
        configuration section (dbconfig)

    :param str dbname: database name, defaults to 'TransformationDB'
    :param str dbconfig: CS section with DB parameters,
                         defaults to 'Transformation/TransformationDB'
    :param dbIn: when set, the base-class DB initialisation is skipped
                 (presumably for tests/injection -- TODO confirm with callers)
    """
    if not dbname:
      dbname = 'TransformationDB'
    if not dbconfig:
      dbconfig = 'Transformation/TransformationDB'
    if not dbIn:
      DB.__init__(self, dbname, dbconfig)
    # Serialises INSERT + lastRowId in addTransformation
    self.lock = threading.Lock()
    # File statuses from which new tasks may be created
    self.allowedStatusForTasks = ('Unused', 'ProbInFC')
    # Column names of the Transformations table (order matters for SELECTs)
    self.TRANSPARAMS = ['TransformationID',
                        'TransformationName',
                        'Description',
                        'LongDescription',
                        'CreationDate',
                        'LastUpdate',
                        'AuthorDN',
                        'AuthorGroup',
                        'Type',
                        'Plugin',
                        'AgentType',
                        'Status',
                        'FileMask',
                        'TransformationGroup',
                        'GroupSize',
                        'InheritedFrom',
                        'Body',
                        'MaxNumberOfTasks',
                        'EventsPerTask',
                        'TransformationFamily']
    # The only Transformations columns that may be updated after creation
    self.mutable = ['TransformationName',
                    'Description',
                    'LongDescription',
                    'AgentType',
                    'Status',
                    'MaxNumberOfTasks',
                    'TransformationFamily',
                    'Body']  # for the moment include TransformationFamily
    # Column names of the TransformationFiles table
    self.TRANSFILEPARAMS = ['TransformationID',
                            'FileID',
                            'Status',
                            'TaskID',
                            'TargetSE',
                            'UsedSE',
                            'ErrorCount',
                            'LastUpdate',
                            'InsertedTime']
    # Column names of the TransformationFileTasks table
    self.TRANSFILETASKPARAMS = ['TransformationID',
                                'FileID',
                                'TaskID']
    # Column names of the TransformationTasks table
    self.TASKSPARAMS = ['TaskID',
                        'TransformationID',
                        'ExternalStatus',
                        'ExternalID',
                        'TargetSE',
                        'CreationTime',
                        'LastUpdateTime']
    # Column names of the AdditionalParameters table
    self.ADDITIONALPARAMETERS = ['TransformationID',
                                 'ParameterName',
                                 'ParameterValue',
                                 'ParameterType'
                                 ]
    # Initialize filter queries with the Input Meta Queries of
    # currently-active transformations
    self.filterQueries = []
    res = self.__updateFilterQueries()
    if not res['OK']:
      gLogger.fatal("Failed to create filter queries")
    # This is here to ensure full compatibility between different versions of the MySQL DB schema
    self.isTransformationTasksInnoDB = True
    res = self._query("SELECT Engine FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'TransformationTasks'")
    if not res['OK']:
      raise RuntimeError(res['Message'])
    else:
      engine = res['Value'][0][0]
      if engine.lower() != 'innodb':
        self.isTransformationTasksInnoDB = False
def getName(self):
""" Get the database name
"""
return self.dbName
###########################################################################
#
# These methods manipulate the Transformations table
#
def addTransformation(self, transName, description, longDescription, authorDN, authorGroup, transType,
plugin, agentType, fileMask,
transformationGroup='General',
groupSize=1,
inheritedFrom=0,
body='',
maxTasks=0,
eventsPerTask=0,
addFiles=True,
connection=False,
inputMetaQuery=None,
outputMetaQuery=None):
""" Add new transformation definition including its input streams
"""
connection = self.__getConnection(connection)
res = self._getTransformationID(transName, connection=connection)
if res['OK']:
return S_ERROR("Transformation with name %s already exists with TransformationID = %d" % (transName,
res['Value']))
elif res['Message'] != "Transformation does not exist":
return res
self.lock.acquire()
res = self._escapeString(body)
if not res['OK']:
return S_ERROR("Failed to parse the transformation body")
body = res['Value']
req = "INSERT INTO Transformations (TransformationName,Description,LongDescription, \
CreationDate,LastUpdate,AuthorDN,AuthorGroup,Type,Plugin,AgentType,\
FileMask,Status,TransformationGroup,GroupSize,\
InheritedFrom,Body,MaxNumberOfTasks,EventsPerTask)\
VALUES ('%s','%s','%s',\
UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s','%s','%s','%s','%s',\
'%s','New','%s',%d,\
%d,%s,%d,%d);" % \
(transName, description, longDescription,
authorDN, authorGroup, transType, plugin, agentType,
fileMask, transformationGroup, groupSize,
inheritedFrom, body, maxTasks, eventsPerTask)
res = self._update(req, connection)
if not res['OK']:
self.lock.release()
return res
transID = res['lastRowId']
self.lock.release()
# Add Input and Output Meta Queries to the transformation if they are defined
if inputMetaQuery:
res = self.createTransformationMetaQuery(transID, inputMetaQuery, 'Input')
if not res['OK']:
gLogger.error("Failed to add input meta query to the transformation", res['Message'])
return self.deleteTransformation(transID, connection=connection)
if outputMetaQuery:
res = self.createTransformationMetaQuery(transID, outputMetaQuery, 'Output')
if not res['OK']:
gLogger.error("Failed to add output meta query to the transformation", res['Message'])
return self.deleteTransformation(transID, connection=connection)
# If the transformation has an input data specification
if inputMetaQuery:
self.filterQueries.append((transID, inputMetaQuery))
if inheritedFrom:
res = self._getTransformationID(inheritedFrom, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for parent transformation, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
originalID = res['Value']
# FIXME: this is not the right place to change status information, and in general the whole should not be here
res = self.setTransformationParameter(originalID, 'Status', 'Completing',
author=authorDN, connection=connection)
if not res['OK']:
gLogger.error("Failed to update parent transformation status: now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
res = self.setTransformationParameter(originalID, 'AgentType', 'Automatic',
author=authorDN, connection=connection)
if not res['OK']:
gLogger.error("Failed to update parent transformation agent type, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
message = 'Creation of the derived transformation (%d)' % transID
self.__updateTransformationLogging(originalID, message, authorDN, connection=connection)
res = self.getTransformationFiles(condDict={'TransformationID': originalID}, connection=connection)
if not res['OK']:
gLogger.error("Could not get transformation files, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
if res['Records']:
res = self.__insertExistingTransformationFiles(transID, res['Records'], connection=connection)
if not res['OK']:
gLogger.error("Could not insert files, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
# Add files to the DataFiles table
catalog = FileCatalog()
if addFiles and inputMetaQuery:
res = catalog.findFilesByMetadata(inputMetaQuery)
if not res['OK']:
gLogger.error("Failed to find files to be added to the transformation", res['Message'])
return res
filesToAdd = res['Value']
gLogger.notice('filesToAdd', filesToAdd)
if filesToAdd:
connection = self.__getConnection(connection)
res = self.__addDataFiles(filesToAdd, connection=connection)
if not res['OK']:
return res
lfnFileIDs = res['Value']
# Add the files to the transformations
fileIDs = []
for lfn in filesToAdd:
if lfn in lfnFileIDs:
fileIDs.append(lfnFileIDs[lfn])
res = self.__addFilesToTransformation(transID, fileIDs, connection=connection)
if not res['OK']:
gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
message = "Created transformation %d" % transID
self.__updateTransformationLogging(transID, message, authorDN, connection=connection)
return S_OK(transID)
def getTransformations(self, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
orderAttribute=None, limit=None, extraParams=False, offset=None, connection=False):
""" Get parameters of all the Transformations with support for the web standard structure """
connection = self.__getConnection(connection)
req = "SELECT %s FROM Transformations %s" % (intListToString(self.TRANSPARAMS),
self.buildCondition(condDict, older, newer, timeStamp,
orderAttribute, limit, offset=offset))
res = self._query(req, connection)
if not res['OK']:
return res
if condDict is None:
condDict = {}
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = [str(item) if not isinstance(item, six.integer_types) else item for item in row]
transDict = dict(zip(self.TRANSPARAMS, row))
webList.append(rList)
if extraParams:
res = self.__getAdditionalParameters(transDict['TransformationID'], connection=connection)
if not res['OK']:
return res
transDict.update(res['Value'])
resultList.append(transDict)
result = S_OK(resultList)
result['Records'] = webList
result['ParameterNames'] = self.TRANSPARAMS
return result
def getTransformation(self, transName, extraParams=False, connection=False):
"""Get Transformation definition and parameters of Transformation identified by TransformationID
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getTransformations(condDict={'TransformationID': transID}, extraParams=extraParams,
connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Transformation %s did not exist" % transName)
return S_OK(res['Value'][0])
def getTransformationParameters(self, transName, parameters, connection=False):
""" Get the requested parameters for a supplied transformation """
if isinstance(parameters, six.string_types):
parameters = [parameters]
extraParams = bool(set(parameters) - set(self.TRANSPARAMS))
res = self.getTransformation(transName, extraParams=extraParams, connection=connection)
if not res['OK']:
return res
transParams = res['Value']
paramDict = {}
for reqParam in parameters:
if reqParam not in transParams:
return S_ERROR("Parameter %s not defined for transformation %s" % (reqParam, transName))
paramDict[reqParam] = transParams[reqParam]
if len(paramDict) == 1:
return S_OK(paramDict[reqParam])
return S_OK(paramDict)
def getTransformationWithStatus(self, status, connection=False):
""" Gets a list of the transformations with the supplied status """
req = "SELECT TransformationID FROM Transformations WHERE Status = '%s';" % status
res = self._query(req, conn=connection)
if not res['OK']:
return res
transIDs = [tupleIn[0] for tupleIn in res['Value']]
return S_OK(transIDs)
def getTableDistinctAttributeValues(self, table, attributes, selectDict, older=None, newer=None,
timeStamp=None, connection=False):
tableFields = {'Transformations': self.TRANSPARAMS,
'TransformationTasks': self.TASKSPARAMS,
'TransformationFiles': self.TRANSFILEPARAMS}
possibleFields = tableFields.get(table, [])
return self.__getTableDistinctAttributeValues(table, possibleFields, attributes, selectDict, older, newer,
timeStamp, connection=connection)
def __getTableDistinctAttributeValues(self, table, possible, attributes, selectDict, older, newer,
timeStamp, connection=False):
connection = self.__getConnection(connection)
attributeValues = {}
for attribute in attributes:
if possible and (attribute not in possible):
return S_ERROR('Requested attribute (%s) does not exist in table %s' % (attribute, table))
res = self.getDistinctAttributeValues(table, attribute, condDict=selectDict, older=older, newer=newer,
timeStamp=timeStamp, connection=connection)
if not res['OK']:
return S_ERROR('Failed to serve values for attribute %s in table %s' % (attribute, table))
attributeValues[attribute] = res['Value']
return S_OK(attributeValues)
def __updateTransformationParameter(self, transID, paramName, paramValue, connection=False):
if paramName not in self.mutable:
return S_ERROR("Can not update the '%s' transformation parameter" % paramName)
if paramName == 'Body':
res = self._escapeString(paramValue)
if not res['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = res['Value']
req = "UPDATE Transformations SET %s=%s, LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % (paramName,
paramValue,
transID)
return self._update(req, connection)
req = "UPDATE Transformations SET %s='%s', LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % (paramName,
paramValue,
transID)
return self._update(req, connection)
def _getTransformationID(self, transName, connection=False):
""" Method returns ID of transformation with the name=<name> """
try:
transName = int(transName)
cmd = "SELECT TransformationID from Transformations WHERE TransformationID=%d;" % transName
except ValueError:
if not isinstance(transName, six.string_types):
return S_ERROR("Transformation should be ID or name")
cmd = "SELECT TransformationID from Transformations WHERE TransformationName='%s';" % transName
res = self._query(cmd, connection)
if not res['OK']:
gLogger.error("Failed to obtain transformation ID for transformation", "%s: %s" % (transName, res['Message']))
return res
elif not res['Value']:
gLogger.verbose("Transformation %s does not exist" % (transName))
return S_ERROR("Transformation does not exist")
return S_OK(res['Value'][0][0])
def __deleteTransformation(self, transID, connection=False):
req = "DELETE FROM Transformations WHERE TransformationID=%d;" % transID
return self._update(req, connection)
def __updateFilterQueries(self, connection=False):
""" Get filters for all defined input streams in all the transformations.
"""
resultList = []
res = self.getTransformations(condDict={'Status': ['New', 'Active', 'Stopped', 'Flush', 'Completing']},
connection=connection)
if not res['OK']:
return res
transIDs = res['Value']
for transDict in transIDs:
transID = str(transDict['TransformationID'])
res = self.getTransformationMetaQuery(transID, 'Input')
if not res['OK']:
continue
resultList.append((transID, res['Value']))
self.filterQueries = resultList
return S_OK(resultList)
###########################################################################
#
# These methods manipulate the AdditionalParameters tables
#
def setTransformationParameter(self, transName, paramName, paramValue, author='', connection=False):
""" Add a parameter for the supplied transformations """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
message = ''
if paramName in self.TRANSPARAMS:
res = self.__updateTransformationParameter(transID, paramName, paramValue, connection=connection)
if res['OK']:
pv = self._escapeString(paramValue)
if not pv['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = pv['Value']
if paramName == 'Body':
message = 'Body updated'
else:
message = '%s updated to %s' % (paramName, paramValue)
else:
res = self.__addAdditionalTransformationParameter(transID, paramName, paramValue, connection=connection)
if res['OK']:
message = 'Added additional parameter %s' % paramName
if message:
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def getAdditionalParameters(self, transName, connection=False):
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__getAdditionalParameters(transID, connection=connection)
def deleteTransformationParameter(self, transName, paramName, author='', connection=False):
""" Delete a parameter from the additional parameters table """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if paramName in self.TRANSPARAMS:
return S_ERROR("Can not delete core transformation parameter")
res = self.__deleteTransformationParameters(transID, parameters=[paramName], connection=connection)
if not res['OK']:
return res
self.__updateTransformationLogging(transID, 'Removed additional parameter %s' % paramName, author,
connection=connection)
return res
def __addAdditionalTransformationParameter(self, transID, paramName, paramValue, connection=False):
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d AND ParameterName='%s'" % (transID, paramName)
res = self._update(req, connection)
if not res['OK']:
return res
res = self._escapeString(paramValue)
if not res['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = res['Value']
paramType = 'StringType'
if isinstance(paramValue, six.integer_types):
paramType = 'IntType'
req = "INSERT INTO AdditionalParameters (%s) VALUES (%s,'%s',%s,'%s');" % (', '.join(self.ADDITIONALPARAMETERS),
transID, paramName,
paramValue, paramType)
return self._update(req, connection)
def __getAdditionalParameters(self, transID, connection=False):
req = "SELECT %s FROM AdditionalParameters WHERE TransformationID = %d" % (', '.join(self.ADDITIONALPARAMETERS),
transID)
res = self._query(req, connection)
if not res['OK']:
return res
paramDict = {}
for _transID, parameterName, parameterValue, parameterType in res['Value']:
if parameterType in ('IntType', 'LongType'):
parameterValue = int(parameterValue)
paramDict[parameterName] = parameterValue
return S_OK(paramDict)
def __deleteTransformationParameters(self, transID, parameters=None, connection=False):
""" Remove the parameters associated to a transformation """
if parameters is None:
parameters = []
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d" % transID
if parameters:
req = "%s AND ParameterName IN (%s);" % (req, stringListToString(parameters))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the TransformationFiles table
#
def addFilesToTransformation(self, transName, lfns, connection=False):
""" Add a list of LFNs to the transformation directly """
gLogger.info("TransformationDB.addFilesToTransformation:"
" Attempting to add %s files to transformations: %s" % (len(lfns), transName))
if not lfns:
return S_ERROR('Zero length LFN list')
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Add missing files if necessary (__addDataFiles does the job)
res = self.__addDataFiles(lfns, connection=connection)
if not res['OK']:
return res
fileIDs = dict((fileID, lfn) for lfn, fileID in res['Value'].items())
# Attach files to transformation
successful = {}
if fileIDs:
res = self.__addFilesToTransformation(transID, list(fileIDs), connection=connection)
if not res['OK']:
return res
for fileID in fileIDs:
lfn = fileIDs[fileID]
successful[lfn] = "Added" if fileID in res['Value'] else "Present"
resDict = {'Successful': successful, 'Failed': {}}
return S_OK(resDict)
  def getTransformationFiles(self, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
                             orderAttribute=None, limit=None, offset=None, connection=False):
    """ Get files for the supplied transformations with support for the web standard structure

    :param dict condDict: selection criteria; an 'LFN' entry (string or list)
                          is translated into a FileID condition
    :param older: upper bound on *timeStamp*
    :param newer: lower bound on *timeStamp*
    :return: S_OK(list of file dicts) carrying extra 'Records' and
             'ParameterNames' keys for the web
    """
    connection = self.__getConnection(connection)
    req = "SELECT %s FROM TransformationFiles" % (intListToString(self.TRANSFILEPARAMS))
    originalFileIDs = {}
    if condDict is None:
      condDict = {}
    if condDict or older or newer:
      # Translate an LFN selection into the corresponding FileIDs
      lfns = condDict.pop('LFN', None)
      if lfns:
        if isinstance(lfns, six.string_types):
          lfns = [lfns]
        res = self.__getFileIDsForLfns(lfns, connection=connection)
        if not res['OK']:
          return res
        # fileID -> LFN mapping; its keys feed the FileID condition below
        originalFileIDs = res['Value'][0]
        condDict['FileID'] = list(originalFileIDs)
      # An empty condition value can never match anything: short-circuit
      for val in condDict.values():
        if not val:
          return S_OK([])
      req = "%s %s" % (req, self.buildCondition(condDict, older, newer, timeStamp, orderAttribute, limit,
                                                offset=offset))
    res = self._query(req, connection)
    if not res['OK']:
      return res
    transFiles = res['Value']
    fileIDs = [int(row[1]) for row in transFiles]
    webList = []
    resultList = []
    if not fileIDs:
      originalFileIDs = {}
    else:
      # Resolve LFNs only if they were not already obtained above
      if not originalFileIDs:
        res = self.__getLfnsForFileIDs(fileIDs, connection=connection)
        if not res['OK']:
          return res
        originalFileIDs = res['Value'][1]
      for row in transFiles:
        lfn = originalFileIDs[row[1]]
        # Prepare the structure for the web
        fDict = {'LFN': lfn}
        fDict.update(dict(zip(self.TRANSFILEPARAMS, row)))
        # Note: the line below is returning "None" if the item is None... This seems to work but is ugly...
        rList = [lfn] + [str(item) if not isinstance(item, six.integer_types) else item for item in row]
        webList.append(rList)
        resultList.append(fDict)
    result = S_OK(resultList)
    result['Records'] = webList
    result['ParameterNames'] = ['LFN'] + self.TRANSFILEPARAMS
    return result
def getFileSummary(self, lfns, connection=False):
""" Get file status summary in all the transformations """
connection = self.__getConnection(connection)
condDict = {'LFN': lfns}
res = self.getTransformationFiles(condDict=condDict, connection=connection)
if not res['OK']:
return res
resDict = {}
for fileDict in res['Value']:
resDict.setdefault(fileDict['LFN'], {})[fileDict['TransformationID']] = fileDict
failedDict = dict.fromkeys(set(lfns) - set(resDict), 'Did not exist in the Transformation database')
return S_OK({'Successful': resDict, 'Failed': failedDict})
  def setFileStatusForTransformation(self, transID, fileStatusDict=None, connection=False):
    """ Set file status for the given transformation, based on
        fileStatusDict {fileID_A: ('statusA',errorA), fileID_B: ('statusB',errorB), ...}
        The ErrorCount is incremented if errorA flag is True

    :param int transID: transformation ID
    :param dict fileStatusDict: fileID -> (status, errorFlag)
    :return: S_OK() or the first failing update result
    """
    if not fileStatusDict:
      return S_OK()
    # Building the request with "ON DUPLICATE KEY UPDATE"
    reqBase = "INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate) VALUES "
    # Get fileID and status for each case: error and no error
    # (one bulk statement per error flag, since the UPDATE clause differs)
    statusFileDict = {}
    for fileID, (status, error) in fileStatusDict.items():
      statusFileDict.setdefault(error, []).append((fileID, status))
    for error, fileIDStatusList in statusFileDict.items():
      req = reqBase + ','.join("(%d, %d, '%s', 0, UTC_TIMESTAMP())" %
                               (transID, fileID, status) for fileID, status in fileIDStatusList)
      if error:
        # Increment the error counter when we requested
        req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)"
      else:
        req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),LastUpdate=VALUES(LastUpdate)"
      result = self._update(req, connection)
      if not result['OK']:
        return result
    return S_OK()
def getTransformationStats(self, transName, connection=False):
""" Get number of files in Transformation Table for each status """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getCounters('TransformationFiles', ['TransformationID', 'Status'], {'TransformationID': transID})
if not res['OK']:
return res
statusDict = dict((attrDict['Status'], count)
for attrDict, count in res['Value'] if '-' not in attrDict['Status'])
statusDict['Total'] = sum(statusDict.values())
return S_OK(statusDict)
def getTransformationFilesCount(self, transName, field, selection=None, connection=False):
""" Get the number of files in the TransformationFiles table grouped by the supplied field """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
if selection is None:
selection = {}
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
selection['TransformationID'] = transID
if field not in self.TRANSFILEPARAMS:
return S_ERROR("Supplied field not in TransformationFiles table")
res = self.getCounters('TransformationFiles', ['TransformationID', field], selection)
if not res['OK']:
return res
countDict = dict((attrDict[field], count) for attrDict, count in res['Value'])
countDict['Total'] = sum(countDict.values())
return S_OK(countDict)
def __addFilesToTransformation(self, transID, fileIDs, connection=False):
req = "SELECT FileID from TransformationFiles"
req = req + " WHERE TransformationID = %d AND FileID IN (%s);" % (transID, intListToString(fileIDs))
res = self._query(req, connection)
if not res['OK']:
return res
for tupleIn in res['Value']:
fileIDs.remove(tupleIn[0])
if not fileIDs:
return S_OK([])
req = "INSERT INTO TransformationFiles (TransformationID,FileID,LastUpdate,InsertedTime) VALUES"
for fileID in fileIDs:
req = "%s (%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP())," % (req, transID, fileID)
req = req.rstrip(',')
res = self._update(req, connection)
if not res['OK']:
return res
return S_OK(fileIDs)
  def __insertExistingTransformationFiles(self, transID, fileTuplesList, connection=False):
    """ Inserting already transformation files in TransformationFiles table (e.g. for deriving transformations)

    :param int transID: ID of the new (derived) transformation
    :param list fileTuplesList: rows as returned by getTransformationFiles
                                ('Records'), starting with
                                (lfn, transID, fileID, status, taskID, targetSE, usedSE, ...)
    """
    gLogger.info("Inserting %d files in TransformationFiles" % len(fileTuplesList))
    # splitting in various chunks, in case it is too big
    for fileTuples in breakListIntoChunks(fileTuplesList, 10000):
      gLogger.verbose("Adding first %d files in TransformationFiles (out of %d)" % (len(fileTuples),
                                                                                    len(fileTuplesList)))
      req = "INSERT INTO TransformationFiles (TransformationID,Status,TaskID,FileID,TargetSE,UsedSE,LastUpdate) VALUES"
      candidates = False
      for ft in fileTuples:
        _lfn, originalID, fileID, status, taskID, targetSE, usedSE, _errorCount, _lastUpdate, _insertTime = ft[:10]
        # 'Removed' files are not propagated to the derived transformation
        if status not in ('Removed', ):
          candidates = True
          # Tag inherited statuses (once) so they can be distinguished later
          if not re.search('-', status):
            status = "%s-inherited" % status
            if taskID:
              # Should be readable up to 999,999 tasks: that field is an int(11) in the DB, not a string
              taskID = 1000000 * int(originalID) + int(taskID)
            req = "%s (%d,'%s','%d',%d,'%s','%s',UTC_TIMESTAMP())," % (req, transID, status, taskID,
                                                                       fileID, targetSE, usedSE)
      if not candidates:
        continue
      req = req.rstrip(",")
      res = self._update(req, connection)
      if not res['OK']:
        return res
    return S_OK()
def __assignTransformationFile(self, transID, taskID, se, fileIDs, connection=False):
""" Make necessary updates to the TransformationFiles table for the newly created task
"""
req = "UPDATE TransformationFiles SET TaskID='%d',UsedSE='%s',Status='Assigned',LastUpdate=UTC_TIMESTAMP()"
req = (req + " WHERE TransformationID = %d AND FileID IN (%s);") % (taskID, se, transID, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to assign file to task", res['Message'])
fileTuples = []
for fileID in fileIDs:
fileTuples.append(("(%d,%d,%d)" % (transID, fileID, taskID)))
req = "INSERT INTO TransformationFileTasks (TransformationID,FileID,TaskID) VALUES %s" % ','.join(fileTuples)
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to assign file to task", res['Message'])
return res
def __setTransformationFileStatus(self, fileIDs, status, connection=False):
req = "UPDATE TransformationFiles SET Status = '%s' WHERE FileID IN (%s);" % (status, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to update file status", res['Message'])
return res
def __setTransformationFileUsedSE(self, fileIDs, usedSE, connection=False):
req = "UPDATE TransformationFiles SET UsedSE = '%s' WHERE FileID IN (%s);" % (usedSE, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to update file usedSE", res['Message'])
return res
  def __resetTransformationFile(self, transID, taskID, connection=False):
    """Detach the files of one task: clear TaskID, mark them Unused with Unknown SE.

    :param int transID: transformation ID
    :param int taskID: task whose files are to be reset
    """
    req = "UPDATE TransformationFiles SET TaskID=NULL, UsedSE='Unknown', Status='Unused'\
    WHERE TransformationID = %d AND TaskID=%d;" % (transID, taskID)
    res = self._update(req, connection)
    if not res['OK']:
      gLogger.error("Failed to reset transformation file", res['Message'])
    return res
  def __deleteTransformationFiles(self, transID, connection=False):
    """ Remove the files associated to a transformation.

        It also tries to remove the associated DataFiles.
        If these DataFiles are still used by other transformations, they
        will be kept thanks to the ForeignKey constraint.
        In the very unlikely event of removing a file that was juuuuuuuust about to be
        used by another transformation, well, tough luck, but the other transformation
        will succeed at the next attempt to insert the file.

    :param int transID: transformation whose files are removed
    """
    # The IGNORE keyword will make sure we do not abort the full removal
    # on a foreign key error
    # https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#ignore-strict-comparison
    req = "DELETE IGNORE tf, df \
           FROM TransformationFiles tf \
           JOIN DataFiles df \
             ON tf.FileID=df.FileID \
           WHERE TransformationID = %d;" % transID
    res = self._update(req, connection)
    if not res['OK']:
      gLogger.error("Failed to delete transformation files", res['Message'])
    return res
###########################################################################
#
# These methods manipulate the TransformationFileTasks table
#
def __deleteTransformationFileTask(self, transID, taskID, connection=False):
''' Delete the file associated to a given task of a given transformation
from the TransformationFileTasks table for transformation with TransformationID and TaskID
'''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID=%d AND TaskID=%d" % (transID, taskID)
return self._update(req, connection)
def __deleteTransformationFileTasks(self, transID, connection=False):
''' Remove all associations between files, tasks and a transformation '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID = %d;" % transID
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to delete transformation files/task history", res['Message'])
return res
###########################################################################
#
# These methods manipulate the TransformationTasks table
#
def getTransformationTasks(self, condDict=None, older=None, newer=None, timeStamp='CreationTime',
orderAttribute=None, limit=None, inputVector=False,
offset=None, connection=False):
connection = self.__getConnection(connection)
req = "SELECT %s FROM TransformationTasks %s" % (intListToString(self.TASKSPARAMS),
self.buildCondition(condDict, older, newer, timeStamp,
orderAttribute, limit, offset=offset))
res = self._query(req, connection)
if not res['OK']:
return res
if condDict is None:
condDict = {}
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = [str(item) if not isinstance(item, six.integer_types) else item for item in row]
taskDict = dict(zip(self.TASKSPARAMS, row))
webList.append(rList)
if inputVector:
taskDict['InputVector'] = ''
taskID = taskDict['TaskID']
transID = taskDict['TransformationID']
res = self.getTaskInputVector(transID, taskID)
if res['OK']:
if taskID in res['Value']:
taskDict['InputVector'] = res['Value'][taskID]
else:
return res
resultList.append(taskDict)
result = S_OK(resultList)
result['Records'] = webList
result['ParameterNames'] = self.TASKSPARAMS
return result
def getTasksForSubmission(self, transName, numTasks=1, site='', statusList=None,
older=None, newer=None, connection=False):
""" Select tasks with the given status (and site) for submission """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
if statusList is None:
statusList = ['Created']
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
condDict = {"TransformationID": transID}
if statusList:
condDict["ExternalStatus"] = statusList
if site:
numTasks = 0
res = self.getTransformationTasks(condDict=condDict, older=older, newer=newer,
timeStamp='CreationTime', orderAttribute=None, limit=numTasks,
inputVector=True, connection=connection)
if not res['OK']:
return res
tasks = res['Value']
# Now prepare the tasks
resultDict = {}
for taskDict in tasks:
if len(resultDict) >= numTasks:
break
taskDict['Status'] = taskDict.pop('ExternalStatus')
taskDict['InputData'] = taskDict.pop('InputVector')
taskDict.pop('LastUpdateTime')
taskDict.pop('CreationTime')
taskDict.pop('ExternalID')
taskID = taskDict['TaskID']
resultDict[taskID] = taskDict
if site:
resultDict[taskID]['Site'] = site
return S_OK(resultDict)
def deleteTasks(self, transName, taskIDbottom, taskIDtop, author='', connection=False):
""" Delete tasks with taskID range in transformation """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
for taskID in range(taskIDbottom, taskIDtop + 1):
res = self.__removeTransformationTask(transID, taskID, connection=connection)
if not res['OK']:
return res
message = "Deleted tasks from %d to %d" % (taskIDbottom, taskIDtop)
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def reserveTask(self, transName, taskID, connection=False):
""" Reserve the taskID from transformation for submission """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__checkUpdate("TransformationTasks", "ExternalStatus", "Reserved", {"TransformationID": transID,
"TaskID": taskID},
connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR('Failed to set Reserved status for job %d - already Reserved' % int(taskID))
# The job is reserved, update the time stamp
res = self.setTaskStatus(transID, taskID, 'Reserved', connection=connection)
if not res['OK']:
return S_ERROR('Failed to set Reserved status for job %d - failed to update the time stamp' % int(taskID))
return S_OK()
def setTaskStatusAndWmsID(self, transName, taskID, status, taskWmsID, connection=False):
""" Set status and ExternalID for job with taskID in production with transformationID
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Set ID first in order to be sure there is no status set without the ID being set
res = self.__setTaskParameterValue(transID, taskID, 'ExternalID', taskWmsID, connection=connection)
if not res['OK']:
return res
return self.__setTaskParameterValue(transID, taskID, 'ExternalStatus', status, connection=connection)
def setTaskStatus(self, transName, taskID, status, connection=False):
""" Set status for job with taskID in production with transformationID """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance(taskID, list):
taskIDList = [taskID]
else:
taskIDList = list(taskID)
for taskID in taskIDList:
res = self.__setTaskParameterValue(transID, taskID, 'ExternalStatus', status, connection=connection)
if not res['OK']:
return res
return S_OK()
def getTransformationTaskStats(self, transName='', connection=False):
""" Returns dictionary with number of jobs per status for the given production.
"""
connection = self.__getConnection(connection)
if transName:
res = self._getTransformationID(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for transformation", res['Message'])
return res
res = self.getCounters('TransformationTasks', ['ExternalStatus'], {'TransformationID': res['Value']},
connection=connection)
else:
res = self.getCounters('TransformationTasks', ['ExternalStatus', 'TransformationID'], {},
connection=connection)
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['ExternalStatus']
statusDict[status] = count
total += count
statusDict['TotalCreated'] = total
return S_OK(statusDict)
def __setTaskParameterValue(self, transID, taskID, paramName, paramValue, connection=False):
req = "UPDATE TransformationTasks SET %s='%s', LastUpdateTime=UTC_TIMESTAMP()" % (paramName, paramValue)
req = req + " WHERE TransformationID=%d AND TaskID=%d;" % (transID, taskID)
return self._update(req, connection)
def __deleteTransformationTasks(self, transID, connection=False):
""" Delete all the tasks from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d" % transID
return self._update(req, connection)
def __deleteTransformationTask(self, transID, taskID, connection=False):
""" Delete the task from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d AND TaskID=%d" % (transID, taskID)
return self._update(req, connection)
def __deleteTransformationMetaQueries(self, transID, connection=False):
""" Delete all the meta queries from the TransformationMetaQueries table for transformation with TransformationID
"""
req = "DELETE FROM TransformationMetaQueries WHERE TransformationID=%d" % transID
return self._update(req, connection)
####################################################################
#
# These methods manipulate the TransformationMetaQueries table. It replaces all methods used to manipulate
# the old InputDataQuery table
#
def createTransformationMetaQuery(self, transName, queryDict, queryType, author='', connection=False):
""" Add a Meta Query to a given transformation """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__addMetaQuery(transID, queryDict, queryType, author=author, connection=connection)
  def __addMetaQuery(self, transID, queryDict, queryType, author='', connection=False):
    """ Insert the Meta Query into the TransformationMetaQuery table.

        :param int transID: transformation ID
        :param dict queryDict: metadata name -> value; lists/tuples are stored
                               joined with ';;;', ints flagged as 'Integer',
                               dicts stringified and flagged as 'Dict'
        :param str queryType: 'Input' or 'Output' query
        :returns: result of the last DB operation; on a failed insert the
                  partially inserted query is deleted again

        NOTE(review): if queryDict is empty (or only has falsy values) the
        loop body never runs, so the for/else branch logs 'Added meta data
        query' while the returned ``res`` is still the ENOENT error from
        getTransformationMetaQuery -- confirm whether callers rely on this.
    """
    # Refuse to add a second query of the same type; only the ENOENT
    # "no query defined" error is accepted as the go-ahead
    res = self.getTransformationMetaQuery(transID, queryType, connection=connection)
    if res['OK']:
      return S_ERROR("Meta query already exists for transformation")
    if not cmpError(res, ENOENT):
      return res
    for parameterName in sorted(queryDict):
      parameterValue = queryDict[parameterName]
      if not parameterValue:
        continue
      parameterType = 'String'
      if isinstance(parameterValue, (list, tuple)):
        # Lists are serialised as ';;;'-joined strings; the type of the first
        # element decides whether the whole list is flagged as Integer
        if isinstance(parameterValue[0], six.integer_types):
          parameterType = 'Integer'
          parameterValue = [str(x) for x in parameterValue]
        parameterValue = ';;;'.join(parameterValue)
      else:
        if isinstance(parameterValue, six.integer_types):
          parameterType = 'Integer'
          parameterValue = str(parameterValue)
        if isinstance(parameterValue, dict):
          parameterType = 'Dict'
          parameterValue = str(parameterValue)
      res = self.insertFields('TransformationMetaQueries', ['TransformationID', 'MetaDataName', 'MetaDataValue',
                                                           'MetaDataType', 'QueryType'],
                              [transID, parameterName, parameterValue, parameterType, queryType], conn=connection)
      if not res['OK']:
        message = 'Failed to add meta query'
        # Roll back whatever part of the query was already inserted
        self.deleteTransformationMetaQuery(transID, queryType, connection=connection)
        break
    else:
      # for/else: only reached when the loop completed without break
      message = 'Added meta data query'
    self.__updateTransformationLogging(transID, message, author, connection=connection)
    return res
def deleteTransformationMetaQuery(self, transName, queryType, author='', connection=False):
""" Remove a Meta Query from the TransformationMetaQueries table """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self._escapeString(queryType)
if not res['OK']:
return S_ERROR("Failed to parse the transformation query type")
queryType = res['Value']
req = "DELETE FROM TransformationMetaQueries WHERE TransformationID=%d AND QueryType=%s;" % (transID, queryType)
res = self._update(req, connection)
if not res['OK']:
return res
if res['Value']:
# Add information to the transformation logging
message = 'Deleted meta data query'
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
  def getTransformationMetaQuery(self, transName, queryType, connection=False):
    """Get the Meta Query for a given transformation.

    :param transName: transformation name or ID
    :type transName: str or int
    :param str queryType: 'Input' or 'Output' query
    :param connection: DB connection
    :returns: S_OK with query dictionary, S_ERROR, ENOENT if no query defined
    """
    res = self._getConnectionTransID(connection, transName)
    if not res['OK']:
      return res
    connection = res['Value']['Connection']
    transID = res['Value']['TransformationID']
    res = self._escapeString(queryType)
    if not res['OK']:
      return S_ERROR("Failed to parse the transformation query type")
    queryType = res['Value']
    req = "SELECT MetaDataName,MetaDataValue,MetaDataType FROM TransformationMetaQueries"
    req = req + " WHERE TransformationID=%d AND QueryType=%s;" % (transID, queryType)
    res = self._query(req, connection)
    if not res['OK']:
      return res
    queryDict = {}
    for parameterName, parameterValue, parameterType in res['Value']:
      # ';;;' is the list separator used by __addMetaQuery at insert time
      if re.search(';;;', str(parameterValue)):
        parameterValue = parameterValue.split(';;;')
        if parameterType == 'Integer':
          parameterValue = [int(x) for x in parameterValue]
      elif parameterType == 'Integer':
        parameterValue = int(parameterValue)
      elif parameterType == 'Dict':
        # SECURITY NOTE(review): eval() on a DB-stored string; acceptable only
        # because the table is written by trusted components. Consider
        # ast.literal_eval for defence in depth -- confirm before changing.
        parameterValue = eval(parameterValue)
      queryDict[parameterName] = parameterValue
    if not queryDict:
      return S_ERROR(ENOENT, "No MetaQuery found for transformation")
    return S_OK(queryDict)
###########################################################################
#
# These methods manipulate the TaskInputs table
#
def getTaskInputVector(self, transName, taskID, connection=False):
""" Get input vector for the given task """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance(taskID, list):
taskIDList = [taskID]
else:
taskIDList = list(taskID)
taskString = ','.join(["'%s'" % x for x in taskIDList])
req = "SELECT TaskID,InputVector FROM TaskInputs WHERE TaskID in (%s) AND TransformationID='%d';" % (taskString,
transID)
res = self._query(req)
inputVectorDict = {}
if not res['OK']:
return res
elif res['Value']:
for row in res['Value']:
inputVectorDict[row[0]] = row[1]
return S_OK(inputVectorDict)
def __insertTaskInputs(self, transID, taskID, lfns, connection=False):
vector = str.join(';', lfns)
fields = ['TransformationID', 'TaskID', 'InputVector']
values = [transID, taskID, vector]
res = self.insertFields('TaskInputs', fields, values, connection)
if not res['OK']:
gLogger.error("Failed to add input vector to task %d" % taskID)
return res
def __deleteTransformationTaskInputs(self, transID, taskID=0, connection=False):
""" Delete all the tasks inputs from the TaskInputs table for transformation with TransformationID
"""
req = "DELETE FROM TaskInputs WHERE TransformationID=%d" % transID
if taskID:
req = "%s AND TaskID=%d" % (req, int(taskID))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the TransformationLog table
#
  def __updateTransformationLogging(self, transName, message, authorDN, connection=False):
    """ Update the Transformation log table with any modifications.

        :param transName: transformation name or ID
        :param str message: free-text log message
        :param str authorDN: author identity; when empty, the subject of the
                             current proxy is used instead (best effort)

        SECURITY NOTE(review): message and authorDN are interpolated into the
        SQL statement without escaping; safe only because all callers are
        internal -- confirm before exposing through any external interface.
    """
    if not authorDN:
      # Best-effort fallback: take the identity from the current proxy
      res = getProxyInfo(False, False)
      if res['OK']:
        authorDN = res['Value']['subject']
    res = self._getConnectionTransID(connection, transName)
    if not res['OK']:
      return res
    connection = res['Value']['Connection']
    transID = res['Value']['TransformationID']
    req = "INSERT INTO TransformationLog (TransformationID,Message,Author,MessageDate)"
    req = req + " VALUES (%s,'%s','%s',UTC_TIMESTAMP());" % (transID, message, authorDN)
    return self._update(req, connection)
def getTransformationLogging(self, transName, connection=False):
""" Get logging info from the TransformationLog table
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT TransformationID, Message, Author, MessageDate FROM TransformationLog"
req = req + " WHERE TransformationID=%s ORDER BY MessageDate;" % (transID)
res = self._query(req)
if not res['OK']:
return res
transList = []
for transID, message, authorDN, messageDate in res['Value']:
transDict = {}
transDict['TransformationID'] = transID
transDict['Message'] = message
transDict['AuthorDN'] = authorDN
transDict['MessageDate'] = messageDate
transList.append(transDict)
return S_OK(transList)
def __deleteTransformationLog(self, transID, connection=False):
""" Remove the entries in the transformation log for a transformation
"""
req = "DELETE FROM TransformationLog WHERE TransformationID=%d;" % transID
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the DataFiles table
#
def __getAllFileIDs(self, connection=False):
""" Get all the fileIDs for the supplied list of lfns
"""
req = "SELECT LFN,FileID FROM DataFiles;"
res = self._query(req, connection)
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK((fids, lfns))
def __getFileIDsForLfns(self, lfns, connection=False):
""" Get file IDs for the given list of lfns
warning: if the file is not present, we'll see no errors
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE LFN in (%s);" % (stringListToString(lfns))
res = self._query(req, connection)
if not res['OK']:
return res
lfns = dict(res['Value'])
# Reverse dictionary
fids = dict((fileID, lfn) for lfn, fileID in lfns.items())
return S_OK((fids, lfns))
def __getLfnsForFileIDs(self, fileIDs, connection=False):
""" Get lfns for the given list of fileIDs
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE FileID in (%s);" % stringListToString(fileIDs)
res = self._query(req, connection)
if not res['OK']:
return res
fids = dict(res['Value'])
# Reverse dictionary
lfns = dict((fileID, lfn) for lfn, fileID in fids.items())
return S_OK((fids, lfns))
  def __addDataFiles(self, lfns, connection=False):
    """ Add a file to the DataFiles table and retrieve the FileIDs.

        :param list lfns: LFNs to register
        :returns: S_OK({lfn: fileID}) for the already-known plus newly
                  inserted files

        NOTE(review): an insert failing on a duplicate-key error (e.g. a
        concurrent registration of the same LFN) is silently skipped, so that
        LFN is then missing from the returned mapping -- confirm callers
        tolerate this.
    """
    res = self.__getFileIDsForLfns(lfns, connection=connection)
    if not res['OK']:
      return res
    # Insert only files not found, and assume the LFN is unique in the table
    lfnFileIDs = res['Value'][1]
    for lfn in set(lfns) - set(lfnFileIDs):
      req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn
      res = self._update(req, connection)
      # If the LFN is duplicate we get an error and ignore it
      if res['OK']:
        lfnFileIDs[lfn] = res['lastRowId']
    return S_OK(lfnFileIDs)
def __setDataFileStatus(self, fileIDs, status, connection=False):
""" Set the status of the supplied files
"""
req = "UPDATE DataFiles SET Status = '%s' WHERE FileID IN (%s);" % (status, intListToString(fileIDs))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate multiple tables
#
  def addTaskForTransformation(self, transID, lfns=None, se='Unknown', connection=False):
    """ Create a new task with the supplied files for a transformation.

        :param transID: transformation name or ID
        :param list lfns: optional input LFNs; they must already be registered
                          in the transformation with a status allowed for tasks
        :param str se: target SE recorded for the task
        :returns: S_OK(taskID) of the newly created task
    """
    res = self._getConnectionTransID(connection, transID)
    if not res['OK']:
      return res
    if lfns is None:
      lfns = []
    connection = res['Value']['Connection']
    transID = res['Value']['TransformationID']
    # Be sure the all the supplied LFNs are known to the database for the supplied transformation
    fileIDs = []
    if lfns:
      res = self.getTransformationFiles(condDict={'TransformationID': transID, 'LFN': lfns}, connection=connection)
      if not res['OK']:
        return res
      foundLfns = set()
      for fileDict in res['Value']:
        fileIDs.append(fileDict['FileID'])
        lfn = fileDict['LFN']
        if fileDict['Status'] in self.allowedStatusForTasks:
          foundLfns.add(lfn)
        else:
          gLogger.error("Supplied file not in %s status but %s" % (self.allowedStatusForTasks, fileDict['Status']), lfn)
      # Refuse the task if any requested file is missing or in a wrong status
      unavailableLfns = set(lfns) - foundLfns
      if unavailableLfns:
        gLogger.error("Supplied files not found for transformation", sorted(unavailableLfns))
        return S_ERROR("Not all supplied files available in the transformation database")
    # Insert the task into the jobs table and retrieve the taskID.
    # The lock keeps the INSERT and the subsequent SELECT of the generated
    # TaskID together, so no other thread can interleave its own insert.
    self.lock.acquire()
    req = "INSERT INTO TransformationTasks(TransformationID, ExternalStatus, ExternalID, TargetSE,"
    req = req + " CreationTime, LastUpdateTime)"
    req = req + " VALUES (%s,'%s','%d','%s', UTC_TIMESTAMP(), UTC_TIMESTAMP());" % (transID, 'Created', 0, se)
    res = self._update(req, connection)
    if not res['OK']:
      self.lock.release()
      gLogger.error("Failed to publish task for transformation", res['Message'])
      return res
    # With InnoDB, TaskID is computed by a trigger, which sets the local variable @last (per connection)
    # @last is the last insert TaskID. With multi-row inserts, will be the first new TaskID inserted.
    # The trigger TaskID_Generator must be present with the InnoDB schema (defined in TransformationDB.sql)
    if self.isTransformationTasksInnoDB:
      res = self._query("SELECT @last;", connection)
    else:
      res = self._query("SELECT LAST_INSERT_ID();", connection)
    self.lock.release()
    if not res['OK']:
      return res
    taskID = int(res['Value'][0][0])
    gLogger.verbose("Published task %d for transformation %d." % (taskID, transID))
    # If we have input data then update their status, and taskID in the transformation table
    if lfns:
      res = self.__insertTaskInputs(transID, taskID, lfns, connection=connection)
      if not res['OK']:
        # Roll back the freshly created task on failure
        self.__removeTransformationTask(transID, taskID, connection=connection)
        return res
      res = self.__assignTransformationFile(transID, taskID, se, fileIDs, connection=connection)
      if not res['OK']:
        self.__removeTransformationTask(transID, taskID, connection=connection)
        return res
    return S_OK(taskID)
def extendTransformation(self, transName, nTasks, author='', connection=False):
""" Extend SIMULATION type transformation by nTasks number of tasks
"""
connection = self.__getConnection(connection)
res = self.getTransformation(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get transformation details", res['Message'])
return res
transType = res['Value']['Type']
transID = res['Value']['TransformationID']
extendableProds = Operations().getValue('Transformations/ExtendableTransfTypes', ['Simulation', 'MCSimulation'])
if transType.lower() not in [ep.lower() for ep in extendableProds]:
return S_ERROR('Can not extend non-SIMULATION type production')
taskIDs = []
for _task in range(nTasks):
res = self.addTaskForTransformation(transID, connection=connection)
if not res['OK']:
return res
taskIDs.append(res['Value'])
# Add information to the transformation logging
message = 'Transformation extended by %d tasks' % nTasks
self.__updateTransformationLogging(transName, message, author, connection=connection)
return S_OK(taskIDs)
def cleanTransformation(self, transName, author='', connection=False):
""" Clean the transformation specified by name or id """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__deleteTransformationFileTasks(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationFiles(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationTaskInputs(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationTasks(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationMetaQueries(transID, connection=connection)
if not res['OK']:
return res
self.__updateTransformationLogging(transID, "Transformation Cleaned", author, connection=connection)
return S_OK(transID)
def deleteTransformation(self, transName, author='', connection=False):
""" Remove the transformation specified by name or id """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.cleanTransformation(transID, author=author, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationLog(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationParameters(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformation(transID, connection=connection)
if not res['OK']:
return res
res = self.__updateFilterQueries(connection=connection)
if not res['OK']:
return res
return S_OK()
def __removeTransformationTask(self, transID, taskID, connection=False):
res = self.__deleteTransformationTaskInputs(transID, taskID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationFileTask(transID, taskID, connection=connection)
if not res['OK']:
return res
res = self.__resetTransformationFile(transID, taskID, connection=connection)
if not res['OK']:
return res
return self.__deleteTransformationTask(transID, taskID, connection=connection)
def __checkUpdate(self, table, param, paramValue, selectDict=None, connection=False):
""" Check whether the update will perform an update """
req = "UPDATE %s SET %s = '%s'" % (table, param, paramValue)
if selectDict:
req = "%s %s" % (req, self.buildCondition(selectDict))
return self._update(req, connection)
def __getConnection(self, connection):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def _getConnectionTransID(self, connection, transName):
connection = self.__getConnection(connection)
res = self._getTransformationID(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for transformation", res['Message'])
return res
transID = res['Value']
resDict = {'Connection': connection, 'TransformationID': transID}
return S_OK(resDict)
####################################################################################
#
# This part should correspond to the DIRAC Standard File Catalog interface
#
####################################################################################
def exists(self, lfns, connection=False):
""" Check the presence of the lfn in the TransformationDB DataFiles table
"""
gLogger.info("TransformationDB.exists: Attempting to determine existence of %s files." % len(lfns))
res = self.__getFileIDsForLfns(lfns, connection=connection)
if not res['OK']:
return res
fileIDs = res['Value'][0]
failed = {}
successful = {}
fileIDsValues = set(fileIDs.values())
for lfn in lfns:
successful[lfn] = (lfn in fileIDsValues)
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
  def addFile(self, fileDicts, force=False, connection=False):
    """ Add the supplied lfn to the Transformations and to the DataFiles table if it passes the filter.

        :param dict fileDicts: {lfn: metadata-dict}; only the keys are used here,
                               the metadata is re-read from the catalog
        :param bool force: when True, files matching no transformation are still
                           marked as added (successful[lfn] stays unset/True logic below)
        :returns: S_OK({'Successful': {lfn: bool}, 'Failed': {lfn: message}})
    """
    gLogger.info("TransformationDB.addFile: Attempting to add %s files." % len(fileDicts))
    successful = {}
    failed = {}
    # Determine which files pass the filters and are to be added to transformations
    transFiles = {}
    filesToAdd = []
    catalog = FileCatalog()
    for lfn in fileDicts:
      gLogger.info("addFile: Attempting to add file %s" % lfn)
      # The metadata used for filtering comes from the catalog, not from the caller
      res = catalog.getFileUserMetadata(lfn)
      if not res['OK']:
        gLogger.error("Failed to getFileUserMetadata for file", "%s: %s" % (lfn, res['Message']))
        failed[lfn] = res['Message']
        continue
      else:
        metadatadict = res['Value']
      gLogger.info('Filter file with metadata', metadatadict)
      transIDs = self._filterFileByMetadata(metadatadict)
      gLogger.info('Transformations passing the filter: %s' % transIDs)
      if not (transIDs or force):  # not clear how force should be used for
        successful[lfn] = False  # True -> False bug fix: otherwise it is set to True even if transIDs is empty.
      else:
        filesToAdd.append(lfn)
        # Group the files per matching transformation
        for trans in transIDs:
          if trans not in transFiles:
            transFiles[trans] = []
          transFiles[trans].append(lfn)
    # Add the files to the transformations
    gLogger.info('Files to add to transformations:', filesToAdd)
    if filesToAdd:
      for transID, lfns in transFiles.items():
        res = self.addFilesToTransformation(transID, lfns)
        if not res['OK']:
          # A single failing transformation aborts the whole call
          gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
          return res
        else:
          for lfn in lfns:
            successful[lfn] = True
    res = S_OK({'Successful': successful, 'Failed': failed})
    return res
  def removeFile(self, lfns, connection=False):
    """ Remove file specified by lfn from the ProcessingDB.

        The files are not physically deleted: both their TransformationFiles
        and DataFiles statuses are set to 'Deleted'.

        NOTE(review): files unknown to the DB are reported in 'Successful' with
        the string 'File does not exist' (not in 'Failed'), and 'Failed' is in
        fact never populated here -- confirm callers expect this convention.
    """
    gLogger.info("TransformationDB.removeFile: Attempting to remove %s files." % len(lfns))
    failed = {}
    successful = {}
    connection = self.__getConnection(connection)
    if not lfns:
      return S_ERROR("No LFNs supplied")
    res = self.__getFileIDsForLfns(lfns, connection=connection)
    if not res['OK']:
      return res
    fileIDs, lfnFilesIDs = res['Value']
    for lfn in lfns:
      if lfn not in lfnFilesIDs:
        successful[lfn] = 'File does not exist'
    if fileIDs:
      # Mark as Deleted both in the per-transformation table and in DataFiles
      res = self.__setTransformationFileStatus(list(fileIDs), 'Deleted', connection=connection)
      if not res['OK']:
        return res
      res = self.__setDataFileStatus(list(fileIDs), 'Deleted', connection=connection)
      if not res['OK']:
        return S_ERROR("TransformationDB.removeFile: Failed to remove files.")
    for lfn in lfnFilesIDs:
      if lfn not in failed:
        successful[lfn] = True
    resDict = {'Successful': successful, 'Failed': failed}
    return S_OK(resDict)
def addDirectory(self, path, force=False):
""" Adds all the files stored in a given directory in file catalog """
gLogger.info("TransformationDB.addDirectory: Attempting to populate %s." % path)
res = pythonCall(30, self.__addDirectory, path, force)
if not res['OK']:
gLogger.error("Failed to invoke addDirectory with shifter proxy")
return res
return res['Value']
def __addDirectory(self, path, force):
res = setupShifterProxyInEnv("ProductionManager")
if not res['OK']:
return S_OK("Failed to setup shifter proxy")
catalog = FileCatalog()
start = time.time()
res = catalog.listDirectory(path)
if not res['OK']:
gLogger.error("TransformationDB.addDirectory: Failed to get files. %s" % res['Message'])
return res
if path not in res['Value']['Successful']:
gLogger.error("TransformationDB.addDirectory: Failed to get files.")
return res
gLogger.info("TransformationDB.addDirectory: Obtained %s files in %s seconds." % (path, time.time() - start))
successful = []
failed = []
for lfn in res['Value']['Successful'][path]["Files"]:
res = self.addFile({lfn: {}}, force=force)
if not res['OK'] or lfn not in res['Value']['Successful']:
failed.append(lfn)
else:
successful.append(lfn)
return {"OK": True, "Value": len(res['Value']['Successful']), "Successful": successful, "Failed": failed}
  def setMetadata(self, path, usermetadatadict):
    """
    It can be applied to a file or to a directory (path).
    For a file, add the file to Transformations if the updated metadata dictionary passes the filter.
    For a directory, add the files contained in the directory to the Transformations
    if the the updated metadata dictionary passes the filter.

    :param str path: catalog path of a file or directory
    :param dict usermetadatadict: metadata entries overriding/extending the
                                  metadata currently stored in the catalog
    """
    gLogger.info("setMetadata: Attempting to set metadata %s to: %s" % (usermetadatadict, path))
    transFiles = {}
    filesToAdd = []
    catalog = FileCatalog()
    # Determine whether the path is a file or a directory (both flags are
    # queried; the file case takes precedence below)
    res = catalog.isFile(path)
    if res['OK']:
      isFile = res['Value']['Successful'][path]
    else:
      gLogger.error("Failed isFile %s: %s" % (path, res['Message']))
      return res
    res = catalog.isDirectory(path)
    if res['OK']:
      isDirectory = res['Value']['Successful'][path]
    else:
      gLogger.error("Failed isDirectory %s: %s" % (path, res['Message']))
      return res
    if isFile:
      res = catalog.getFileUserMetadata(path)
    elif isDirectory:
      res = catalog.getDirectoryUserMetadata(path)
    if not res['OK']:
      gLogger.error("Failed to get User Metadata %s: %s" % (path, res['Message']))
      return res
    else:
      metadatadict = res['Value']
    # The caller-provided metadata overrides the stored values for filtering
    metadatadict.update(usermetadatadict)
    gLogger.info('Filter file with metadata:', metadatadict)
    transIDs = self._filterFileByMetadata(metadatadict)
    gLogger.info('Transformations passing the filter: %s' % transIDs)
    if not transIDs:
      return S_OK()
    elif isFile:
      filesToAdd.append(path)
    elif isDirectory:
      # For a directory, all files matching the combined metadata are added
      res = catalog.findFilesByMetadata(metadatadict, path)
      if not res['OK']:
        gLogger.error("Failed to findFilesByMetadata %s: %s" % (path, res['Message']))
        return res
      filesToAdd.extend(res['Value'])
    for trans in transIDs:
      if trans not in transFiles:
        transFiles[trans] = []
      transFiles[trans].extend(filesToAdd)
    # Add the files to the transformations
    gLogger.info('Files to add to transformations:', filesToAdd)
    if filesToAdd:
      for transID, lfns in transFiles.items():
        res = self.addFilesToTransformation(transID, lfns)
        if not res['OK']:
          gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
          return res
    return S_OK()
  def _filterFileByMetadata(self, metadatadict):
    """Pass the input metadatadict through those currently active.

    :param dict metadatadict: metadata of the candidate file
    :returns: list of transformation IDs whose filter query matches

    NOTE(review): on internal errors this method returns the S_ERROR dict
    instead of a list, while callers iterate over the return value directly --
    confirm whether the mixed return type is intended.
    """
    transIDs = []
    queries = self.filterQueries
    catalog = FileCatalog()
    gLogger.info('Filter file by queries', queries)
    res = catalog.getMetadataFields()
    if not res['OK']:
      gLogger.error("Error in getMetadataFields: %s" % res['Message'])
      return res
    if not res['Value']:
      gLogger.error("Error: no metadata fields defined")
      return res
    # File-level fields take precedence; directory fields are merged on top
    typeDict = res['Value']['FileMetaFields']
    typeDict.update(res['Value']['DirectoryMetaFields'])
    for transID, query in queries:
      gLogger.info("Check the transformation status")
      # Only transformations in an "active" state take part in the filtering
      res = self.getTransformationParameters(transID, 'Status')
      # NOTE(review): res['OK'] is not checked here; if the parameter fetch
      # fails, accessing res['Value'] raises KeyError -- confirm and harden.
      if res['Value'] not in ['New', 'Active', 'Stopped', 'Completing', 'Flush']:
        continue
      mq = MetaQuery(query, typeDict)
      gLogger.info("Apply query %s to metadata %s" % (mq.getMetaQuery(), metadatadict))
      res = mq.applyQuery(metadatadict)
      if not res['OK']:
        gLogger.error("Error in applying query: %s" % res['Message'])
        return res
      elif res['Value']:
        gLogger.info("Apply query result is True")
        transIDs.append(transID)
      else:
        gLogger.info("Apply query result is False")
    return transIDs
| yujikato/DIRAC | src/DIRAC/TransformationSystem/DB/TransformationDB.py | Python | gpl-3.0 | 73,129 | [
"DIRAC"
] | 92d07f51bf5ca3ff4c714b2a250717298a32ac4fb157086616a7c76f35c6d524 |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.AndreaConsadori.Alvarion'
VERSION = '3.0'
AUTHOR = 'Andrea Consadori'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori']
PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori', 'ZenPacks.AndreaConsadori.Alvarion']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=3.0'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.

from setuptools import setup, find_packages

setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,

    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,

    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed.  If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,

    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,

    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),

    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,

    # Tell setuptools what non-python files should also be included
    # with the binary egg.
    package_data = {
        # BUG FIX: the '' key used to appear twice in this dict; the first
        # entry (['*.txt']) was silently discarded by the duplicate key.
        # Both lists are now merged into a single entry.
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
        NAME: ['objects/*', 'skins/*/*', 'services/*', 'reports/*/*',
               'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*'],
    },

    # Indicate dependencies on other python modules or ZenPacks.  This line
    # is modified by zenoss when the ZenPack edit page is submitted.  Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end.  Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,

    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },

    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
| anksp21/Community-Zenpacks | ZenPacks.AndreaConsadori.Alvarion/setup.py | Python | gpl-2.0 | 3,300 | [
"VisIt"
] | eea8187a7d619a3b9196b527278484c6fe2d05b9e8d5e818960f02771e441e57 |
import math
import random
import numpy
from effectlayer import *
class FireflySwarmLayer(EffectLayer):
"""
Each LED node is a firefly. When one blinks, it pulls its neighbors closer or
further from blinking themselves, bringing the group into and out of sync.
For a full explanation of how this works, see:
Synchronization of Pulse-Coupled Biological Oscillators
Renato E. Mirollo; Steven H. Strogatz
SIAM Journal on Applied Mathematics, Vol. 50, No. 6. (Dec., 1990), pp. 1645-1662
This has a bug - it can miss blinks if update isn't called frequently enough -
but it's only apparent at unacceptably low framerates and no time to fix now.
"""
class Firefly:
"""
A single firefly. Its activation level increases monotonically in range [0,1] as
a function of time. When its activation reaches 1, it initiates a blink and drops
back to 0.
"""
CYCLE_TIME = 3 # seconds
NUDGE = 0.2 # how much to nudge it toward firing after its neighbor fires
EXP = 2.0 # exponent for phase->activation function, chosen somewhat arbitrarily
def __init__(self, node, color=(1,1,1)):
self.offset = random.random() * self.CYCLE_TIME
self.node = node
self.color = color
self.blinktime = 0
def nudge(self, params, response_level):
# Bump this firefly forward or backward in its cycle, closer to or further from
# its next blink, depending on response level
p = self.phi(params)
a = self.activation(p)
response = response_level - 0.5
nudge_size = response*self.NUDGE
# if we always "desync" at same rate, it won't actually desync
if response < 0:
nudge_size *= (random.random()+0.5)
a2 = max(min(a + nudge_size, 1), 0)
# find the phase parameter corresponding to that activation level
p2 = self.activation_to_phi(a2)
# adjust time offset to bring us to that phase
self.offset += (p2 - p) * self.CYCLE_TIME
# TMI
debug=False
if self.node == 1 and debug:
print self.offset,
print p,
print p2,
print self.phi(params),
print self.activation(self.phi(params))
# now that we've changed its state, we need to re-update it
self.update(params)
def phi(self, params):
"""
Converts current time + time offset into phi (oscillatory phase parameter in range [0,1])
"""
return ((params.time + self.offset) % self.CYCLE_TIME)/self.CYCLE_TIME + 0.01
def activation(self, phi):
"""
Converts phi into activation level. Activation function must be concave in order for
this algorithm to work.
"""
return pow(phi, 1/self.EXP)
def activation_to_phi(self, f):
""" Convert from an activation level back to a phi value. """
return pow(f, self.EXP)
def update(self, params):
"""
Note the time when activation crosses threshold, so we can use it as the onset time for rendering the
actual blink. Return whether firefly has just crossed the threshold or not so we know whether to nudge its
neighbors.
"""
p = self.phi(params)
blink = self.activation(p) >= 1
if blink:
self.blinktime = params.time
return blink
def render(self, model, params, frame):
"""
Draw pulses with sinusoidal ramp-up/ramp-down
"""
dt = params.time - self.blinktime
dur = float(self.CYCLE_TIME)/2
if dt < dur:
scale = math.sin(math.pi * dt/dur)
if self.color is None:
frame[self.node] *= scale
else:
frame[self.node] += numpy.array(self.color) * scale
else:
if self.color is None:
frame[self.node] = 0
def __init__(self):
super(FireflySwarmLayer, self).__init__()
self.cyclers = []
self.cachedModel = None
def render(self, model, params, frame):
response_level = 0.99
self.color = (0.7, 0.9, 0.8)
if model != self.cachedModel:
self.numberOfNodes = len(model.nodes)
self.cyclers = [ FireflySwarmLayer.Firefly(e, color=self.color) for e in range(self.numberOfNodes) ]
self.cachedModel = model
blink = self.cyclers[0].update(params)
self.cyclers[0].render(model, params, frame)
for c in self.cyclers[1:]:
if blink and response_level:
c.nudge(params, response_level)
else:
c.update(params)
c.render(model, params, frame)
return frame
| FlamingLotusGirls/soma | pier14/opc-client/effects/firefly_swarm.py | Python | apache-2.0 | 5,203 | [
"Firefly"
] | e4852fe1d0d79fbc12bd1dadba3dc13a489ef82ff083843fab07b92276310308 |
# Modeled on Python's Lib/compiler/visitor.py
class BadNode (Exception): pass
class ASTWalk:
def _default(self, node, *args):
raise BadNode (str (node))
def dispatch(self, node, *args):
methname = 'visit' + node.__class__.__name__
method = getattr(self.visitor, methname, self.default)
return method (node, *args)
def preorder(self, tree, visitor, *args):
self.visitor = visitor
self.default = getattr (self.visitor, 'visitdefault', self._default)
l = self.dispatch(tree, *args)
if l == None: return
for n in l:
self.dispatch (n, *args)
| seblefevre/testerman | plugins/codecs/ber/visitor.py | Python | gpl-2.0 | 637 | [
"VisIt"
] | fc3f5c2b22b42787e583e53c3e8c218e1b85c33929e9494006c82ca13fe441bc |
""" This contains unit tests to make sure that the migration between PyGSI and M2Crypto is as smooth as possible
The test covers only the method exposed by the PyGSI version for the time being.
We are not testing:
* generateProxyRequest -> boils down to testing X509Request
* setCertificate -> not used anywhere
We are skipping:
* getPublicKey -> no way to test that really
* getSerialNumber -> buggy in PyGSI
"""
# redefined-outer-name is needed because we keep passing get_X509Certificate_class as param
# pylint: disable=redefined-outer-name
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from DIRAC.Core.Security.test.x509TestUtilities import (
deimportDIRAC, CERTS, CERTCONTENTS, getCertOption, HOSTCERT, VOMSPROXY, VOMS_PROXY_ATTR
)
from pytest import mark, fixture, skip
parametrize = mark.parametrize
X509CERTTYPES = ('M2_X509Certificate',)
# This fixture will return a X509Certificate class
# https://docs.pytest.org/en/latest/fixture.html#automatic-grouping-of-tests-by-fixture-instances
@fixture(scope="function", params=X509CERTTYPES)
def get_X509Certificate_class(request):
""" Fixture to return the X509Certificate class.
It also 'de-import' DIRAC before and after
"""
# Clean before
deimportDIRAC()
x509Class = request.param
if x509Class == 'M2_X509Certificate':
from DIRAC.Core.Security.m2crypto.X509Certificate import X509Certificate
else:
raise NotImplementedError()
yield X509Certificate
# Clean after
deimportDIRAC()
@parametrize('cert_file', CERTS)
def test_executeOnlyIfCertLoaded(cert_file, get_X509Certificate_class):
"""" Tests whether the executeOnlyIfCertLoaded decorator works"""
x509Cert = get_X509Certificate_class()
# Since we did not load the certificate, we should get S_ERROR
res = x509Cert.getNotAfterDate()
from DIRAC.Core.Utilities.DErrno import ENOCERT
assert res['Errno'] == ENOCERT
# Now load it
x509Cert.load(cert_file)
res = x509Cert.getNotAfterDate()
assert res['OK']
@parametrize('cert_file', CERTS)
def test_load(cert_file, get_X509Certificate_class):
"""" Just load a certificate """
x509Cert = get_X509Certificate_class()
res = x509Cert.load(cert_file)
assert res['OK']
def test_load_non_existing_file(get_X509Certificate_class):
"""" Just load a non existing file and non pem formated string """
x509Cert = get_X509Certificate_class()
res = x509Cert.load('/tmp/nonexistingFile.pem')
assert not res['OK']
from DIRAC.Core.Utilities.DErrno import ECERTREAD
assert res['Errno'] == ECERTREAD
@parametrize('cert_file', CERTS)
def test_loadFromFile(cert_file, get_X509Certificate_class):
"""" Just load a certificate """
x509Cert = get_X509Certificate_class()
res = x509Cert.loadFromFile(cert_file)
assert res['OK']
def test_loadFromFile_non_existing_file(get_X509Certificate_class):
"""" Just load a non existing file"""
x509Cert = get_X509Certificate_class()
res = x509Cert.loadFromFile('/tmp/nonexistingFile.pem')
assert not res['OK']
from DIRAC.Core.Utilities.DErrno import EOF
assert res['Errno'] == EOF
# pylint: disable=unused-argument
@parametrize('cert_content_type', CERTCONTENTS)
def test_loadFromString(cert_content_type, get_X509Certificate_class, indirect=('hostcertcontent', 'usercertcontent')):
"""" Just load a certificate from PEM string
:param cert_content_type: either HOSTCERTCONTENT or USERCERTCONTENT
:param indirect: pytest trick,
see https://docs.pytest.org/en/latest/example/parametrize.html#apply-indirect-on-particular-arguments
"""
x509Cert = get_X509Certificate_class()
res = x509Cert.loadFromString(CERTCONTENTS[cert_content_type])
assert res['OK'], res
def test_loadFromString_non_pem(get_X509Certificate_class):
"""" Just load a non pem formated string """
x509Cert = get_X509Certificate_class()
res = x509Cert.loadFromString('THIS IS NOT PEM DATA')
assert not res['OK']
from DIRAC.Core.Utilities.DErrno import ECERTREAD
assert res['Errno'] == ECERTREAD
# TODO: have a non valid certificate to try
@parametrize('cert_file', CERTS)
def test_hasExpired(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check it has not expired"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.hasExpired()
assert res['OK']
assert not res['Value']
@parametrize('cert_file', CERTS)
def test_getNotAfterDate(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check its expiration date"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getNotAfterDate()
assert res['OK']
# We expect getNotAfterDate to return a datetime
assert res['Value'].date() == getCertOption(cert_file, 'endDate')
@parametrize('cert_file', CERTS)
def test_getNotBeforeDate(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check its start validity date"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getNotBeforeDate()
assert res['OK']
# We expect getNotBeforeDate to return a datetime
assert res['Value'].date() == getCertOption(cert_file, 'startDate')
@parametrize('cert_file', CERTS)
def test_getSubjectDN(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check its subject"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getSubjectDN()
assert res['OK']
assert res['Value'] == getCertOption(cert_file, 'subjectDN')
@parametrize('cert_file', CERTS)
def test_getIssuerDN(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check its issuer"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getIssuerDN()
assert res['OK']
assert res['Value'] == getCertOption(cert_file, 'issuerDN')
# # TODO: this method seems not to be used anyway
# @parametrize('cert_file', CERTS)
# def test_getSubjectNameObject(cert_file, get_X509Certificate_class):
# """" Load a valid certificate and check its subject object. """
# x509Cert = get_X509Certificate_class()
# x509Cert.load(cert_file)
# res = x509Cert.getSubjectNameObject()
# assert res['OK']
# # We cannot compare the objects themselves because it is a different object...
# expectedValue = getCertOption(cert_file, 'subjectDN')
# try:
# # This works in the case of pyGSI
# returnedValue = res['Value'].one_line()
# except AttributeError:
# # This works in the case of M2Crypto
# returnedValue = str(res['Value'])
# assert returnedValue == expectedValue
# # TODO: this method seems not to be used anyway
# @parametrize('cert_file', CERTS)
# def test_getIssuerNameObject(cert_file, get_X509Certificate_class):
# """" Load a valid certificate and check its subject object. """
# x509Cert = get_X509Certificate_class()
# x509Cert.load(cert_file)
# res = x509Cert.getIssuerNameObject()
# assert res['OK']
# # We cannot compare the objects themselves because it is a different object...
# expectedValue = getCertOption(cert_file, 'issuerDN')
# try:
# # This works in the case of pyGSI
# returnedValue = res['Value'].one_line()
# except AttributeError:
# # This works in the case of M2Crypto
# returnedValue = str(res['Value'])
# assert returnedValue == expectedValue
@mark.skip(reason="no way of currently testing this")
@parametrize('cert_file', CERTS)
def test_getPublicKey(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and verify its public key (for m2crypto only)"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getPublicKey()
assert res['OK']
if 'm2crypto' in get_X509Certificate_class.__module__:
print(x509Cert.verify(res['Value']))
@parametrize('cert_file', CERTS)
def test_getSerialNumber(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check its public key"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getSerialNumber()
assert res['OK']
assert res['Value'] == getCertOption(cert_file, 'serial')
@parametrize('cert_file', CERTS)
def test_getDIRACGroup_on_cert(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check if there is a dirac group. It should not"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
# ignoreDefault is used such that there is no attempt to look for group in the CS
res = x509Cert.getDIRACGroup(ignoreDefault=True)
assert res['OK']
assert res['Value'] is False
@parametrize('cert_file', CERTS)
def test_hasVOMSExtensions_on_cert(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check if it has VOMS extensions. It should not"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
# ignoreDefault is used such that there is no attempt to look for group in the CS
res = x509Cert.hasVOMSExtensions()
assert res['OK']
assert res['Value'] is False
@parametrize('cert_file', CERTS)
def test_getVOMSData_on_cert(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and load the (non existing VOMS data)"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getVOMSData()
from DIRAC.Core.Utilities.DErrno import EVOMS
assert not res['OK']
assert res['Errno'] == EVOMS
@parametrize('cert_file', CERTS)
def test_getRemainingSecs_on_cert(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check the output is a positive integer"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getRemainingSecs()
assert res['OK']
assert isinstance(res['Value'], int) and res['Value'] > 0
@parametrize('cert_file', CERTS)
def test_getExtensions_on_cert(cert_file, get_X509Certificate_class):
"""" Load a valid certificate and check the output is a positive integer"""
x509Cert = get_X509Certificate_class()
x509Cert.load(cert_file)
res = x509Cert.getExtensions()
assert res['OK']
extensionDict = dict(extTuple for extTuple in res['Value'])
assert sorted(extensionDict) == sorted(getCertOption(cert_file, 'availableExtensions'))
# Test a few of them
for ext in ('basicConstraints', 'extendedKeyUsage'):
assert extensionDict[ext] == getCertOption(cert_file, ext)
# Valid only for Host certificate:
if cert_file == HOSTCERT:
assert extensionDict['subjectAltName'] == getCertOption(cert_file, 'subjectAltName')
###########################################################################
# Temporary. For the time being, we need a real proxy !
def test_getVOMSData(get_X509Certificate_class):
"""" Load a valid certificate and check the output is a positive integer"""
x509Cert = get_X509Certificate_class()
x509Cert.load(VOMSPROXY)
res = x509Cert.getVOMSData()
assert res['OK']
assert res['Value'] == VOMS_PROXY_ATTR
def test_hasVOMSExtensions(get_X509Certificate_class):
"""" Load a certificate generated with voms-proxy-fake and check hasVOMSExtension is True"""
x509Cert = get_X509Certificate_class()
x509Cert.load(VOMSPROXY)
res = x509Cert.hasVOMSExtensions()
assert res['OK']
assert res['Value']
| yujikato/DIRAC | src/DIRAC/Core/Security/test/Test_X509Certificate.py | Python | gpl-3.0 | 11,348 | [
"DIRAC"
] | 3a6e7091037144e723370565ab43cc760138b19636def5329c36c01fc354b8ed |
""" DIRAC Encoding utilities based on json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
# Describes the way date time will be transmetted
# We do not keep miliseconds
DATETIME_DEFAULT_FORMAT = '%Y-%m-%d %H:%M:%S'
class JSerializable(object):
"""
Base class to define a serializable object by DIRAC.
An object that ought to be serialized throught DISET shoud:
* inherit from this class
* define the _attrToSerialize list as class member. It is a list of
strings containing the name of the attributes that should be serialized
* have a constructor that takes no arguments, or only keywords arguments
Exemple:
class Serializable(JSerializable):
_attrToSerialize = ['myAttr']
def __init__(self, myAttr = None):
self.myAttr = myAttr
Limitations:
* This will not work for classes defined inside classes. The class definition shoud be
visible from the global scope
* Class attributes cannot be serialized as such. They are converted to instance attributes.
"""
def _toJSON(self):
""" Translates the objct into a dictionary.
It is meant to be called by JSONDecoder only.
It relies on the attribute _attrToSerialize to know which attributes to
serialize.
The returned dictionary contains the attributes serialized as well as
hints for reconstructing the object upon receive.
:raises TypeError: If the object is not serializable (no _attrToSerialize defined)
:returns: a dictionary representing the object
"""
# If the object does not have _attrToSerialize defined
# Raise TypeError
if not hasattr(self, '_attrToSerialize'):
raise TypeError("Object not serializable. _attrToSerialize not defined")
jsonData = {}
# Store the class name and the module name
jsonData['__dCls'] = self.__class__.__name__
jsonData['__dMod'] = self.__module__
# self._attrToSerialize will be defined by the child class
for attr in self._attrToSerialize: # pylint: disable=no-member
# If an argument to serialize is not defined,
# we continue. This is handy for arguments that
# are defined dynamicaly ,like SQLAlchemy does.
if not hasattr(self, attr):
continue
attrValue = getattr(self, attr)
if attrValue is not None:
jsonData[attr] = attrValue
return jsonData
class DJSONEncoder(json.JSONEncoder):
""" This custom encoder is to add support to json for
tuple, datetime, and any object inheriting from JSerializable
"""
def default(self, obj): # pylint: disable=method-hidden
""" Add supports for datetime and JSerializable class to default json
:param obj: object to serialize
:return: json string of the serialized objects
"""
# If we have a datetime object, dumps its string representation
if isinstance(obj, datetime.datetime):
return {'__dCls': 'dt', 'obj': obj.strftime(DATETIME_DEFAULT_FORMAT)}
# if the object inherits from JSJerializable, try to serialize it
elif isinstance(obj, JSerializable):
return obj._toJSON() # pylint: disable=protected-access
# otherwise, let the parent do
return super(DJSONEncoder, self).default(obj)
class DJSONDecoder(json.JSONDecoder):
""" This custom decoder is to add support to json for
tuple, datetime, and any object inheriting from JSerializable
"""
def __init__(self, *args, **kargs):
"""
Init method needed in order to give the object_hook to have special
deserialization method.
"""
super(DJSONDecoder, self).__init__(object_hook=self.dict_to_object,
*args, **kargs)
@staticmethod
def dict_to_object(dataDict):
""" Convert the dictionary into an object.
Adds deserialization support for datetype and JSerializable
:param dataDict: json dictionary representing the data
:returns: deserialized object
"""
className = dataDict.pop('__dCls', None)
# If the class is of type dt (datetime)
if className == 'dt':
return datetime.datetime.strptime(dataDict['obj'], DATETIME_DEFAULT_FORMAT)
elif className:
import importlib
# Get the module
modName = dataDict.pop('__dMod')
# Load the module
mod = importlib.import_module(modName)
# import the class
cl = getattr(mod, className)
# Instantiate the object
obj = cl()
# Set each attribute
for attrName, attrValue in dataDict.items():
# If the value is None, do not set it
# This is needed to play along well with SQLalchemy
if attrValue is None:
continue
setattr(obj, attrName, attrValue)
return obj
# If we do not know how to serialize, just return the dictionary
return dataDict
def encode(inData):
""" Encode the input data into a JSON string
:param inData: anything that can be serialized.
Namely, anything that can be serialized by standard json package,
datetime object, tuples, and any class that inherits from JSerializable
:return: a json string
"""
return json.dumps(inData, cls=DJSONEncoder)
def decode(encodedData):
""" Decode the json encoded string
:param encodedData: json encoded string
:return: the decoded objects, encoded object length
Arguably, the length of the encodedData is useless,
but it is for compatibility
"""
return json.loads(encodedData, cls=DJSONDecoder), len(encodedData)
def strToIntDict(inDict):
""" Because JSON will transform dict with int keys to str keys,
this utility method is just to cast it back.
This shows useful in cases when sending dict indexed on
jobID or requestID for example
:param inDict: dictionary with strings as keys e.g. {'1': 1, '2': 2}
:returns: dictionary with int as keys e.g. {1: 1, 2: 2}
"""
return {int(key): value for key, value in inDict.items()}
| yujikato/DIRAC | src/DIRAC/Core/Utilities/JEncode.py | Python | gpl-3.0 | 6,181 | [
"DIRAC"
] | d842bd509f4e204ab674675e5c0787e657677d41bcf7967e3886e4d2d0a117da |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
# File: ReduceSCD_Parallel.py
#
# Version 2.0, modified to work with Mantid's new python interface.
#
# This script will run multiple instances of the script ReduceSCD_OneRun.py
# in parallel, using either local processes or a slurm partition. After
# using the ReduceSCD_OneRun script to find, index and integrate peaks from
# multiple runs, this script merges the integrated peaks files and re-indexes
# them in a consistent way. If desired, the indexing can also be changed to a
# specified conventional cell.
# Many intermediate files are generated and saved, so all output is written
# to a specified output_directory. This output directory must be created
# before running this script, and must be specified in the configuration file.
# The user should first make sure that all parameters are set properly in
# the configuration file for the ReduceSCD_OneRun.py script, and that that
# script will properly reduce one scd run. Once a single run can be properly
# reduced, set the additional parameters in the configuration file that specify
# how the list of runs will be processed in parallel.
#
#
# _v1: December 3rd 2013. Mads Joergensen
# This version now includes the possibility to use the 1D cylindrical integration method
# and the possibility to load a UB matrix which will be used for integration of the individual
# runs and to index the combined file (Code from Xiapoing).
#
#
# _v2: December 3rd 2013. Mads Joergensen
# Adds the possibility to optimize the loaded UB for each run for a better peak prediction
# It is also possible to find the common UB by using lattice parameters of the first
# run or the loaded matrix instead of the default FFT method
#
from __future__ import (absolute_import, division, print_function)
import os
import sys
import threading
import time
import ReduceDictionary
sys.path.append("/opt/mantidnightly/bin") # noqa
#sys.path.append("/opt/Mantid/bin")
from mantid.simpleapi import *
# Report which Mantid python API version is in use, for traceability in logs.
print("API Version")
print(apiVersion())
# Record the wall-clock start time of the whole reduction script.
start_time = time.time()
# -------------------------------------------------------------------------
# ProcessThread is a simple local class. Each instance of ProcessThread is
# a thread that starts a command line process to reduce one run.
#
class ProcessThread(threading.Thread):
    """Thread that launches one command-line reduction job via os.system."""

    command = ""

    def setCommand(self, command=""):
        """Store the shell command this thread will execute when started."""
        self.command = command

    def run(self):
        """Announce and run the stored command in a subshell (blocks until done)."""
        print('STARTING PROCESS: ' + self.command)
        os.system(self.command)
# -------------------------------------------------------------------------
#
# Get the config file name from the command line
#
if len(sys.argv) < 2:
    print("You MUST give the config file name on the command line")
    # Exit with a non-zero status so calling shell scripts can detect the
    # failure; exit(0) would incorrectly report success.
    sys.exit(1)

config_files = sys.argv[1:]
#
# Load the parameter names and values from the specified configuration file
# into a dictionary and set all the required parameters from the dictionary.
#
params_dictionary = ReduceDictionary.LoadDictionary( *config_files )
# Pull the required reduction parameters out of the configuration dictionary.
exp_name = params_dictionary[ "exp_name" ]
output_directory = params_dictionary[ "output_directory" ]
output_nexus = params_dictionary.get( "output_nexus", False)  # optional, defaults to False
reduce_one_run_script = params_dictionary[ "reduce_one_run_script" ]
slurm_queue_name = params_dictionary[ "slurm_queue_name" ]
max_processes = int(params_dictionary[ "max_processes" ])
min_d = params_dictionary[ "min_d" ]
max_d = params_dictionary[ "max_d" ]
tolerance = params_dictionary[ "tolerance" ]
cell_type = params_dictionary[ "cell_type" ]
centering = params_dictionary[ "centering" ]
allow_perm = params_dictionary[ "allow_perm" ]
run_nums = params_dictionary[ "run_nums" ]
data_directory = params_dictionary[ "data_directory" ]
use_cylindrical_integration = params_dictionary[ "use_cylindrical_integration" ]
instrument_name = params_dictionary[ "instrument_name" ]
read_UB = params_dictionary[ "read_UB" ]
UB_filename = params_dictionary[ "UB_filename" ]
UseFirstLattice = params_dictionary[ "UseFirstLattice" ]
num_peaks_to_find = params_dictionary[ "num_peaks_to_find" ]

# determine what python executable to launch new jobs with
python = sys.executable
# sys.executable can be None *or* an empty string when the interpreter path
# cannot be determined, so test for falsiness rather than only None.
if not python:
    python = 'python'
#
# Make the list of separate process commands. If a slurm queue name
# was specified, run the processes using slurm, otherwise just use
# multiple processes on the local machine.
#
procList = []
for r_num in run_nums:
    # Command line that reduces this single run; optionally wrapped in an
    # srun launch when a slurm partition was specified.
    cmd = '%s %s %s %s' % (python, reduce_one_run_script, " ".join(config_files), str(r_num))
    if slurm_queue_name is not None:
        console_file = output_directory + "/" + str(r_num) + "_output.txt"
        cmd = 'srun -p ' + slurm_queue_name + \
            ' --cpus-per-task=3 -J ReduceSCD_Parallel.py -o ' + console_file + ' ' + cmd
    worker = ProcessThread()
    worker.setCommand(cmd)
    procList.append(worker)
#
# Now create and start a thread for each command to run the commands in parallel,
# starting up to max_processes simultaneously.  Poll every two seconds, starting
# at most one new job per cycle and discarding jobs that have finished.
#
all_done = False
active_list = []
while not all_done:
    # Launch another queued job whenever there is a free slot.
    if len(procList) > 0 and len(active_list) < max_processes:
        thread = procList.pop(0)
        active_list.append(thread)
        thread.start()
    time.sleep(2)
    # Rebuild the list instead of calling remove() while iterating over the
    # same list, which can skip entries.  Thread.is_alive() replaces the
    # isAlive() alias that was removed in Python 3.9.
    active_list = [thread for thread in active_list if thread.is_alive()]
    if len(procList) == 0 and len(active_list) == 0:
        all_done = True
print("\n**************************************************************************************")
print("************** Completed Individual Runs, Starting to Combine Results ****************")
print("**************************************************************************************\n")

#
# First combine all of the integrated files, by reading the separate files and
# appending them to a combined output file.
#
niggli_name = output_directory + "/" + exp_name + "_Niggli"
if output_nexus:
    niggli_integrate_file = niggli_name + ".nxs"
else:
    niggli_integrate_file = niggli_name + ".integrate"
niggli_matrix_file = niggli_name + ".mat"
first_time = True
if output_nexus:
    # Only needed to create an instrument-bearing, empty peaks workspace
    # (peaks_total) that the per-run results are accumulated into below.
    short_filename = "%s_%s" % (instrument_name, str(run_nums[0]))
    if data_directory is not None:
        # Prefer the newer .nxs.h5 naming; fall back to the older _event.nxs.
        full_name = data_directory + "/" + short_filename + ".nxs.h5"
        if not os.path.exists(full_name):
            full_name = data_directory + "/" + short_filename + "_event.nxs"
    else:
        # No data directory given: ask Mantid's FileFinder for candidates and
        # keep the last path that actually exists on disk.
        candidates = FileFinder.findRuns(short_filename)
        full_name = ""
        for item in candidates:
            if os.path.exists(item):
                full_name = str(item)
        # str.endswith accepts a tuple of suffixes, so one call covers both.
        if not full_name.endswith(('nxs', 'h5')):
            print("Exiting since the data_directory was not specified and")
            print("findnexus failed for event NeXus file: " + instrument_name + " " + str(run_nums[0]))
            # Non-zero exit status: failing to locate the data is an error.
            sys.exit(1)
    #
    # Load the first data file to find instrument
    #
    wksp = LoadEventNexus( Filename=full_name, FilterByTofMin=0, FilterByTofMax=0 )
    peaks_total = CreatePeaksWorkspace(NumberOfPeaks=0, InstrumentWorkspace=wksp)
# Merge the per-run integrated peaks into a single combined Niggli file.
# (Skipped entirely when cylindrical integration was used.)
if not use_cylindrical_integration:
    for r_num in run_nums:
        if output_nexus:
            one_run_file = output_directory + '/' + str(r_num) + '_Niggli.nxs'
            peaks_ws = Load( Filename=one_run_file )
        else:
            one_run_file = output_directory + '/' + str(r_num) + '_Niggli.integrate'
            peaks_ws = LoadIsawPeaks( Filename=one_run_file )
        if first_time:
            if UseFirstLattice and not read_UB:
                # Find a UB (using FFT) for the first run to use in the FindUBUsingLatticeParameters
                FindUBUsingFFT( PeaksWorkspace=peaks_ws, MinD=min_d, MaxD=max_d, Tolerance=tolerance )
                # Remember the first run's lattice constants; they are reused
                # later when re-indexing the combined peaks workspace.
                uc_a = peaks_ws.sample().getOrientedLattice().a()
                uc_b = peaks_ws.sample().getOrientedLattice().b()
                uc_c = peaks_ws.sample().getOrientedLattice().c()
                uc_alpha = peaks_ws.sample().getOrientedLattice().alpha()
                uc_beta = peaks_ws.sample().getOrientedLattice().beta()
                uc_gamma = peaks_ws.sample().getOrientedLattice().gamma()
            # First run starts a fresh combined file (AppendFile=False).
            if output_nexus:
                peaks_total = CombinePeaksWorkspaces(LHSWorkspace=peaks_total, RHSWorkspace=peaks_ws)
                SaveNexus( InputWorkspace=peaks_ws, Filename=niggli_integrate_file )
            else:
                SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=niggli_integrate_file )
            first_time = False
        else:
            # Subsequent runs: NeXus output accumulates everything in
            # peaks_total and rewrites the whole file, while ISAW output
            # appends this run's peaks to the existing file.
            if output_nexus:
                peaks_total = CombinePeaksWorkspaces(LHSWorkspace=peaks_total, RHSWorkspace=peaks_ws)
                SaveNexus( InputWorkspace=peaks_total, Filename=niggli_integrate_file )
            else:
                SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=True, Filename=niggli_integrate_file )
#
# Load the combined file and re-index all of the peaks together.
# Save them back to the combined Niggli file (Or selected UB file if in use...)
#
if output_nexus:
    peaks_ws = Load( Filename=niggli_integrate_file )
else:
    peaks_ws = LoadIsawPeaks( Filename=niggli_integrate_file )
#
# Find a Niggli UB matrix that indexes the peaks in this run
# Load UB instead of Using FFT
#Index peaks using UB from UB of initial orientation run/or combined runs from first iteration of crystal orientation refinement
if read_UB:
    # A UB matrix file was supplied: load it into the combined workspace.
    LoadIsawUB(InputWorkspace=peaks_ws, Filename=UB_filename)
    if UseFirstLattice:
        # Find UB using lattice parameters from the specified file
        uc_a = peaks_ws.sample().getOrientedLattice().a()
        uc_b = peaks_ws.sample().getOrientedLattice().b()
        uc_c = peaks_ws.sample().getOrientedLattice().c()
        uc_alpha = peaks_ws.sample().getOrientedLattice().alpha()
        uc_beta = peaks_ws.sample().getOrientedLattice().beta()
        uc_gamma = peaks_ws.sample().getOrientedLattice().gamma()
        FindUBUsingLatticeParameters(PeaksWorkspace= peaks_ws,a=uc_a,b=uc_b,c=uc_c,alpha=uc_alpha,beta=uc_beta,
                                     gamma=uc_gamma,NumInitial=num_peaks_to_find,Tolerance=tolerance)
    #OptimizeCrystalPlacement(PeaksWorkspace=peaks_ws,ModifiedPeaksWorkspace=peaks_ws,
    #  FitInfoTable='CrystalPlacement_info',MaxIndexingError=tolerance)
elif UseFirstLattice and not read_UB:
    # Find UB using lattice parameters using the FFT results from first run if no UB file is specified
    # NOTE(review): "not read_UB" is redundant inside this elif, and the uc_*
    # values are only defined when the first-run FFT branch of the combining
    # loop above actually executed -- verify that branch always runs when
    # UseFirstLattice is set without read_UB.
    FindUBUsingLatticeParameters(PeaksWorkspace= peaks_ws,a=uc_a,b=uc_b,c=uc_c,alpha=uc_alpha,beta=uc_beta,
                                 gamma=uc_gamma,NumInitial=num_peaks_to_find,Tolerance=tolerance)
else:
    # Default: derive a Niggli UB directly from the combined peaks via FFT.
    FindUBUsingFFT( PeaksWorkspace=peaks_ws, MinD=min_d, MaxD=max_d, Tolerance=tolerance )
IndexPeaks( PeaksWorkspace=peaks_ws, Tolerance=tolerance )
# Write the consistently indexed peaks and the UB matrix back out.
if output_nexus:
    SaveNexus( InputWorkspace=peaks_ws, Filename=niggli_integrate_file )
else:
    SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=niggli_integrate_file )
SaveIsawUB( InputWorkspace=peaks_ws, Filename=niggli_matrix_file )
#
# If requested, also switch to the specified conventional cell and save the
# corresponding matrix and integrate file
#
if not use_cylindrical_integration:
if (cell_type is not None) and (centering is not None) :
conv_name = output_directory + "/" + exp_name + "_" + cell_type + "_" + centering
if output_nexus:
conventional_integrate_file = conv_name + ".nxs"
else:
conventional_integrate_file = conv_name + ".integrate"
conventional_matrix_file = conv_name + ".mat"
SelectCellOfType( PeaksWorkspace=peaks_ws, CellType=cell_type, Centering=centering,
AllowPermutations=allow_perm, Apply=True, Tolerance=tolerance )
if output_nexus:
SaveNexus( InputWorkspace=peaks_ws, Filename=conventional_integrate_file )
else:
SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=conventional_integrate_file )
SaveIsawUB( InputWorkspace=peaks_ws, Filename=conventional_matrix_file )
if use_cylindrical_integration:
if (cell_type is not None) or (centering is not None):
print("WARNING: Cylindrical profiles are NOT transformed!!!")
# Combine *.profiles files
filename = output_directory + '/' + exp_name + '.profiles'
outputFile = open( filename, 'w' )
# Read and write the first run profile file with header.
r_num = run_nums[0]
filename = output_directory + '/' + instrument_name + '_' + r_num + '.profiles'
inputFile = open( filename, 'r' )
file_all_lines = inputFile.read()
outputFile.write(file_all_lines)
inputFile.close()
os.remove(filename)
# Read and write the rest of the runs without the header.
for r_num in run_nums[1:]:
filename = output_directory + '/' + instrument_name + '_' + r_num + '.profiles'
inputFile = open(filename, 'r')
for line in inputFile:
if line[0] == '0':
break
outputFile.write(line)
for line in inputFile:
outputFile.write(line)
inputFile.close()
os.remove(filename)
# Remove *.integrate file(s) ONLY USED FOR CYLINDRICAL INTEGRATION!
for integrateFile in os.listdir(output_directory):
if integrateFile.endswith('.integrate'):
os.remove(integrateFile)
end_time = time.time()
print("\n**************************************************************************************")
print("****************************** DONE PROCESSING ALL RUNS ******************************")
print("**************************************************************************************\n")
print('Total time: ' + str(end_time - start_time) + ' sec')
print('Config file: ' + ", ".join(config_files))
print('Script file: ' + reduce_one_run_script + '\n')
print()
| mganeva/mantid | scripts/SCD_Reduction/ReduceSCD_Parallel.py | Python | gpl-3.0 | 14,586 | [
"CRYSTAL"
] | c7d4b2e8ccb82e33faa4f918e6a8cfbd128e5085fade0fdbe5328085a2057eaf |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Parallelize gamma point
'''
import numpy
import pyscf.pbc.gto as pbcgto
import pyscf.pbc.scf as pscf
from mpi4pyscf.pbc import df as mpidf
# Gamma-point restricted Hartree-Fock on an 8-atom cubic diamond cell.
# The density-fitting object comes from mpi4pyscf, so the integral
# generation step is distributed over MPI ranks.
cell = pbcgto.Cell()
cell.atom = [['C', ([ 0., 0., 0.])],
             ['C', ([ 0.8917, 0.8917, 0.8917])],
             ['C', ([ 1.7834, 1.7834, 0. ])],
             ['C', ([ 2.6751, 2.6751, 0.8917])],
             ['C', ([ 1.7834, 0. , 1.7834])],
             ['C', ([ 2.6751, 0.8917, 2.6751])],
             ['C', ([ 0. , 1.7834, 1.7834])],
             ['C', ([ 0.8917, 2.6751, 2.6751])]
            ]
cell.a = numpy.eye(3) * 3.5668
cell.basis = 'sto3g'
cell.mesh = [10] * 3
cell.verbose = 4
cell.build()
# Tiny memory cap (in MB) — presumably set to force the out-of-core /
# distributed code paths in the DF builds below; TODO confirm.
cell.max_memory = 1
# Run 1: mixed density fitting (MDF) with the cell's default FFT mesh.
mydf = mpidf.MDF(cell)
mydf.auxbasis = 'weigend'
mf = pscf.RHF(cell)
mf.exxdiv = 'ewald'
mf.with_df = mydf
mf.kernel()
# Run 2: plain Gaussian density fitting (DF) on a coarser 5x5x5 mesh.
mydf = mpidf.DF(cell)
mydf.auxbasis = 'weigend'
mydf.mesh = [5] * 3
mf = pscf.RHF(cell)
mf.exxdiv = 'ewald'
mf.with_df = mydf
mf.kernel() # -299.327774512958
# Run 3: MDF again, but on the same coarser 5x5x5 mesh for comparison.
mydf = mpidf.MDF(cell)
mydf.auxbasis = 'weigend'
mydf.mesh = [5] * 3
mf = pscf.RHF(cell)
mf.exxdiv = 'ewald'
mf.with_df = mydf
mf.kernel() # -299.328386756269
| sunqm/mpi4pyscf | examples/01-parallel_hf.py | Python | gpl-3.0 | 1,213 | [
"PySCF"
] | 399a10d33a9379fef5055777c4da9026cdb25227ba7b63698a8b4b1e8c1635ab |
'''
Copyright (C) 2015 Jeison Pacateque, Santiago Puerto, Wilmar Fernandez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
from mayavi import mlab
from numpy import array
from mayavi.modules.text import Text
def ToyModel3d(sample):
    """
    Configure the 3D render engine (Mayavi) to show an interactive
    reconstruction of the asphalt mixture sample.

    sample: 3D scalar volume in which the voxel value encodes the
    material class (aggregate / mastic / air voids).
    """
    src = mlab.pipeline.scalar_field(sample)

    # Shared look-up-table settings applied to every visualization module.
    inverse_lut = False
    colors = 5

    iso = mlab.pipeline.iso_surface(src, contours=[1], opacity=0.4,
                                    colormap='blue-red')
    ipw = mlab.pipeline.image_plane_widget(src, plane_orientation='y_axes',
                                           slice_index=10, colormap='blue-red')
    scp = mlab.pipeline.scalar_cut_plane(src, colormap='blue-red')
    for module in (iso, ipw, scp):
        module.module_manager.scalar_lut_manager.reverse_lut = inverse_lut
        module.module_manager.scalar_lut_manager.number_of_colors = colors

    # Set the Mayavi colorbar ranges.  (The original code repeated this
    # block twice; the assignments are idempotent, so once is enough.)
    lut = scp.module_manager.scalar_lut_manager
    lut.use_default_range = False
    lut.scalar_bar.position2 = array([0.1, 0.8])
    lut.scalar_bar.position = array([0.01, 0.15])
    lut.data_range = array([0., 2.])

    # One floating text label per material, attached to a pipeline module.
    engine = mlab.get_engine()
    textAggregate = Text()
    textMastic = Text()
    textVoids = Text()
    engine.add_filter(textAggregate, scp.module_manager)
    engine.add_filter(textMastic, ipw.module_manager)
    engine.add_filter(textVoids, iso.module_manager)
    textAggregate.text = 'Aggregate'
    textMastic.text = 'Mastic'
    textVoids.text = 'Air Voids'

    # Common text-actor settings, then per-label placement.
    for label in (textAggregate, textMastic, textVoids):
        label.actor.text_scale_mode = 'viewport'
        label.actor.minimum_size = array([1, 10])
    textAggregate.actor.position = array([0.115, 0.7])
    textMastic.actor.position = array([0.115, 0.45])
    textVoids.actor.position = array([0.115, 0.23])

    mlab.orientation_axes()
    mlab.title("Asphalt Mixture Reconstruction", size=0.25)
    mlab.colorbar(title='Material', orientation='vertical', nb_labels=0,
                  nb_colors=3)
    # Blocks until the interactive window is closed.
    mlab.show()
mlab.show() | JeisonPacateque/Asphalt-Mixtures-Aging-Simulator | app/ui/render_3d.py | Python | gpl-3.0 | 3,356 | [
"Mayavi"
] | f35c091a23da129980ae3e6261db16d2572c50f183a54d3c990479b6c994f4aa |
# ======================================================================
#
# Cosmograil: cosmograil.tools.sextractor
#
# sextractor module.
#
# Author: Laurent Le Guillou <laurentl@ster.kuleuven.ac.be>
#
# $Id: sextractor.py,v 1.2 2005/07/06 21:40:43 hack Exp $
#
# ======================================================================
#
# "sextractor": wrapper around SExtractor.
#
# ======================================================================
#
# $Log: sextractor.py,v $
# Revision 1.2 2005/07/06 21:40:43 hack
# Tweakshifts version 0.5.0 (WJH):
# - added support for SExtractor PSET and
# user-supplied SExtractor config file
# - added 'nbright' parameter for selecting
# only 'nbright' objects for matching
# - redefined 'ascend' to 'fluxunits' of 'counts/cps/mag'
# - fixed bug in countSExtractorObjects()reported by Andy
# - turned off overwriting of output WCS file
#
# Revision 1.15 2005/06/29 13:07:41 hack
# Added Python interface to SExtractor to
# STSDAS$Python for use with 'tweakshifts'. WJH
# Added 3 more parameters to config
#
# Revision 1.14 2005/02/14 19:27:31 laurentl
# Added write facilities to rdb module.
#
# Revision 1.13 2005/02/14 17:47:02 laurentl
# Added iterator interface
#
# Revision 1.12 2005/02/14 17:16:30 laurentl
# clean now removes the NNW config file too.
#
# Revision 1.2 2005/02/14 17:13:49 laurentl
# *** empty log message ***
#
# Revision 1.1 2005/02/14 11:34:10 laurentl
# quality monitor now uses SExtractor wrapper.
#
# Revision 1.10 2005/02/11 14:40:35 laurentl
# minor changes
#
# Revision 1.9 2005/02/11 14:32:44 laurentl
# Fixed bugs in setup()
#
# Revision 1.8 2005/02/11 13:50:08 laurentl
# Fixed bugs in setup()
#
# Revision 1.7 2005/02/10 20:15:14 laurentl
# Improved SExtractor wrapper.
#
# Revision 1.6 2005/02/10 17:46:35 laurentl
# Greatly improved the SExtractor wrapper.
#
# Revision 1.5 2005/02/09 23:32:50 laurentl
# Implemented SExtractor wrapper
#
# Revision 1.4 2005/02/04 05:00:09 laurentl
# *** empty log message ***
#
# Revision 1.3 2005/01/06 13:37:11 laurentl
# *** empty log message ***
#
#
# ======================================================================
"""
A wrapper for SExtractor
A wrapper for SExtractor, the Source Extractor.
by Laurent Le Guillou
version: 1.15 - last modified: 2005-07-06
This wrapper allows you to configure SExtractor, run it and get
back its outputs without the need of editing SExtractor
configuration files. by default, configuration files are created
on-the-fly, and SExtractor is run silently via python.
Tested on SExtractor versions 2.2.1 and 2.3.2.
Example of use:
-----------------------------------------------------------------
import sextractor
# Create a SExtractor instance
sex = sextractor.SExtractor()
# Modify the SExtractor configuration
sex.config['GAIN'] = 0.938
sex.config['PIXEL_SCALE'] = .19
sex.config['VERBOSE_TYPE'] = "FULL"
sex.config['CHECKIMAGE_TYPE'] = "BACKGROUND"
# Add a parameter to the parameter list
sex.config['PARAMETERS_LIST'].append('FLUX_BEST')
# Lauch SExtractor on a FITS file
sex.run("nf260002.fits")
# Read the resulting catalog [first method, whole catalog at once]
catalog = sex.catalog()
for star in catalog:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
# Read the resulting catalog [second method, whole catalog at once]
catalog_name = sex.config['CATALOG_NAME']
catalog_f = sextractor.open(catalog_name)
catalog = catalog_f.readlines()
for star in catalog:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
catalog_f.close()
# Read the resulting catalog [third method, star by star]
catalog_name = sex.config['CATALOG_NAME']
catalog_f = sextractor.open(catalog_name)
star = catalog_f.readline()
while star:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
star = catalog_f.readline()
catalog_f.close()
# Removing the configuration files, the catalog and
# the check image
sex.clean(config=True, catalog=True, check=True)
-----------------------------------------------------------------
"""
# ======================================================================
from __future__ import print_function
from six.moves import builtins as __builtin__
import os
import subprocess
import re
import copy
from .sexcatalog import *
# ======================================================================
__version__ = "1.15.0 (2005-07-06)"
# ======================================================================
class SExtractorException(Exception):
    """Raised when the SExtractor wrapper cannot locate, configure or run
    the external SExtractor program."""
# ======================================================================
# Default star/galaxy classifier weights.  This string is written verbatim
# to the STARNNW_NAME file by SExtractor.update_config(); the numeric table
# is opaque trained data and must not be edited by hand.
nnw_config = \
"""NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5''
# (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00
"""
# ======================================================================
class SExtractor:
    """
    A wrapper class to transparently use SExtractor.
    """
    # Class-level configuration template: one entry per SExtractor keyword,
    # holding both the default value and the human-readable comment that is
    # echoed into the generated .sex configuration file.  Instances get a
    # deep copy of the "value" fields (see __init__), so mutating
    # instance.config never touches this template.
    _SE_config = {
        "CATALOG_NAME":
        {"comment": "name of the output catalog",
         "value": "py-sextractor.cat"},
        "CATALOG_TYPE":
        {"comment":
         '"NONE","ASCII_HEAD","ASCII","FITS_1.0" or "FITS_LDAC"',
         "value": "ASCII_HEAD"},
        "PARAMETERS_NAME":
        {"comment": "name of the file containing catalog contents",
         "value": "py-sextractor.param"},
        "DETECT_TYPE":
        {"comment": '"CCD" or "PHOTO"',
         "value": "CCD"},
        "FLAG_IMAGE":
        {"comment": "filename for an input FLAG-image",
         "value": "flag.fits"},
        "DETECT_MINAREA":
        {"comment": "minimum number of pixels above threshold",
         "value": 5},
        "DETECT_THRESH":
        {"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
         "value": 1.5},
        "ANALYSIS_THRESH":
        {"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
         "value": 1.5},
        "FILTER":
        {"comment": 'apply filter for detection ("Y" or "N")',
         "value": 'Y'},
        "FILTER_NAME":
        {"comment": "name of the file containing the filter",
         "value": "py-sextractor.conv"},
        "DEBLEND_NTHRESH":
        {"comment": "Number of deblending sub-thresholds",
         "value": 32},
        "DEBLEND_MINCONT":
        {"comment": "Minimum contrast parameter for deblending",
         "value": 0.005},
        "CLEAN":
        {"comment": "Clean spurious detections (Y or N)",
         "value": 'Y'},
        "CLEAN_PARAM":
        {"comment": "Cleaning efficiency",
         "value": 1.0},
        "MASK_TYPE":
        {"comment": 'type of detection MASKing: can be one of "NONE",'
         ' "BLANK" or "CORRECT"',
         "value": "CORRECT"},
        "PHOT_APERTURES":
        {"comment": "MAG_APER aperture diameter(s) in pixels",
         "value": 5},
        "PHOT_AUTOPARAMS":
        {"comment": 'MAG_AUTO parameters: <Kron_fact>,<min_radius>',
         "value": [2.5, 3.5]},
        "SATUR_LEVEL":
        {"comment": "level (in ADUs) at which arises saturation",
         "value": 50000.0},
        "MAG_ZEROPOINT":
        {"comment": "magnitude zero-point",
         "value": 0.0},
        "MAG_GAMMA":
        {"comment": "gamma of emulsion (for photographic scans)",
         "value": 4.0},
        "GAIN":
        {"comment": "detector gain in e-/ADU",
         "value": 0.0},
        "PIXEL_SCALE":
        {"comment": "size of pixel in arcsec (0=use FITS WCS info)",
         "value": 1.0},
        "SEEING_FWHM":
        {"comment": "stellar FWHM in arcsec",
         "value": 1.2},
        "STARNNW_NAME":
        {"comment": "Neural-Network_Weight table filename",
         "value": "py-sextractor.nnw"},
        "BACK_SIZE":
        {"comment": "Background mesh: <size> or <width>,<height>",
         "value": 64},
        "BACK_TYPE":
        {"comment": "Type of background to subtract: MANUAL or AUTO generated",
         "value": 'AUTO'},
        "BACK_VALUE":
        {"comment": "User-supplied constant value to be subtracted as sky",
         "value": "0.0,0.0"},
        "BACK_FILTERSIZE":
        {"comment": "Background filter: <size> or <width>,<height>",
         "value": 3},
        "BACKPHOTO_TYPE":
        {"comment": 'can be "GLOBAL" or "LOCAL"',
         "value": "GLOBAL"},
        "BACKPHOTO_THICK":
        {"comment": "Thickness in pixels of the background local annulus",
         "value": 24},
        "CHECKIMAGE_TYPE":
        {"comment": 'can be one of "NONE", "BACKGROUND", "MINIBACKGROUND",'
         ' "-BACKGROUND", "OBJECTS", "-OBJECTS", "SEGMENTATION",'
         ' "APERTURES", or "FILTERED"',
         "value": "NONE"},
        "CHECKIMAGE_NAME":
        {"comment": "Filename for the check-image",
         "value": "check.fits"},
        "MEMORY_OBJSTACK":
        {"comment": "number of objects in stack",
         "value": 3000},
        "MEMORY_PIXSTACK":
        {"comment": "number of pixels in stack",
         "value": 300000},
        "MEMORY_BUFSIZE":
        {"comment": "number of lines in buffer",
         "value": 1024},
        "VERBOSE_TYPE":
        {"comment": 'can be "QUIET", "NORMAL" or "FULL"',
         "value": "QUIET"},
        "WEIGHT_TYPE":
        {"comment": 'type of WEIGHTing: NONE, BACKGROUND, '
         'MAP_RMS, MAP_VAR or MAP_WEIGHT',
         "value": "NONE"},
        "WEIGHT_IMAGE":
        {"comment": '# weight-map filename',
         "value": "NONE"},
        "WEIGHT_THRESH":
        {"comment": 'weight threshold[s] for bad pixels',
         "value": 0},
        # -- Extra-keys (will not be saved in the main configuration file
        "PARAMETERS_LIST":
        {"comment": '[Extra key] catalog contents (to put in PARAMETERS_NAME)',
         "value": ["NUMBER", "FLUX_BEST", "FLUXERR_BEST",
                   "X_IMAGE", "Y_IMAGE", "FLAGS", "FWHM_IMAGE"]},
        "CONFIG_FILE":
        {"comment": '[Extra key] name of the main configuration file',
         "value": "py-sextractor.sex"},
        "FILTER_MASK":
        {"comment": 'Array to put in the FILTER_MASK file',
         "value": [[1, 2, 1],
                   [2, 4, 2],
                   [1, 2, 1]]}
    }
    # -- Special config. keys that should not go into the config. file.
    # They configure the wrapper itself (parameter list, filter mask, the
    # name of the generated .sex file) rather than SExtractor proper.
    _SE_config_special_keys = ["PARAMETERS_LIST", "CONFIG_FILE", "FILTER_MASK"]
    # -- Dictionary of all possible parameters (from sexcatalog.py module)
    _SE_parameters = SExtractorfile._SE_keys
    def __init__(self):
        """
        SExtractor class constructor.
        """
        # Deep-copy each default value so per-instance edits (e.g. appending
        # to config['PARAMETERS_LIST']) do not mutate the class template.
        self.config = (
            dict([(k, copy.deepcopy(SExtractor._SE_config[k]["value"]))
                  for k in SExtractor._SE_config]))
        # print self.config
        # Filled in by setup() on the first run().
        self.program = None
        self.version = None
    def setup(self, path=None):
        """
        Look for SExtractor program ('sextractor', or 'sex').
        If a full path is provided, only this path is checked.
        Raise a SExtractorException if it failed.
        Return program and version if it succeed.
        """
        # -- Finding sextractor program and its version
        # first look for 'sextractor', then 'sex'
        candidates = ['sextractor', 'sex']
        if (path):
            candidates = [path]
        selected = None
        for candidate in candidates:
            try:
                # Probe the candidate by running it with no arguments and
                # scanning its banner output for the "SExtractor" string.
                # NOTE(review): shell=True means 'candidate' is interpreted
                # by the shell; acceptable here since the values are fixed
                # or caller-supplied, but worth confirming for untrusted paths.
                p = subprocess.Popen(candidate, shell=True,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     close_fds=True)
                (_out_err, _in) = (p.stdout, p.stdin)
                # NOTE(review): under Python 3 this read() returns bytes, so
                # the .find("SExtractor") test below only works where read()
                # yields str (Python 2) — confirm before porting.
                versionline = _out_err.read()
                if (versionline.find("SExtractor") != -1):
                    selected = candidate
                    break
            except IOError:
                continue
        if not(selected):
            raise SExtractorException(
                  """
                  Cannot find SExtractor program. Check your PATH,
                  or provide the SExtractor program path in the constructor.
                  """
                  )
        _program = selected
        # print versionline
        # Extract "x.y.z" from e.g. "... version 2.3.2 ..."; the [8:] slice
        # drops the leading "version " prefix of the matched text.
        _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline)
        if not _version_match:
            raise SExtractorException(
                  "Cannot determine SExtractor version."
                  )
        _version = _version_match.group()[8:]
        if not _version:
            raise SExtractorException(
                  "Cannot determine SExtractor version."
                  )
        # print "Use " + self.program + " [" + self.version + "]"
        return _program, _version
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.

        Writes four files next to the working directory: the convolution
        filter (FILTER_NAME), the output-parameter list (PARAMETERS_NAME),
        the neural-network weights (STARNNW_NAME) and the main .sex
        configuration file (CONFIG_FILE).
        """
        # -- Write filter configuration file
        # First check the filter itself
        # ('filter' deliberately shadows the builtin here; kept as-is.)
        filter = self.config['FILTER_MASK']
        rows = len(filter)
        cols = len(filter[0])   # May raise ValueError, OK
        filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
        filter_f.write("CONV NORM\n")
        filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                       (rows, cols))
        for row in filter:
            filter_f.write(" ".join(map(repr, row)))
            filter_f.write("\n")
        filter_f.close()
        # -- Write parameter list file
        parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
        for parameter in self.config['PARAMETERS_LIST']:
            print(parameter, file=parameters_f)
        parameters_f.close()
        # -- Write NNW configuration file
        nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
        nnw_f.write(nnw_config)
        nnw_f.close()
        # -- Write main configuration file
        main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')
        for key in self.config.keys():
            # Wrapper-only keys never appear in the .sex file.
            if (key in SExtractor._SE_config_special_keys):
                continue
            if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value
                value = " ".join(map(str, self.config[key]))
            else:
                value = str(self.config[key])
            print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f)
        main_f.close()
    def run(self, file, updateconfig=True, clean=False, path=None):
        """
        Run SExtractor.
        If updateconfig is True (default), the configuration
        files will be updated before running SExtractor.
        If clean is True (default: False), configuration files
        (if any) will be deleted after SExtractor terminates.
        """
        if updateconfig:
            self.update_config()
        # Try to find SExtractor program
        # This will raise an exception if it failed
        self.program, self.version = self.setup(path)
        # NOTE(review): 'file' is interpolated unescaped into a shell
        # command, so paths containing spaces or shell metacharacters will
        # break; consider subprocess.run([...]) when revisiting this code.
        commandline = (
            self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
        # print commandline
        rcode = os.system(commandline)
        if (rcode):
            raise SExtractorException(
                  "SExtractor command [%s] failed." % commandline
                  )
        if clean:
            self.clean()
    def catalog(self):
        """
        Read the output catalog produced by the last SExtractor run.
        Output is a list of dictionaries, with a dictionary for
        each star: {'param1': value, 'param2': value, ...}.
        """
        output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r')
        c = output_f.read()
        output_f.close()
        return c
    def clean(self, config=True, catalog=False, check=False):
        """
        Remove the generated SExtractor files (if any).
        If config is True, remove generated configuration files.
        If catalog is True, remove the output catalog.
        If check is True, remove output check image.
        """
        # Missing files are not an error: the first OSError aborts the
        # remaining unlinks silently (best-effort cleanup by design).
        try:
            if (config):
                os.unlink(self.config['FILTER_NAME'])
                os.unlink(self.config['PARAMETERS_NAME'])
                os.unlink(self.config['STARNNW_NAME'])
                os.unlink(self.config['CONFIG_FILE'])
            if (catalog):
                os.unlink(self.config['CATALOG_NAME'])
            if (check):
                os.unlink(self.config['CHECKIMAGE_NAME'])
        except OSError:
            pass
# ======================================================================
| Pica4x6/numina | numina/util/sextractor.py | Python | gpl-3.0 | 19,085 | [
"Galaxy"
] | f50941b1b0a5b8035245aa40d240da126dd1df9566a02a6c14a7288c2dd232e7 |
################################################################################
# #
# Copyright (C) 2010-2018 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Swimmer Flow Field Tutorial #
# #
##########################################################################
from __future__ import print_function
import numpy as np
import os
import espressomd
from espressomd import assert_features, lb
## Exercise 1 ##
# Create a routine to read in the hydrodynamic type
# (pusher/puller) and position at which the particle
# is initiated, set the variables 'type' and 'pos' to
# these values, respectively.
...
mode = ...
pos = ...
##########################################################################
## Exercise 2 ##
# Create an output directory that is labeled according
# to the value of the type and position, use the parameter
# 'outdir' to store this path
outdir = ...
try:
    os.makedirs(outdir)
except OSError:
    # Only an already-existing directory is expected here; the original
    # bare 'except:' would also have hidden e.g. permission errors.
    print("INFO: Directory \"{}\" exists".format(outdir))
# System parameters
length = 25.0        # edge length of the cubic simulation box
prod_steps = 1000    # number of production blocks
prod_length = 50     # integration steps per production block
dt = 0.01            # MD time step
system = espressomd.System(box_l=[length, length, length])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.3
system.time_step = dt
system.min_global_cut = 1.0
##########################################################################
# Set the position of the particle
## Exercise 3 ##
# Determine the initial position of the particle, which
# should be in the center of the box, and shifted by
# the value of 'pos' in the direction of the z-axis
x0 = ...
y0 = ...
z0 = ...
# Sphere size, mass, and moment of inertia, dipole force
sph_size = 0.5
sph_mass = 4.8
Ixyz = 4.8
force = 0.1
## Exercise 4 ##
# Why is the sphere size set to 0.5 (this value is
# an approximation for the real value)? What happens when you
# change the mass and rotational inertia? Why is the value of
# the force chosen to be low.
# Setup the particle
system.part.add(
    pos=[x0, y0, z0], type=0, mass=sph_mass, rinertia=[Ixyz, Ixyz, Ixyz],
    swimming={'f_swim': force, 'mode': mode, 'dipole_length': sph_size + 0.5})
## Exercise 5 ##
# Why is the dipole_length chosen in this way?
# What happens if you make the length go to zero?
# Why does this happen?
##########################################################################
# Setup the fluid (quiescent)
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
temp = 0.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco,
                    tau=dt, fric=frict, couple='3pt')
## Exercise 6 ##
# What does 'couple 3pt' imply?
# Can the particle rotate in the flow field?
system.actors.add(lbf)
system.thermostat.set_lb(kT=temp)
##########################################################################
# Output the coordinates.  The trajectory file must stay open for the whole
# production run, so the loop lives inside the 'with' block (the original
# wrote to 'outfile' after the context manager had already closed it).
with open("{}/trajectory.dat".format(outdir), 'w') as outfile:
    print("####################################################", file=outfile)
    print("#        time        position       velocity       #", file=outfile)
    print("####################################################", file=outfile)
    # Production run
    for k in range(prod_steps):
        # Output quantities
        print("{time} {pos[0]} {pos[1]} {pos[2]} {vel[0]} {vel[1]} {vel[2]}"
              .format(time=system.time, pos=system.part[0].pos,
                      vel=system.part[0].v),
              file=outfile)
        # Output 50 simulations
        # (integer division keeps 'num' an int under Python 3; with '/'
        # the snapshot files would be named e.g. 'lb_velocity_0.0.vtk')
        if k % (prod_steps // 50) == 0:
            num = k // (prod_steps // 50)
            lbf.print_vtk_velocity("{}/lb_velocity_{}.vtk".format(outdir, num))
            system.part.writevtk(
                "{}/position_{}.vtk".format(outdir, num), types=[0])
        system.integrator.run(prod_length)
## Exercise 7 ##
# Use the snapshots and paraview to visualize the final state.
# By appropriately choosing the initial position, you can ensure
# that the swimmer is in the center of the box. Explain why
# the flow lines look the way they do.
| hmenke/espresso | doc/tutorials/06-active_matter/EXERCISES/flow_field.py | Python | gpl-3.0 | 5,641 | [
"ESPResSo",
"ParaView",
"VTK"
] | 153bdd0c0fb95d15c9787f0afc6a3cc844ea42500af85648be0e4ba931f7ace6 |
from article_text_mining.full_text_pipeline import add_full_texts_from_mult_dirs, add_full_texts_from_directory
from django.conf import settings
__author__ = 'stripathy'
def add_full_texts():
    """Add full texts from a local directory to the DB, following the rules
    defined in article_text_mining.full_text_pipeline."""
    # A locally configured directory takes precedence over the shared one.
    full_text_dir = (settings.FULL_TEXTS_LOCAL_DIRECTORY
                     if hasattr(settings, 'FULL_TEXTS_LOCAL_DIRECTORY')
                     else settings.FULL_TEXTS_DIRECTORY)
    # Journals currently being ingested.  Other journals (PLoS, Frontiers,
    # Glia, Hippocampus, J Comp Neurol, J Neurosci Res, eNeuro, Physiol Rep,
    # J Neurophysiol, Synapse, Neuron, Neuroscience, Brain Research, Cell,
    # Neurobiol Dis, ...) were processed in earlier runs and can be
    # re-enabled via add_full_texts_from_mult_dirs / add_full_texts_from_directory.
    for journal in ('Cereb Cortex', 'J Neurosci'):
        add_full_texts_from_directory(full_text_dir + journal + '/')
def run():
    """Script entry point: ingest full texts into the database."""
    add_full_texts()
| neuroelectro/neuroelectro_org | scripts/add_full_texts_to_db.py | Python | gpl-2.0 | 2,136 | [
"NEURON"
] | 89611f56e559b8ff23d9725d62f23fe672be2950929050ce6c72e5c22c3da69a |
from datetime import datetime
from utils import NS, get_text
from atom_objects import Category
from deposit_receipt import Deposit_Receipt
from sword2_logging import logging
from compatible_libs import etree
s_l = logging.getLogger(__name__)
class Sword_Statement(object):
    """Base class for a parsed SWORD2 statement document.

    Holds the raw XML, the parsed DOM (if parsing succeeded) and the
    deposit/state/resource collections that subclasses populate.
    """
    def __init__(self, xml_document=None):
        # Raw input and parse products all start out empty/false.
        self.xml_document = xml_document
        self.dom = None
        self.parsed = False
        self.valid = False
        self.original_deposits = []
        self.states = []
        self.resources = []
        self._parse_xml_document()
        self._validate()

    def _parse_xml_document(self):
        """Parse self.xml_document into self.dom; failures are logged,
        not raised, leaving self.parsed False."""
        if self.xml_document is None:
            return
        try:
            s_l.info("Attempting to parse the Statement XML document")
            self.dom = etree.fromstring(self.xml_document)
            self.parsed = True
        except Exception as e:
            s_l.error("Failed to parse document - %s" % e)
            s_l.error("XML document begins:\n %s" % self.xml_document[:300])

    def _validate(self):
        """Hook for subclasses; the base class performs no validation."""
        pass
class Statement_Resource(object):
    """A single resource listed in a SWORD2 statement.

    Records the resource URI plus deposit provenance: who deposited it,
    when, on whose behalf, and whether it is an original deposit.
    """
    def __init__(self, uri=None, is_original_deposit=False, deposited_on=None,
                    deposited_by=None, deposited_on_behalf_of=None):
        self.uri = uri
        self.is_original_deposit = is_original_deposit
        self.deposited_on = deposited_on                      # datetime or None
        self.deposited_by = deposited_by                      # depositor name or None
        self.deposited_on_behalf_of = deposited_on_behalf_of  # mediated-deposit party or None
class Atom_Statement_Entry(Deposit_Receipt, Statement_Resource):
def __init__(self, dom):
Deposit_Receipt.__init__(self, dom=dom)
Statement_Resource.__init__(self)
self.is_original_deposit = self._is_original_deposit()
self._parse_depositors()
# to provide a stable interface, use the content iri as the uri
self.uri = self.cont_iri
def _is_original_deposit(self):
# is this an original deposit?
is_original_deposit = False
for cat in self.dom.findall(NS['atom'] % 'category'):
if cat.get("term") == "http://purl.org/net/sword/terms/originalDeposit":
is_original_deposit = True
break
return is_original_deposit
def _parse_depositors(self):
do = self.dom.find(NS['sword'] % "depositedOn")
if do is not None and do.text is not None and do.text.strip() != "":
try:
self.deposited_on = datetime.strptime(do.text.strip(), "%Y-%m-%dT%H:%M:%SZ") # e.g. 2011-03-02T20:50:06Z
except Exception, e:
s_l.error("Failed to parse date - %s" % e)
s_l.error("Supplied date as string was: %s" % do.text.strip())
db = self.dom.find(NS['sword'] % "depositedBy")
if db is not None and db.text is not None and db.text.strip() != "":
self.deposited_by = db.text.strip()
dobo = self.dom.find(NS['sword'] % "depositedOnBehalfOf")
if dobo is not None and dobo.text is not None and db.text.strip() != "":
self.deposited_on_behalf_of = dobo.text.strip()
def validate(self):
# don't validate statement entries
return True
class Atom_Sword_Statement(Sword_Statement):
    """SWORD2 statement serialised as an Atom feed.

    Validation only checks that the document root is atom:feed; if valid,
    each atom:entry becomes an Atom_Statement_Entry resource.
    """
    def __init__(self, xml_document=None):
        Sword_Statement.__init__(self, xml_document)
        if self.valid:
            self._enumerate_feed()
        else:
            s_l.warn("Statement did not parse as valid, so the content will" +
                        " not be examined further; see the 'dom' attribute for the xml")
    """
    FIXME: this implementation assumes that the atom document is a single
    page, but Ben's original implementation at least started to make some
    overtures towards dealing with that.  This is the left behind code ...
    self.first = None
    self.next = None
    self.previous = None
    self.last = None
    self.categories = []
    self.entries = []
    try:
        coll_l.info("Attempting to parse the Feed XML document")
        self.feed = etree.fromstring(xml_document)
        self.parsed = True
    except Exception, e:
        coll_l.error("Failed to parse document - %s" % e)
        coll_l.error("XML document begins:\n %s" % xml_document[:300])
    self.enumerate_feed()
    """
    def _enumerate_feed(self):
        """Populate states and resources from the feed's categories and entries."""
        if self.dom is None:
            return
        # Handle Categories
        for cat in self.dom.findall(NS['atom'] % 'category'):
            if cat.get("scheme") == "http://purl.org/net/sword/terms/state":
                self.states.append((cat.get("term"), cat.text.strip()))
        # Handle Entries
        for entry in self.dom.findall(NS['atom'] % 'entry'):
            ase = Atom_Statement_Entry(entry)
            if ase.is_original_deposit:
                self.original_deposits.append(ase)
            self.resources.append(ase)
    def _validate(self):
        """Mark the statement valid iff the document root is an atom feed."""
        valid = True
        if self.dom is None:
            return
        # MUST be an ATOM Feed document
        if self.dom.tag != NS['atom'] % "feed" and self.dom.tag != "feed":
            valid = False
        self.valid = valid
        # The Feed MUST represent files contained in the item as an atom:entry element (this does not
        # mandate that all files in the item are listed, though)

        # Each atom:entry which is an original deposit file MUST have an atom:category element with
        # the term sword:originalDeposit (this does not mandate that all original deposits are listed as entries)

        # NOTE: neither of these requirements can easily be used to validate, since
        # a statement may have zero entries, and an entry may or may not contain
        # a category for an original deposit.  So, we'll just settle for verifying
        # that this is a feed, and be done with it.
class Ore_Statement_Resource(Statement_Resource):
    """A resource aggregated by an ORE-serialised SWORD2 statement.

    Extends Statement_Resource with the sword:packaging URIs attached to
    the aggregated resource.
    """
    def __init__(self, uri, is_original_deposit=False, packaging_uris=None,
                    deposited_on=None, deposited_by=None, deposited_on_behalf_of=None):
        Statement_Resource.__init__(self, uri, is_original_deposit, deposited_on,
                                    deposited_by, deposited_on_behalf_of)
        self.uri = uri
        # BUGFIX: the default was a shared mutable list ([]), so every
        # instance created without explicit packaging_uris aliased the same
        # list.  A None sentinel preserves the call signature's meaning.
        self.packaging = packaging_uris if packaging_uris is not None else []
    def __str__(self):
        # BUGFIX: the template was previously returned without interpolating
        # any values (flagged FIXME upstream); format it properly.
        return ("URI: %s ; is_original_deposit: %s ; packaging_uris: %s ; deposited_on: %s"
                % (self.uri, self.is_original_deposit, self.packaging, self.deposited_on))
class Ore_Sword_Statement(Sword_Statement):
def __init__(self, xml_document=None):
Sword_Statement.__init__(self, xml_document)
if self.valid:
self._enumerate_descriptions()
else:
s_l.warn("Statement did not parse as valid, so the content will" +
" not be examined further; see the 'dom' attribute for the xml")
def _enumerate_descriptions(self):
if self.dom is None:
return
aggregated_resource_uris = []
original_deposit_uris = []
state_uris = []
# first pass gets me the uris of all the things I care about
for desc in self.dom.findall(NS['rdf'] % "Description"):
# look for the aggregation
ore_idb = desc.findall(NS['ore'] % "isDescribedBy")
if ore_idb is None:
continue
# we are looking at the aggregation Describes itself
for agg_uri in desc.findall(NS['ore'] % "aggregates"):
aggregated_resource_uris.append(agg_uri.get(NS['rdf'] % "resource"))
for od_uri in desc.findall(NS['sword'] % "originalDeposit"):
original_deposit_uris.append(od_uri.get(NS['rdf'] % "resource"))
for state_uri in desc.findall(NS['sword'] % "state"):
state_uris.append(state_uri.get(NS['rdf'] % "resource"))
s_l.debug("First pass on ORE statement yielded the following Aggregated Resources: " + str(aggregated_resource_uris))
s_l.debug("First pass on ORE statement yielded the following Original Deposits: " + str(original_deposit_uris))
s_l.debug("First pass on ORE statement yielded the following States: " + str(state_uris))
# second pass, sort out the different descriptions
for desc in self.dom.findall(NS['rdf'] % "Description"):
about = desc.get(NS['rdf'] % "about")
s_l.debug("Examining Described Resource: " + str(about))
if about in state_uris:
s_l.debug(str(about) + " is a State URI")
# read and store the state information
description_text = None
sdesc = desc.find(NS['sword'] % "stateDescription")
if sdesc is not None and sdesc.text is not None and sdesc.text.strip() != "":
description_text = sdesc.text.strip()
self.states.append((about, description_text))
# remove this uri from the list of state_uris, so that we can
# deal with any left over later
state_uris.remove(about)
elif about in aggregated_resource_uris:
s_l.debug(str(about) + " is an Aggregated Resource")
is_original_deposit = about in original_deposit_uris
s_l.debug("Is Aggregated Resource an original deposit? " + str(is_original_deposit))
packaging_uris = []
for pack in desc.findall(NS['sword'] % "packaging"):
pack_uri = pack.get(NS['rdf'] % "resource")
packaging_uris.append(pack_uri)
s_l.debug("Registering Packaging URI: " + pack_uri)
deposited_on = None
do = desc.find(NS['sword'] % "depositedOn")
if do is not None and do.text is not None and do.text.strip() != "":
try:
deposited_on = datetime.strptime(do.text.strip(), "%Y-%m-%dT%H:%M:%SZ") # e.g. 2011-03-02T20:50:06Z
s_l.debug("Registering Deposited On: " + do.text.strip())
except Exception, e:
s_l.error("Failed to parse date - %s" % e)
s_l.error("Supplied date as string was: %s" % do.text.strip())
deposited_by = None
db = desc.find(NS['sword'] % "depositedBy")
if db is not None and db.text is not None and db.text.strip() != "":
deposited_by = db.text.strip()
s_l.debug("Registering Deposited By: " + deposited_by)
deposited_on_behalf_of = None
dobo = desc.find(NS['sword'] % "depositedOnBehalfOf")
if dobo is not None and dobo.text is not None and db.text.strip() != "":
deposited_on_behalf_of = dobo.text.strip()
s_l.debug("Registering Deposited On Behalf Of: " + deposited_on_behalf_of)
ose = Ore_Statement_Resource(about, is_original_deposit, packaging_uris,
deposited_on, deposited_by, deposited_on_behalf_of)
if is_original_deposit:
s_l.debug("Registering Aggregated Resource as an Original Deposit")
self.original_deposits.append(ose)
self.resources.append(ose)
# remove this uri from the list of resource_uris, so that we can
# deal with any left over later
aggregated_resource_uris.remove(about)
# finally, we may have aggregated resources and states which did not
# have rdf:Description elements associated with them. We do the minimum
# possible here to accommodate them
s_l.debug("Undescribed State URIs: " + str(state_uris))
for state in state_uris:
self.states.append((state, None))
s_l.debug("Undescribed Aggregated Resource URIs: " + str(aggregated_resource_uris))
for ar in aggregated_resource_uris:
ose = Ore_Statement_Resource(ar)
self.resources.append(ose)
def _validate(self):
valid = True
if self.dom is None:
return
# MUST be an RDF/XML resource map
# is this rdf xml:
if self.dom.tag.lower() != NS['rdf'] % "rdf" and self.dom.tag.lower() != "rdf":
s_l.info("Validation of Ore Statement failed, as root tag is not RDF: " + self.dom.tag)
valid = False
# does it meet the basic requirements of being a resource map, which
# is to have an ore:describes and and ore:isDescribedBy
describes_uri = None
rem_uri = None
aggregation_uri = None
is_described_by_uris = []
for desc in self.dom.findall(NS['rdf'] % "Description"):
# look for the describes tag
ore_desc = desc.find(NS['ore'] % "describes")
if ore_desc is not None:
describes_uri = ore_desc.get(NS['rdf'] % "resource")
rem_uri = desc.get(NS['rdf'] % "about")
# look for the isDescribedBy tag
ore_idb = desc.findall(NS['ore'] % "isDescribedBy")
if len(ore_idb) > 0:
aggregation_uri = desc.get(NS['rdf'] % "about")
for idb in ore_idb:
is_described_by_uris.append(idb.get(NS['rdf'] % "resource"))
# now check that all those uris tie up:
if describes_uri != aggregation_uri:
s_l.info("Validation of Ore Statement failed; ore:describes URI does not match Aggregation URI: " +
describes_uri + " != " + aggregation_uri)
valid = False
if rem_uri not in is_described_by_uris:
s_l.info("Validation of Ore Statement failed; Resource Map URI does not match one of ore:isDescribedBy URIs: " +
rem_uri + " not in " + str(is_described_by_uris))
valid = False
s_l.info("Statement validation; was it a success? " + str(valid))
self.valid = valid
| Hwesta/python-client-sword2 | sword2/statement.py | Python | mit | 14,477 | [
"ASE"
] | c30a18c5afad4ea54bd251bcb091470d40f12470b11c8c95a13be9cc4d4b914b |
from fortranformat import FortranRecordWriter as FW, FortranRecordReader as FR
from cube_helpers import InputFormatError, Atom, Molecule, Field
from cube_helpers import angstrom_per_bohr
import math
class RespFormats(object):
    """Fortran record formats used by the ``resp`` program's input files."""
    # Numeric Fortran formats specified in resp input specification
    # http://upjv.q4md-forcefieldtools.org/RED/resp/#other3
    header = '2I5'  # atom count, point count
    atoms = '17X,3E16.7'  # atom x, y, z coordinates
    points = '1X,4E16.7'  # ESP value followed by point x, y, z coordinates
class DuplicateEntryError(Exception):
    """Raised when duplicate ESP points are encountered and duplicates are disallowed."""
    pass
class InputValueError(Exception):
    """Raised when an input value (point coordinates) is malformed."""
    pass
class G09_esp(object):
    """Parser for ESP point files.

    Accepts either the Gaussian 09 ``.esp`` format (generated with
    Pop=MK/CHelp(G) and IOp(6/50=1)) or the Antechamber-style format
    produced by ``repESP``.  On success exposes ``self.molecule`` (atom
    positions) and ``self.field`` (a NonGridField of ESP values).
    """
    def __init__(self, fn, coords_in_bohr=True, allow_dupes=False):
        self._read_in(fn, coords_in_bohr, allow_dupes)

    def _read_in(self, fn, coords_in_bohr, allow_dupes):
        """Detect the file type from the first line and dispatch parsing."""
        with open(fn, 'r') as f:
            first_line = f.readline().rstrip('\n')
            file_type = self._read_top(fn, f, first_line)
            if file_type == 'Gaussian':
                self._read_header(f)
                self._read_atoms(f, coords_in_bohr)
                self._read_moments(f)
                self._g09_values_header(f)
                field_info = ['input-Gaussian']
            else:
                self._read_header_esp(first_line)
                self._read_atoms_esp(f, coords_in_bohr)
                field_info = ['input-repESP']
            values, points = self._read_esp_points(f, coords_in_bohr,
                                                   allow_dupes)
            self.field = NonGridField(values, points, 'esp',
                                      field_info=field_info)

    @staticmethod
    def raiseInputFormatError(fn):
        """Raise a uniform error for unrecognized input file formats."""
        raise InputFormatError(
            "The input file {0} does not seem to be the G09 .esp format "
            "(generate by specifying Pop=MK/CHelp(G) with IOp(6/50=1) or "
            "the Antechamber format produced by `repESP`."
            .format(fn))

    def _read_top(self, fn, f, line):
        """Return 'Gaussian' or 'repESP' based on the first line of the file."""
        if line == " ESP FILE - ATOMIC UNITS":
            return 'Gaussian'
        try:
            # A repESP header line is a Fortran '2I5' record (two integers).
            parsed = FR(RespFormats.header).read(line)
        except ValueError:
            pass
        else:
            if None not in parsed:
                return 'repESP'
        self.raiseInputFormatError(fn)

    def _read_header(self, f):
        """Read charge and multiplicity from the Gaussian header line."""
        line = f.readline().split()
        self.charge = int(line[2])
        self.multip = int(line[-1])

    def _read_header_esp(self, line):
        """Parse atom and point counts from the repESP header line."""
        self.atom_count, self.points_count = FR(RespFormats.header).read(line)
        self.atom_count = int(self.atom_count)
        self.points_count = int(self.points_count)

    def _read_atoms(self, f, coords_in_bohr):
        """Read atom identities and coordinates (Gaussian format)."""
        line = f.readline().split()
        atom_count = int(line[-1])
        self.molecule = Molecule(self)
        for i in range(atom_count):
            line = f.readline().split()
            identity = line[0]
            atomic_no = Atom.inv_periodic[identity]
            # Gaussian writes floats in D-exponent notation (e.g. 1.0D+00).
            coords = [float(coord.replace('D', 'E')) for coord in line[1:4]]
            # Neglect the ESP value at atoms, which is given by last value
            self.molecule.append(Atom(i+1, atomic_no, coords, coords_in_bohr))

    def _read_atoms_esp(self, f, coords_in_bohr):
        """Read atom coordinates (repESP format, which carries no identities)."""
        self.molecule = Molecule(self)
        for i in range(self.atom_count):
            line = f.readline().split()
            atomic_no = 0  # This will select the last, 'Unrecognized' element
            coords = [float(coord) for coord in line]
            self.molecule.append(Atom(i+1, atomic_no, coords, coords_in_bohr))

    def _read_moments(self, f):
        """Skip over the dipole/quadrupole moment section (Gaussian format)."""
        assert f.readline().rstrip('\n') == " DIPOLE MOMENT:"
        # Currently not implemented, the lines are just skipped
        for i in range(4):
            f.readline()

    def _g09_values_header(self, f):
        """Read the ESP-values section header and the point count it declares."""
        line = f.readline().split()
        expected = "ESP VALUES AND GRID POINT COORDINATES. #POINTS ="
        assert ' '.join(line[:-1]) == expected
        self.points_count = int(line[-1])

    def _read_esp_points(self, f, coords_in_bohr, allow_dupes):
        """Read (value, x, y, z) records; return (values, Points).

        Raises InputFormatError when the record count disagrees with the
        header, when duplicate points are found while disallowed, or when
        coordinates are malformed.
        """
        points_coords = []
        values = []
        for line in f:
            # The replace is not necessary in the case of Antechamber files
            # produced by repESP, but this function is general for both types
            line = [val.replace('D', 'E') for val in line.split()]
            points_coords.append(tuple(line[1:4]))
            values.append(float(line[0]))

        if len(points_coords) != self.points_count:
            raise InputFormatError(
                "The number of ESP points {0} does not agree with that "
                "specified at the top of the input file: {1}".format(
                    len(points_coords), self.points_count))

        try:
            points = Points(points_coords, coords_in_bohr, allow_dupes)
        except DuplicateEntryError:
            raise InputFormatError(
                "Duplicate points in the input file. This might be an artefact"
                " of the algorithm which produced the points. If these points "
                "are to be counted twice, the NonGridField needs to be called "
                "with `allow_dupes=True`")
        except InputValueError as e:
            # Translate the errors when creating Points to errors due to input
            # file format
            raise InputFormatError(e)

        return values, points
class Points(list):
    """List of 3-D ESP evaluation points.

    Each element is an ``[x, y, z]`` list of floats in Angstrom.  Input
    coordinates may be supplied in Bohr (converted on ingest).  When
    ``allow_dupes`` is False, inputs must be tuples of *strings* and
    duplicate coordinate tuples raise DuplicateEntryError.
    """

    def __init__(self, points_coords, coords_in_bohr=False, allow_dupes=True):
        super().__init__()
        self.allow_dupes = allow_dupes
        if not self.allow_dupes:
            # String representations are stable within a single output file,
            # so the raw tuples can serve as duplicate-detection keys.
            self.points_dict = {}
        for point_coords in points_coords:
            self.append(self._check_and_create_point(point_coords,
                                                     coords_in_bohr))

    def __eq__(self, other):
        """Element-wise comparison with an absolute tolerance of 1e-6."""
        if len(self) != len(other):
            return False
        for own_point, other_point in zip(self, other):
            for coord in range(3):
                if not math.isclose(own_point[coord], other_point[coord],
                                    abs_tol=1e-6):
                    # BUGFIX: removed a stray debug `print` of the difference;
                    # __eq__ must not write to stdout as a side effect.
                    return False
        return True

    def __ne__(self, other):
        return not self == other

    def _check_and_create_point(self, point_coords, coords_in_bohr):
        """Validate one coordinate tuple and return it as a list of floats.

        Raises InputValueError for malformed input and DuplicateEntryError
        for repeated points when duplicates are disallowed.
        """
        if len(point_coords) != 3:
            raise InputValueError(
                "Encountered a point with a number of coordinates {0}, which "
                "is different from 3.".format(len(point_coords)))

        if not self.allow_dupes:
            for point_coord in point_coords:
                if type(point_coord) is not str:
                    raise InputValueError(
                        "If no duplicates are allowed, `points` must be"
                        " a list of tuples of *strings*. Encountered type {0} "
                        "instead.".format(type(point_coord)))

            # This is fine because it's a string representation which will be
            # identical throughout given output file
            if point_coords in self.points_dict:
                raise DuplicateEntryError("Encountered a duplicate point: {0}"
                                          .format(point_coords))
            # BUGFIX: the dict was checked but never populated, so duplicate
            # detection could never trigger.  Record the key now.
            self.points_dict[point_coords] = True

        try:
            result = [float(point_coord) for point_coord in point_coords]
        except ValueError:
            raise InputValueError("Couldn't convert coordinates {0} to float."
                                  .format(point_coords))

        if coords_in_bohr:
            result = [angstrom_per_bohr*point_coord for point_coord in result]

        return result
class NonGridField(Field):

    def __init__(self, values, points, field_type, field_info=None):
        """Create a NonGridField from given coordinates and values

        Parameters
        ----------
        points : Points
        values : List
            The list of values at *corresponding* coordinates.
        """
        super().__init__(values, field_type, field_info, check_nans=False)
        self.points = points

        if type(points) is not Points:
            raise TypeError("Expected type Points for the points argument, "
                            "instead got {0}".format(type(points)))

        if len(points) != len(values):
            raise ValueError("The number of points {0} is different from the "
                             "number of values {1}.".format(len(points),
                                                            len(values)))

    def write_to_file(self, output_fn, molecule, write_coords_in_bohr=True):
        """Write the field in the resp Fortran record format.

        Opens `output_fn` exclusively ('x' mode) — raises FileExistsError
        if the file already exists.  Coordinates are converted back to Bohr
        unless `write_coords_in_bohr` is False.
        """
        with open(output_fn, 'x') as f:
            # Header record: atom count, point count
            f.write(
                FW(RespFormats.header).write(
                    [len(molecule), len(self.points)]
                ) + "\n"
            )
            for atom in molecule:
                if write_coords_in_bohr:
                    coords = [atom_coord/angstrom_per_bohr for atom_coord in
                              atom.coords]
                else:
                    coords = atom.coords
                f.write(FW(RespFormats.atoms).write(coords) + "\n")
            for point_coords, esp_val in zip(self.points, self.values):
                if write_coords_in_bohr:
                    point_coords = [point_coord/angstrom_per_bohr for
                                    point_coord in point_coords]
                # Point record: ESP value followed by x, y, z
                f.write(
                    FW(RespFormats.points).write(
                        [esp_val] + point_coords
                    ) + "\n"
                )

    def get_values(self):
        """Return the list of ESP values (parallel to get_points())."""
        return self.values

    def get_points(self):
        """Return the Points object holding the evaluation coordinates."""
        return self.points
| jszopi/repESP | repESP_old/resp_helpers.py | Python | gpl-3.0 | 9,641 | [
"Gaussian"
] | ea733a038255eb6366fd671cb9904bf01c6defbafd574e1bdb3d70d84581e300 |
"""
Checks for the Attribute Conventions for Dataset Discovery (ACDD)
This module contains classes defined as checks part of the compliance checker
project for the verification and scoring of attributes for datasets.
"""
from datetime import timedelta
from functools import partial
import numpy as np
import pendulum
from cftime import num2pydate
from pygeoif import from_wkt
from compliance_checker import cfutil
from compliance_checker.base import (
BaseCheck,
BaseNCCheck,
Result,
check_has,
ratable_result,
)
from compliance_checker.cf.util import _possiblexunits, _possibleyunits
from compliance_checker.util import dateparse, datetime_is_iso, kvp_convert
class ACDDBaseCheck(BaseCheck):
_cc_spec = "acdd"
_cc_description = "Attribute Conventions for Dataset Discovery (ACDD)"
_cc_url = "http://wiki.esipfed.org/index.php?title=Category:Attribute_Conventions_Dataset_Discovery"
_cc_display_headers = {3: "Highly Recommended", 2: "Recommended", 1: "Suggested"}
    def __init__(self):
        """Initialize the highly recommended / recommended / suggested ACDD
        attribute lists checked by this base class.  List entries are either
        attribute-name strings (checked for presence by ``check_has``) or
        ``(name, callable)`` tuples for attributes needing custom validation.
        """
        self.high_rec_atts = ["title", "keywords", "summary"]

        self.rec_atts = [
            "id",
            "naming_authority",
            "history",
            "comment",
            "date_created",
            "creator_name",
            "creator_url",
            "creator_email",
            "institution",
            "project",
            "processing_level",
            # geospatial_bounds is validated as OGC WKT, not just presence
            ("geospatial_bounds", self.verify_geospatial_bounds),
            "geospatial_lat_min",
            "geospatial_lat_max",
            "geospatial_lon_min",
            "geospatial_lon_max",
            "geospatial_vertical_min",
            "geospatial_vertical_max",
            "time_coverage_start",
            "time_coverage_end",
            "time_coverage_duration",
            "time_coverage_resolution",
            "standard_name_vocabulary",
            "license",
        ]

        self.sug_atts = [
            "contributor_name",
            "contributor_role",
            "date_modified",
            "date_issued",
            "geospatial_lat_units",
            "geospatial_lat_resolution",
            "geospatial_lon_units",
            "geospatial_lon_resolution",
            "geospatial_vertical_units",
            "geospatial_vertical_resolution",
        ]

        # This variable is used to cache the results of applicable variables so
        # the method isn't executed repeatedly.
        self._applicable_variables = None
        # to be used to format variable Result groups headers
        self._var_header = 'variable "{}" missing the following attributes:'
        # set up attributes according to version
    @check_has(BaseCheck.HIGH, gname="Global Attributes")
    def check_high(self, ds):
        """
        Performs a check on each highly recommended attributes' existence in the dataset

        :param netCDF4.Dataset ds: An open netCDF dataset
        :returns: list of attribute names; the ``check_has`` decorator
            performs the actual presence checks and scoring
        """
        return self.high_rec_atts
    @check_has(BaseCheck.MEDIUM, gname="Global Attributes")
    def check_recommended(self, ds):
        """
        Performs a check on each recommended attributes' existence in the dataset

        :param netCDF4.Dataset ds: An open netCDF dataset
        :returns: list of attribute names (or (name, verifier) tuples); the
            ``check_has`` decorator performs the actual checks and scoring
        """
        return self.rec_atts
    @check_has(BaseCheck.LOW, gname="Global Attributes")
    def check_suggested(self, ds):
        """
        Performs a check on each suggested attributes' existence in the dataset

        :param netCDF4.Dataset ds: An open netCDF dataset
        :returns: list of attribute names; the ``check_has`` decorator
            performs the actual presence checks and scoring
        """
        return self.sug_atts
def get_applicable_variables(self, ds):
"""
Returns a list of variable names that are applicable to ACDD Metadata
Checks for variables. This includes geophysical and coordinate
variables only.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if self._applicable_variables is None:
self.applicable_variables = cfutil.get_geophysical_variables(ds)
varname = cfutil.get_time_variable(ds)
# avoid duplicates by checking if already present
if varname and (varname not in self.applicable_variables):
self.applicable_variables.append(varname)
varname = cfutil.get_lon_variable(ds)
if varname and (varname not in self.applicable_variables):
self.applicable_variables.append(varname)
varname = cfutil.get_lat_variable(ds)
if varname and (varname not in self.applicable_variables):
self.applicable_variables.append(varname)
varname = cfutil.get_z_variable(ds)
if varname and (varname not in self.applicable_variables):
self.applicable_variables.append(varname)
return self.applicable_variables
def check_var_long_name(self, ds):
"""
Checks each applicable variable for the long_name attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
# ACDD Variable Metadata applies to all coordinate variables and
# geophysical variables only.
for variable in self.get_applicable_variables(ds):
msgs = []
long_name = getattr(ds.variables[variable], "long_name", None)
check = long_name is not None
if not check:
msgs.append("long_name")
results.append(
Result(BaseCheck.HIGH, check, self._var_header.format(variable), msgs)
)
return results
def check_var_standard_name(self, ds):
"""
Checks each applicable variable for the standard_name attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
std_name = getattr(ds.variables[variable], "standard_name", None)
check = std_name is not None
if not check:
msgs.append("standard_name")
results.append(
Result(BaseCheck.HIGH, check, self._var_header.format(variable), msgs)
)
return results
def check_var_units(self, ds):
"""
Checks each applicable variable for the units attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
# Check units and dims for variable
unit_check = hasattr(ds.variables[variable], "units")
no_dim_check = getattr(ds.variables[variable], "dimensions") == tuple()
# Check if we have no dimensions. If no dims, skip test
if no_dim_check:
continue
# Check if we have no units
if not unit_check:
msgs.append("units")
results.append(
Result(
BaseCheck.HIGH, unit_check, self._var_header.format(variable), msgs
)
)
return results
def check_acknowledgment(self, ds):
"""
Check if acknowledgment/acknowledgment attribute is present. Because
acknowledgement has its own check, we are keeping it out of the Global
Attributes (even though it is a Global Attr).
:param netCDF4.Dataset ds: An open netCDF dataset
"""
check = False
messages = []
if hasattr(ds, "acknowledgment") or hasattr(ds, "acknowledgement"):
check = True
else:
messages.append("acknowledgment/acknowledgement not present")
# name="Global Attributes" so gets grouped with Global Attributes
return Result(BaseCheck.MEDIUM, check, "Global Attributes", msgs=messages)
def check_lat_extents(self, ds):
"""
Check that the values of geospatial_lat_min/geospatial_lat_max
approximately match the data.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not (hasattr(ds, "geospatial_lat_min") or hasattr(ds, "geospatial_lat_max")):
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
["geospatial_lat_min/max attribute not found, CF-1.6 spec chapter 4.1"],
)
try: # type cast
lat_min = float(ds.geospatial_lat_min)
lat_max = float(ds.geospatial_lat_max)
except ValueError:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
[
"Could not convert one of geospatial_lat_min ({}) or max ({}) to float see CF-1.6 spec chapter 4.1"
"".format(ds.geospatial_lat_min, ds.geospatial_lat_max)
],
)
# identify lat var(s) as per CF 4.1
lat_vars = {} # var -> number of criteria passed
for name, var in ds.variables.items():
# must have units
if not hasattr(var, "units"):
continue
lat_vars[var] = 0
# units in this set
if var.units in _possibleyunits:
lat_vars[var] += 1
# standard name of "latitude"
if hasattr(var, "standard_name") and var.standard_name == "latitude":
lat_vars[var] += 1
# axis of "Y"
if hasattr(var, "axis") and var.axis == "Y":
lat_vars[var] += 1
# trim out any zeros
lat_vars = {k: v for k, v in lat_vars.items() if v > 0}
if len(lat_vars) == 0:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
[
"Could not find lat variable to test extent of geospatial_lat_min/max, see CF-1.6 spec chapter 4.1"
],
)
# sort by criteria passed
final_lats = sorted(lat_vars, key=lambda x: lat_vars[x], reverse=True)
obs_mins = {
var._name: np.nanmin(var) for var in final_lats if not np.isnan(var).all()
}
obs_maxs = {
var._name: np.nanmax(var) for var in final_lats if not np.isnan(var).all()
}
min_pass = any((np.isclose(lat_min, min_val) for min_val in obs_mins.values()))
max_pass = any((np.isclose(lat_max, max_val) for max_val in obs_maxs.values()))
allpass = sum((min_pass, max_pass))
msgs = []
if not min_pass:
msgs.append(
"Data for possible latitude variables (%s) did not match geospatial_lat_min value (%s)"
% (obs_mins, lat_min)
)
if not max_pass:
msgs.append(
"Data for possible latitude variables (%s) did not match geospatial_lat_max value (%s)"
% (obs_maxs, lat_max)
)
return Result(
BaseCheck.MEDIUM, (allpass, 2), "geospatial_lat_extents_match", msgs
)
    def check_lon_extents(self, ds):
        """
        Check that the values of geospatial_lon_min/geospatial_lon_max
        approximately match the data.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        if not (
            hasattr(ds, "geospatial_lon_min") and hasattr(ds, "geospatial_lon_max")
        ):
            return Result(
                BaseCheck.MEDIUM,
                False,
                "geospatial_lon_extents_match",
                ["geospatial_lon_min/max attribute not found, CF-1.6 spec chapter 4.1"],
            )

        try:  # type cast
            lon_min = float(ds.geospatial_lon_min)
            lon_max = float(ds.geospatial_lon_max)
        except ValueError:
            return Result(
                BaseCheck.MEDIUM,
                False,
                "geospatial_lon_extents_match",
                [
                    "Could not convert one of geospatial_lon_min ({}) or max ({}) to float see CF-1.6 spec chapter 4.1"
                    "".format(ds.geospatial_lon_min, ds.geospatial_lon_max)
                ],
            )

        # identify lon var(s) as per CF 4.2: score each candidate by how many
        # longitude criteria (units, standard_name, axis) it satisfies
        lon_vars = {}  # var -> number of criteria passed
        for name, var in ds.variables.items():
            # must have units
            if not hasattr(var, "units"):
                continue

            lon_vars[var] = 0

            # units in this set
            if var.units in _possiblexunits:
                lon_vars[var] += 1

            # standard name of "longitude"
            if hasattr(var, "standard_name") and var.standard_name == "longitude":
                lon_vars[var] += 1

            # axis of "X"
            if hasattr(var, "axis") and var.axis == "X":
                lon_vars[var] += 1

        # trim out any zeros
        lon_vars = {k: v for k, v in lon_vars.items() if v > 0}

        if len(lon_vars) == 0:
            return Result(
                BaseCheck.MEDIUM,
                False,
                "geospatial_lon_extents_match",
                [
                    "Could not find lon variable to test extent of geospatial_lon_min/max, see CF-1.6 spec chapter 4.2"
                ],
            )

        # sort by criteria passed
        final_lons = sorted(lon_vars, key=lambda x: lon_vars[x], reverse=True)

        # Accept a match against ANY candidate longitude variable
        obs_mins = {
            var._name: np.nanmin(var) for var in final_lons if not np.isnan(var).all()
        }
        obs_maxs = {
            var._name: np.nanmax(var) for var in final_lons if not np.isnan(var).all()
        }

        min_pass = any((np.isclose(lon_min, min_val) for min_val in obs_mins.values()))
        max_pass = any((np.isclose(lon_max, max_val) for max_val in obs_maxs.values()))

        allpass = sum((min_pass, max_pass))

        msgs = []
        if not min_pass:
            msgs.append(
                "Data for possible longitude variables (%s) did not match geospatial_lon_min value (%s)"
                % (obs_mins, lon_min)
            )
        if not max_pass:
            msgs.append(
                "Data for possible longitude variables (%s) did not match geospatial_lon_max value (%s)"
                % (obs_maxs, lon_max)
            )

        return Result(
            BaseCheck.MEDIUM, (allpass, 2), "geospatial_lon_extents_match", msgs
        )
    def verify_geospatial_bounds(self, ds):
        """Checks that the geospatial bounds is well formed OGC WKT"""
        var = getattr(ds, "geospatial_bounds", None)
        check = var is not None
        if not check:
            return ratable_result(
                False,
                "Global Attributes",  # grouped with Globals
                ["geospatial_bounds not present"],
            )

        try:
            # TODO: verify that WKT is valid given CRS (defaults to EPSG:4326
            # in ACDD.
            from_wkt(ds.geospatial_bounds)
        # NOTE(review): pygeoif's from_wkt appears to surface parse failures
        # as AttributeError here — confirm this covers all malformed WKT.
        except AttributeError:
            return ratable_result(
                False,
                "Global Attributes",  # grouped with Globals
                [
                    (
                        "Could not parse WKT from geospatial_bounds,"
                        ' possible bad value: "{}"'.format(ds.geospatial_bounds)
                    )
                ],
                variable_name="geospatial_bounds",
            )
        # parsed OK
        else:
            return ratable_result(True, "Global Attributes", tuple())
def _check_total_z_extents(self, ds, z_variable):
"""
Check the entire array of Z for minimum and maximum and compare that to
the vertical extents defined in the global attributes
:param netCDF4.Dataset ds: An open netCDF dataset
:param str z_variable: Name of the variable representing the Z-Axis
"""
msgs = []
total = 2
try:
vert_min = float(ds.geospatial_vertical_min)
except ValueError:
msgs.append("geospatial_vertical_min cannot be cast to float")
try:
vert_max = float(ds.geospatial_vertical_max)
except ValueError:
msgs.append("geospatial_vertical_max cannot be cast to float")
if len(msgs) > 0:
return Result(
BaseCheck.MEDIUM, (0, total), "geospatial_vertical_extents_match", msgs
)
zvalue = ds.variables[z_variable][:]
# If the array has fill values, which is allowed in the case of point
# features
if hasattr(zvalue, "mask"):
zvalue = zvalue[~zvalue.mask]
if zvalue.size == 0:
msgs.append(
"Cannot compare geospatial vertical extents "
"against min/max of data, as non-masked data "
"length is zero"
)
return Result(
BaseCheck.MEDIUM, (0, total), "geospatial_vertical_extents_match", msgs
)
else:
zmin = zvalue.min()
zmax = zvalue.max()
if not np.isclose(vert_min, zmin):
msgs.append(
"geospatial_vertical_min != min(%s) values, %s != %s"
% (z_variable, vert_min, zmin)
)
if not np.isclose(vert_max, zmax):
msgs.append(
"geospatial_vertical_max != max(%s) values, %s != %s"
% (z_variable, vert_min, zmax)
)
return Result(
BaseCheck.MEDIUM,
(total - len(msgs), total),
"geospatial_vertical_extents_match",
msgs,
)
def _check_scalar_vertical_extents(self, ds, z_variable):
"""
Check the scalar value of Z compared to the vertical extents which
should also be equivalent
:param netCDF4.Dataset ds: An open netCDF dataset
:param str z_variable: Name of the variable representing the Z-Axis
"""
vert_min = ds.geospatial_vertical_min
vert_max = ds.geospatial_vertical_max
msgs = []
total = 2
zvalue = ds.variables[z_variable][:].item()
if not np.isclose(vert_min, vert_max):
msgs.append(
"geospatial_vertical_min != geospatial_vertical_max for scalar depth values, %s != %s"
% (vert_min, vert_max)
)
if not np.isclose(vert_max, zvalue):
msgs.append(
"geospatial_vertical_max != %s values, %s != %s"
% (z_variable, vert_max, zvalue)
)
return Result(
BaseCheck.MEDIUM,
(total - len(msgs), total),
"geospatial_vertical_extents_match",
msgs,
)
def check_vertical_extents(self, ds):
"""
Check that the values of geospatial_vertical_min/geospatial_vertical_max approximately match the data.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not (
hasattr(ds, "geospatial_vertical_min")
and hasattr(ds, "geospatial_vertical_max")
):
return
z_variable = cfutil.get_z_variable(ds)
if not z_variable:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_vertical_extents_match",
[
"Could not find vertical variable to test extent of geospatial_vertical_min/geospatial_vertical_max, see CF-1.6 spec chapter 4.3"
],
)
if ds.variables[z_variable].dimensions == tuple():
return self._check_scalar_vertical_extents(ds, z_variable)
return self._check_total_z_extents(ds, z_variable)
def check_time_extents(self, ds):
"""
Check that the values of time_coverage_start/time_coverage_end approximately match the data.
"""
if not (
hasattr(ds, "time_coverage_start") and hasattr(ds, "time_coverage_end")
):
return
# Parse the ISO 8601 formatted dates
try:
t_min = dateparse(ds.time_coverage_start)
t_max = dateparse(ds.time_coverage_end)
except:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
[
"time_coverage attributes are not formatted properly. Use the ISO 8601:2004 date format, preferably the extended format."
],
)
timevar = cfutil.get_time_variable(ds)
if not timevar:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
[
"Could not find time variable to test extent of time_coverage_start/time_coverage_end, see CF-1.6 spec chapter 4.4"
],
)
# Time should be monotonically increasing, so we make that assumption here so we don't have to download THE ENTIRE ARRAY
try:
# num2date returns as naive date, but with time adjusted to UTC
# we need to attach timezone information here, or the date
# subtraction from t_min/t_max will assume that a naive timestamp is
# in the same time zone and cause erroneous results.
# Pendulum uses UTC by default, but we are being explicit here
time0 = pendulum.instance(
num2pydate(ds.variables[timevar][0], ds.variables[timevar].units), "UTC"
)
time1 = pendulum.instance(
num2pydate(ds.variables[timevar][-1], ds.variables[timevar].units),
"UTC",
)
except:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
["Failed to retrieve and convert times for variables %s." % timevar],
)
start_dt = abs(time0 - t_min)
end_dt = abs(time1 - t_max)
score = 2
msgs = []
if start_dt > timedelta(hours=1):
msgs.append(
"Date time mismatch between time_coverage_start and actual "
"time values %s (time_coverage_start) != %s (time[0])"
% (t_min.isoformat(), time0.isoformat())
)
score -= 1
if end_dt > timedelta(hours=1):
msgs.append(
"Date time mismatch between time_coverage_end and actual "
"time values %s (time_coverage_end) != %s (time[N])"
% (t_max.isoformat(), time1.isoformat())
)
score -= 1
return Result(BaseCheck.MEDIUM, (score, 2), "time_coverage_extents_match", msgs)
def verify_convention_version(self, ds):
"""
Verify that the version in the Conventions field is correct
"""
try:
for convention in (
getattr(ds, "Conventions", "").replace(" ", "").split(",")
):
if convention == "ACDD-" + self._cc_spec_version:
return ratable_result(
(2, 2), None, []
) # name=None so grouped with Globals
# if no/wrong ACDD convention, return appropriate result
# Result will have name "Global Attributes" to group with globals
m = ["Conventions does not contain 'ACDD-{}'".format(self._cc_spec_version)]
return ratable_result((1, 2), "Global Attributes", m)
except AttributeError: # NetCDF attribute not found
m = [
"No Conventions attribute present; must contain ACDD-{}".format(
self._cc_spec_version
)
]
# Result will have name "Global Attributes" to group with globals
return ratable_result((0, 2), "Global Attributes", m)
class ACDDNCCheck(BaseNCCheck, ACDDBaseCheck):
    """Bind the ACDD attribute checks to the netCDF check backend."""
    pass
class ACDD1_1Check(ACDDNCCheck):
    """Checker for version 1.1 of the Attribute Conventions for Dataset
    Discovery; extends the base attribute lists with 1.1-specific entries."""
    _cc_spec_version = "1.1"
    _cc_description = "Attribute Conventions for Dataset Discovery (ACDD) 1.1"
    register_checker = True
    def __init__(self):
        super(ACDD1_1Check, self).__init__()
        # keywords_vocabulary is recommended (not merely suggested) in 1.1
        self.rec_atts.extend(["keywords_vocabulary"])
        self.sug_atts.extend(
            [
                "publisher_name",  # publisher,dataCenter
                "publisher_url",  # publisher
                "publisher_email",  # publisher
                "geospatial_vertical_positive",
            ]
        )
class ACDD1_3Check(ACDDNCCheck):
    """Checker for version 1.3 of the Attribute Conventions for Dataset
    Discovery; adds the 1.3 attribute lists, ISO 8601 date checks, and a few
    extra per-dataset/per-variable checks."""

    _cc_spec_version = "1.3"
    _cc_description = "Attribute Conventions for Dataset Discovery (ACDD) 1.3"
    register_checker = True

    def __init__(self):
        super(ACDD1_3Check, self).__init__()
        # Conventions must advertise this ACDD version (highly recommended)
        self.high_rec_atts.extend([("Conventions", self.verify_convention_version)])

        self.rec_atts.extend(
            [
                "geospatial_vertical_positive",
                "geospatial_bounds_crs",
                "geospatial_bounds_vertical_crs",
                "publisher_name",  # publisher,dataCenter
                "publisher_url",  # publisher
                "publisher_email",  # publisher
                "source",
            ]
        )

        self.sug_atts.extend(
            [
                ("creator_type", ["person", "group", "institution", "position"]),
                "creator_institution",
                "platform",
                "platform_vocabulary",
                "keywords_vocabulary",
                "instrument",
                "metadata_link",
                "product_version",
                "references",
                ("publisher_type", ["person", "group", "institution", "position"]),
                "instrument_vocabulary",
                "date_metadata_modified",
                "program",
                "publisher_institution",
            ]
        )

        # Override the plain presence checks for the ISO date attributes:
        # a missing attribute scores 0/2, a present-but-malformed one 1/2.
        def _check_attr_is_iso_date(attr, ds):
            result_name = "{}_is_iso".format(attr)
            if not hasattr(ds, attr):
                return ratable_result(
                    (0, 2), result_name, ["Attr {} is not present".format(attr)]
                )
            else:
                iso_check, msgs = datetime_is_iso(getattr(ds, attr))
                return ratable_result((1 + iso_check, 2), result_name, msgs)

        # run ISO 8601 date checks against the date_created, date_issued,
        # date_modified, and date_metadata_modified global attributes
        self.rec_atts = kvp_convert(self.rec_atts)
        self.rec_atts["date_created"] = partial(_check_attr_is_iso_date, "date_created")

        self.sug_atts = kvp_convert(self.sug_atts)
        for k in (
            "date_{}".format(suffix)
            for suffix in ("issued", "modified", "metadata_modified")
        ):
            self.sug_atts[k] = partial(_check_attr_is_iso_date, k)

    def check_metadata_link(self, ds):
        """
        Checks if metadata link is formed in a rational manner

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        if not hasattr(ds, u"metadata_link"):
            return
        msgs = []
        meta_link = getattr(ds, "metadata_link")
        if "http" not in meta_link:
            msgs.append("Metadata URL should include http:// or https://")
        valid_link = len(msgs) == 0
        return Result(BaseCheck.LOW, valid_link, "metadata_link_valid", msgs)

    def check_id_has_no_blanks(self, ds):
        """
        Check if there are blanks in the id field

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        if not hasattr(ds, u"id"):
            return
        if " " in getattr(ds, u"id"):
            return Result(
                BaseCheck.MEDIUM,
                False,
                "no_blanks_in_id",
                msgs=[u"There should be no blanks in the id field"],
            )
        else:
            return Result(BaseCheck.MEDIUM, True, "no_blanks_in_id", msgs=[])

    def check_var_coverage_content_type(self, ds):
        """
        Check coverage content type against valid ISO-19115-1 codes

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        results = []
        for variable in cfutil.get_geophysical_variables(ds):
            msgs = []
            ctype = getattr(ds.variables[variable], "coverage_content_type", None)
            check = ctype is not None
            if not check:
                msgs.append("coverage_content_type")
                results.append(
                    Result(
                        BaseCheck.HIGH, check, self._var_header.format(variable), msgs
                    )
                )
                continue

            # ISO 19115-1 codes
            valid_ctypes = {
                "image",
                "thematicClassification",
                "physicalMeasurement",
                "auxiliaryInformation",
                "qualityInformation",
                "referenceInformation",
                "modelResult",
                "coordinate",
            }
            if ctype not in valid_ctypes:
                # BUG FIX: the previous message used a single %s placeholder
                # with a two-element tuple, raising TypeError at runtime; it
                # also left check=True, so invalid values still passed.
                check = False
                msgs.append(
                    'coverage_content_type "%s" not in %s'
                    % (ctype, sorted(valid_ctypes))
                )
            results.append(
                Result(
                    BaseCheck.HIGH,
                    check,  # append to list
                    self._var_header.format(variable),
                    msgs,
                )
            )
        return results
| ioos/compliance-checker | compliance_checker/acdd.py | Python | apache-2.0 | 29,740 | [
"NetCDF"
] | 1d871ffc4167ff8bfaec5aeadaed867148b7c80105a912130c6d5566ed095d2b |
#!/usr/bin/env python
import broker
import unittest
import json
import requests
class BrokerTestCase(unittest.TestCase):
def setUp(self):
broker.app.config['TESTING'] = True
self.app = broker.app.test_client()
def tearDown(self):
broker.Broker.reset()
def setUpBank(self):
bank_url = "http://172.17.0.2:4567/banks/1"
# create bank with id 1
r = requests.put(bank_url)
if r.status_code is not 200:
print "error creating bank: %s" % r.status_code
# create accounts for andy
payload = """{"id": "andy", "saldo": 10000,
"player": {"id": "andy"}}"""
r = requests.post("%s/players" % bank_url, data=payload)
if r.status_code is not 201 and not 409:
print "error init bank for andy: %s" % r.status_code
# create accounts for kent
payload = """{"id": "kent", "saldo": 5000,
"player": {"id": "kent"}}"""
r = requests.post("%s/players" % bank_url, data=payload)
if r.status_code is not 201 and not 409:
print "error init bank for kent: %s" % r.status_code
def test_broker_create(self):
rv = self.app.put('/broker/1')
self.assertEqual(rv.status_code, 201)
def test_broker_get(self):
self.app.put('/broker/1')
rv = self.app.get('/broker/1')
self.assertEqual(rv.status_code, 200)
broker = json.loads(rv.data)
self.assertEqual(broker['gameid'], '1')
def test_places_get_list_emtpy(self):
self.app.put('/broker/1')
rv = self.app.get('/broker/1/places')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.data, '[]')
def test_places_get_list(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
self.app.put('/broker/1/places/Turmstrasse',
data='''{
"place": "Turmstrasse",
"owner": "",
"value": 300,
"rent": 150,
"cost": 1600,
"houses": 0
}''')
rv = self.app.get('/broker/1/places')
self.assertEqual(rv.status_code, 200)
actual = json.loads(rv.data)
self.assertEqual(len(actual), 2)
def test_place_register(self):
self.app.put('/broker/1')
rv = self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
self.assertEqual(rv.status_code, 201)
self.assertEqual(rv.data, '/broker/1/places/Badstrasse')
def test_place_register_existing(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv = self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse"
}''')
self.assertEqual(rv.status_code, 200)
def test_multiple_brokers(self):
self.app.put('/broker/1')
self.app.put('/broker/2')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv1 = self.app.get('/broker/1/places')
rv2 = self.app.get('/broker/2/places')
self.assertNotEqual(rv1.data, '[]')
self.assertEqual(rv2.data, '[]')
def test_place_get_by_id(self):
self.app.put('/broker/1')
rv_put = self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv_get = self.app.get(rv_put.data)
actual = json.loads(rv_get.data)
self.assertEqual(actual['place'], 'Badstrasse')
self.assertEqual(actual['owner'], '')
self.assertEqual(actual['value'], 250)
self.assertEqual(actual['rent'], 100)
self.assertEqual(actual['cost'], 1200)
self.assertEqual(actual['houses'], 0)
def test_place_set_owner(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "kent",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv_put = self.app.put('/broker/1/places/Badstrasse/owner',
data='''{
"id": "andy",
"name": "andy"
}''')
self.assertEqual(rv_put.status_code, 200)
rv_owner = self.app.get('/broker/1/places/Badstrasse/owner')
self.assertEqual(rv_owner.data, 'andy')
def test_place_buy(self):
self.setUpBank()
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
self.app.post('/broker/1/places/Badstrasse/visit/andy')
rv_post = self.app.post('/broker/1/places/Badstrasse/owner',
data='''{
"id": "andy",
"name": "andy"
}''')
self.assertEqual(rv_post.status_code, 200)
rv_owner = self.app.get('/broker/1/places/Badstrasse/owner')
self.assertEqual(rv_owner.data, 'andy')
def test_place_buy_fail_no_visit(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv_post = self.app.post('/broker/1/places/Badstrasse/owner',
data='''{
"id": "andy",
"name": "andy"
}''')
self.assertEqual(rv_post.status_code, 409)
def test_place_buy_fail_wrong_visit(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
self.app.put('/broker/1/places/Parkstrasse',
data='''{
"place": "Parkstrasse",
"owner": "",
"value": 150,
"rent": 200,
"cost": 2200,
"houses": 0
}''')
self.app.post('/broker/1/places/Parkstrasse/visit/andy')
rv_post = self.app.post('/broker/1/places/Badstrasse/owner',
data='''{
"id": "andy",
"name": "andy"
}''')
self.assertEqual(rv_post.status_code, 409)
def test_place_buy_fail_already_owned(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": { "id": "kent", "name": "kent" },
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv_post = self.app.post('/broker/1/places/Badstrasse/owner',
data='''{
"id": "andy",
"name": "andy"
}''')
self.assertEqual(rv_post.status_code, 409)
def test_place_visit_unowned(self):
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": "",
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv = self.app.post('/broker/1/places/Badstrasse/visit/andy')
if rv.status_code == 200:
player = json.loads(rv.data)
self.assertEqual(player['place']['name'], 'Badstrasse')
def test_place_visit_opponent_place(self):
self.setUpBank()
self.app.put('/broker/1')
self.app.put('/broker/1/places/Badstrasse',
data='''{
"place": "Badstrasse",
"owner": {
"id": "andy",
"name": "andy"
},
"value": 250,
"rent": 100,
"cost": 1200,
"houses": 0
}''')
rv = self.app.post('/broker/1/places/Badstrasse/visit/kent')
self.assertEqual(rv.status_code, 200)
if __name__ == '__main__':
    # allow running this module directly: python test_broker.py
    unittest.main()
| haw-vs-2015/py-monopoly | test_broker.py | Python | gpl-2.0 | 11,761 | [
"VisIt"
] | c8789d7f5515198e005a0d208e38db49aabc2247adca9e5fa9a97058fccc3adb |
import re
from .common import InfoExtractor
from ..utils import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
determine_ext,
ExtractorError,
)
class MetacafeIE(InfoExtractor):
    """Information Extractor for metacafe.com.

    Handles native metacafe videos as well as videos hosted on external
    services (prefix "yt-" for YouTube, "cb" for CBS/ThePlatform, "an-" for
    AnyClip), which are delegated to the matching extractor.
    """
    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
    # family-filter pages used to confirm the viewer's age before extraction
    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
    IE_NAME = u'metacafe'
    _TESTS = [
        # Youtube video
        {
            u"add_ie": ["Youtube"],
            u"url": u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
            u"file": u"_aUehQsCQtM.mp4",
            u"info_dict": {
                u"upload_date": u"20090102",
                u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
                u"description": u"md5:2439a8ef6d5a70e380c22f5ad323e5a8",
                u"uploader": u"PBS",
                u"uploader_id": u"PBS"
            }
        },
        # Normal metacafe video
        {
            u'url': u'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
            u'md5': u'6e0bca200eaad2552e6915ed6fd4d9ad',
            u'info_dict': {
                u'id': u'11121940',
                u'ext': u'mp4',
                u'title': u'News: Stuff You Won\'t Do with Your PlayStation 4',
                u'uploader': u'ign',
                u'description': u'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
            },
        },
        # AnyClip video
        {
            u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/",
            u"file": u"an-dVVXnuY7Jh77J.mp4",
            u"info_dict": {
                u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3",
                u"uploader": u"anyclip",
                u"description": u"md5:38c711dd98f5bb87acf973d573442e67",
            },
        },
        # age-restricted video
        {
            u'url': u'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
            u'md5': u'98dde7c1a35d02178e8ab7560fe8bd09',
            u'info_dict': {
                u'id': u'5186653',
                u'ext': u'mp4',
                u'title': u'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
                u'uploader': u'Dwayne Pipe',
                u'description': u'md5:950bf4c581e2c059911fa3ffbe377e4b',
                u'age_limit': 18,
            },
        },
        # cbs video
        {
            u'url': u'http://www.metacafe.com/watch/cb-0rOxMBabDXN6/samsung_galaxy_note_2_samsungs_next_generation_phablet/',
            u'info_dict': {
                u'id': u'0rOxMBabDXN6',
                u'ext': u'flv',
                u'title': u'Samsung Galaxy Note 2: Samsung\'s next-generation phablet',
                u'description': u'md5:54d49fac53d26d5a0aaeccd061ada09d',
                u'duration': 129,
            },
            u'params': {
                # rtmp download
                u'skip_download': True,
            },
        },
    ]
    def report_disclaimer(self):
        """Report disclaimer retrieval."""
        self.to_screen(u'Retrieving disclaimer')
    def _real_initialize(self):
        # Retrieve disclaimer
        self.report_disclaimer()
        self._download_webpage(self._DISCLAIMER, None, False, u'Unable to retrieve disclaimer')
        # Confirm age by submitting the family-filter form with filters off
        disclaimer_form = {
            'filters': '0',
            'submit': "Continue - I'm over 18",
            }
        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self.report_age_confirmation()
        self._download_webpage(request, None, False, u'Unable to confirm age')
    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(1)
        # the video may come from an external site
        # (two-letter prefix followed by a dash, e.g. "yt-<id>")
        m_external = re.match('^(\w{2})-(.*)$', video_id)
        if m_external is not None:
            prefix, ext_id = m_external.groups()
            # Check if video comes from YouTube
            if prefix == 'yt':
                return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube')
            # CBS videos use theplatform.com
            if prefix == 'cb':
                return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
        # Retrieve video webpage to extract further information
        req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
        # AnyClip videos require the flashversion cookie so that we get the link
        # to the mp4 file
        mobj_an = re.match(r'^an-(.*?)$', video_id)
        if mobj_an:
            req.headers['Cookie'] = 'flashVersion=0;'
        webpage = self._download_webpage(req, video_id)
        # Extract URL, uploader and title from webpage
        # Fallback chain: mediaURL parameter -> <video src> tag -> flashvars
        self.report_extraction(video_id)
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
            video_ext = mediaURL[-3:]
            # Extract gdaKey if available
            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
            if mobj is None:
                video_url = mediaURL
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
        else:
            mobj = re.search(r'<video src="([^"]+)"', webpage)
            if mobj:
                video_url = mobj.group(1)
                video_ext = 'mp4'
            else:
                mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
                if mobj is None:
                    raise ExtractorError(u'Unable to extract media URL')
                vardict = compat_parse_qs(mobj.group(1))
                if 'mediaData' not in vardict:
                    raise ExtractorError(u'Unable to extract media URL')
                mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
                if mobj is None:
                    raise ExtractorError(u'Unable to extract media URL')
                mediaURL = mobj.group('mediaURL').replace('\\/', '/')
                video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
                video_ext = determine_ext(video_url)
        video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, u'title')
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        video_uploader = self._html_search_regex(
                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
                webpage, u'uploader nickname', fatal=False)
        # pages for restricted content carry an explicit contentRating marker
        if re.search(r'"contentRating":"restricted"', webpage) is not None:
            age_limit = 18
        else:
            age_limit = 0
        return {
            '_type':    'video',
            'id':       video_id,
            'url':      video_url,
            'description': description,
            'uploader': video_uploader,
            'upload_date':  None,
            'title':    video_title,
            'thumbnail':thumbnail,
            'ext':      video_ext,
            'age_limit': age_limit,
        }
| vgrachev8/youtube-dl | youtube_dl/extractor/metacafe.py | Python | unlicense | 7,574 | [
"Galaxy"
] | 7d53a241ab7867bcd2fb795e4f4a856d536e183cb8cb41d91be22c49058df3ea |
## \file
## \ingroup tutorial_roofit
## \notebook
##
## \brief Basic functionality: plotting unbinned data with alternate and variable binnings
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up model
# ---------------------
# Build a B decay p.d.f with mixing
dt = ROOT.RooRealVar("dt", "dt", -20, 20)
dm = ROOT.RooRealVar("dm", "dm", 0.472)
tau = ROOT.RooRealVar("tau", "tau", 1.547)
w = ROOT.RooRealVar("w", "mistag rate", 0.1)
dw = ROOT.RooRealVar("dw", "delta mistag rate", 0.)
mixState = ROOT.RooCategory("mixState", "B0/B0bar mixing state")
mixState.defineType("mixed", -1)
mixState.defineType("unmixed", 1)
tagFlav = ROOT.RooCategory("tagFlav", "Flavour of the tagged B0")
tagFlav.defineType("B0", 1)
tagFlav.defineType("B0bar", -1)
# Build a gaussian resolution model
# NOTE(review): dterr is created here but never used below -- confirm
# whether it can be removed or was meant to feed the resolution model.
dterr = ROOT.RooRealVar("dterr", "dterr", 0.1, 1.0)
bias1 = ROOT.RooRealVar("bias1", "bias1", 0)
sigma1 = ROOT.RooRealVar("sigma1", "sigma1", 0.1)
gm1 = ROOT.RooGaussModel("gm1", "gauss model 1", dt, bias1, sigma1)
# Construct Bdecay (x) gauss
bmix = ROOT.RooBMixDecay("bmix", "decay", dt, mixState, tagFlav,
                         tau, dm, w, dw, gm1, ROOT.RooBMixDecay.DoubleSided)
# Sample data from model
# --------------------------------------------
# Sample 2000 events in (dt,mixState,tagFlav) from bmix
data = bmix.generate(ROOT.RooArgSet(dt, mixState, tagFlav), 2000)
# Show dt distribution with custom binning
# -------------------------------------------------------------------------------
# Make plot of dt distribution of data in range (-15,15) with fine binning
# for dt>0 and coarse binning for dt<0
# Create binning object with range (-15,15)
tbins = ROOT.RooBinning(-15, 15)
# Add 60 bins with uniform spacing in range (-15,0)
tbins.addUniform(60, -15, 0)
# Add 15 bins with uniform spacing in range (0,15)
tbins.addUniform(15, 0, 15)
# Make plot with specified binning
dtframe = dt.frame(ROOT.RooFit.Range(-15, 15),
                   ROOT.RooFit.Title("dt distribution with custom binning"))
data.plotOn(dtframe, ROOT.RooFit.Binning(tbins))
bmix.plotOn(dtframe)
# NB: Note that bin density for each bin is adjusted to that of default frame binning as shown
# in Y axis label (100 bins -. Events/0.4*Xaxis-dim) so that all bins
# represent a consistent density distribution
# Show mixstate asymmetry with custom binning
# ------------------------------------------------------------------------------------
# Make plot of dt distribution of data asymmetry in 'mixState' with
# variable binning
# Create binning object with range (-10,10)
abins = ROOT.RooBinning(-10, 10)
# Add boundaries at 0, (-1,1), (-2,2), (-3,3), (-4,4) and (-6,6)
abins.addBoundary(0)
abins.addBoundaryPair(1)
abins.addBoundaryPair(2)
abins.addBoundaryPair(3)
abins.addBoundaryPair(4)
abins.addBoundaryPair(6)
# Create plot frame in dt
aframe = dt.frame(ROOT.RooFit.Range(-10, 10), ROOT.RooFit.Title(
    "mixState asymmetry distribution with custom binning"))
# Plot mixState asymmetry of data with specified customg binning
data.plotOn(aframe, ROOT.RooFit.Asymmetry(
    mixState), ROOT.RooFit.Binning(abins))
# Plot corresponding property of p.d.f
bmix.plotOn(aframe, ROOT.RooFit.Asymmetry(mixState))
# Adjust vertical range of plot to sensible values for an asymmetry
aframe.SetMinimum(-1.1)
aframe.SetMaximum(1.1)
# NB: For asymmetry distributions no density corrects are needed (and are
# thus not applied)
# Draw plots on canvas
c = ROOT.TCanvas("rf108_plotbinning", "rf108_plotbinning", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
dtframe.GetYaxis().SetTitleOffset(1.6)
dtframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
aframe.GetYaxis().SetTitleOffset(1.6)
aframe.Draw()
c.SaveAs("rf108_plotbinning.png")
| karies/root | tutorials/roofit/rf108_plotbinning.py | Python | lgpl-2.1 | 3,801 | [
"Gaussian"
] | 81803b1eaff68db2407bde3d279e08f954090097abf4e6c914df3492ed949deb |
"""Only External Repos url specific constants module"""
from robottelo.config import settings
# Base URL of the internal repo hosting server; most fixture repos below
# are served relative to it via f-strings.
REPOS_URL = settings.robottelo.repos_hosting_url
CUSTOM_FILE_REPO = 'https://fixtures.pulpproject.org/file/'
CUSTOM_KICKSTART_REPO = 'http://ftp.cvut.cz/centos/8/BaseOS/x86_64/kickstart/'
CUSTOM_RPM_REPO = 'https://fixtures.pulpproject.org/rpm-signed/'
CUSTOM_RPM_SHA_512 = 'https://fixtures.pulpproject.org/rpm-with-sha-512/'
CUSTOM_MODULE_STREAM_REPO_1 = f'{REPOS_URL}/module_stream1'
CUSTOM_MODULE_STREAM_REPO_2 = f'{REPOS_URL}/module_stream2'
CUSTOM_SWID_TAG_REPO = f'{REPOS_URL}/swid_zoo'
# Yum fixture repositories
FAKE_0_YUM_REPO = f'{REPOS_URL}/fake_yum0'
FAKE_1_YUM_REPO = f'{REPOS_URL}/fake_yum1'
FAKE_2_YUM_REPO = f'{REPOS_URL}/fake_yum2'
FAKE_3_YUM_REPO = f'{REPOS_URL}/fake_yum3'
FAKE_4_YUM_REPO = f'{REPOS_URL}/fake_yum4'
# template: expects HTTP auth credentials via str.format (username, password)
FAKE_5_YUM_REPO = 'http://{0}:{1}@rplevka.fedorapeople.org/fakerepo01/'
FAKE_6_YUM_REPO = f'{REPOS_URL}/needed_errata'
FAKE_7_YUM_REPO = f'{REPOS_URL}/pulp/demo_repos/large_errata/zoo/'
FAKE_8_YUM_REPO = f'{REPOS_URL}/lots_files'
FAKE_9_YUM_REPO = f'{REPOS_URL}/multiple_errata'
FAKE_10_YUM_REPO = f'{REPOS_URL}/modules_rpms'
FAKE_11_YUM_REPO = f'{REPOS_URL}/rpm_deps'
FAKE_YUM_DRPM_REPO = 'https://fixtures.pulpproject.org/drpm-signed/'
FAKE_YUM_SRPM_REPO = 'https://fixtures.pulpproject.org/srpm-signed/'
FAKE_YUM_SRPM_DUPLICATE_REPO = 'https://fixtures.pulpproject.org/srpm-duplicate/'
FAKE_YUM_MIXED_REPO = f'{REPOS_URL}/yum_mixed'
FAKE_YUM_MD5_REPO = 'https://fixtures.pulpproject.org/rpm-with-md5/'
# Puppet fixture repositories
CUSTOM_PUPPET_REPO = f'{REPOS_URL}/custom_puppet'
FAKE_0_PUPPET_REPO = f'{REPOS_URL}/fake_puppet0'
FAKE_1_PUPPET_REPO = f'{REPOS_URL}/fake_puppet1'
FAKE_2_PUPPET_REPO = f'{REPOS_URL}/fake_puppet2'
FAKE_3_PUPPET_REPO = f'{REPOS_URL}/fake_puppet3'
FAKE_4_PUPPET_REPO = f'{REPOS_URL}/fake_puppet4'
FAKE_5_PUPPET_REPO = f'{REPOS_URL}/fake_puppet5'
FAKE_6_PUPPET_REPO = f'{REPOS_URL}/fake_puppet6'
# template: expects HTTP auth credentials via str.format (username, password)
FAKE_7_PUPPET_REPO = 'http://{0}:{1}@rplevka.fedorapeople.org/fakepuppet01/'
FAKE_8_PUPPET_REPO = f'{REPOS_URL}/fake_puppet8'
# Fedora's OSTree repo changed to a single repo at
# https://kojipkgs.fedoraproject.org/compose/ostree/repo/
# With branches for each version. Some tests (test_positive_update_url) still need 2 repos URLs,
# We will use the archived versions for now, but probably need to revisit this.
# NOTE(review): both constants currently point at the same archived URL.
FEDORA26_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
FEDORA27_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
OSTREE_REPO = 'https://fixtures.pulpproject.org/ostree/small/'
REPO_DISCOVERY_URL = f'{REPOS_URL}/repo_discovery/'
FAKE_0_INC_UPD_URL = f'{REPOS_URL}/inc_update/'
FAKE_PULP_REMOTE_FILEREPO = f'{REPOS_URL}/pulp_remote'
FAKE_0_YUM_REPO_STRING_BASED_VERSIONS = (
    'https://fixtures.pulpproject.org/rpm-string-version-updateinfo/'
)
EPEL_REPO = f'{REPOS_URL}/epel_repo'
# External automation content endpoints
ANSIBLE_GALAXY = 'https://galaxy.ansible.com/'
ANSIBLE_HUB = 'https://cloud.redhat.com/api/automation-hub/'
| jyejare/robottelo | robottelo/constants/repos.py | Python | gpl-3.0 | 2,980 | [
"Galaxy"
] | 51f4d7758dc688284d319deeab1eecf10792017fd6ad95cb5703f5e3fdcfb07d |
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# USAGE
# python finding_function_names.py
# import the necessary packages
from __future__ import print_function
import imutils
# loop over various function strings to search for and try to
# locate them in the OpenCV library
for funcName in ("contour", "box", "gaussian"):
print("[INFO] Finding all functions that contain `{}`".format(funcName))
imutils.find_function(funcName)
print("") | xuanhan863/imutils | demos/finding_function_names.py | Python | mit | 466 | [
"Gaussian"
] | 10f2b5c7296927c032a1c4331b0ea612b6241f1bdaca0c81f26d1c3f12d66ad9 |
import mock
import unittest
from nose.tools import *
import tempfile
from bs4 import BeautifulSoup
from pyrobot.compat import builtin_name
from pyrobot.forms import Form, fields
from pyrobot.forms.form import _parse_fields
class TestForm(unittest.TestCase):
    """Field discovery on a form containing one widget of each kind."""

    def setUp(self):
        self.html = '''
        <form>
            <input name="vocals" />
            <input name="guitar" type="file" />
            <select name="drums">
                <option value="roger">Roger<br />
                <option value="john">John<br />
            </select>
            <input type="radio" name="bass" value="Roger">Roger<br />
            <input type="radio" name="bass" value="John">John<br />
        </form>
        '''
        self.form = Form(self.html)

    def test_fields(self):
        # both the fields dict and the form's mapping view expose all names
        expected = {'vocals', 'guitar', 'drums', 'bass'}
        assert_equal(set(self.form.fields.keys()), expected)
        assert_equal(set(self.form.keys()), expected)
class TestParser(unittest.TestCase):
    """Unit tests for _parse_fields: one field type per test."""
    def setUp(self):
        self.form = Form('<form></form>')
    def test_parse_input(self):
        html = '<input name="band" value="queen" />'
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 1)
        assert_true(isinstance(_fields['band'], fields.Input))
    def test_parse_file_input(self):
        # type="file" must map to FileInput, not plain Input
        html = '<input name="band" type="file" />'
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 1)
        assert_true(isinstance(_fields['band'], fields.FileInput))
    def test_parse_textarea(self):
        html = '<textarea name="band">queen</textarea>'
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 1)
        assert_true(isinstance(_fields['band'], fields.Textarea))
    def test_parse_radio(self):
        # radio inputs sharing a name collapse into one Radio field
        html = '''
            <input type="radio" name="favorite_member" />freddie<br />
            <input type="radio" name="favorite_member" />brian<br />
            <input type="radio" name="favorite_member" />roger<br />
            <input type="radio" name="favorite_member" />john<br />
            <input type="radio" name="favorite_song" />rhapsody<br />
            <input type="radio" name="favorite_song" />killer<br />
        '''
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 2)
        assert_true(isinstance(_fields['favorite_member'], fields.Radio))
        assert_true(isinstance(_fields['favorite_song'], fields.Radio))
        assert_equal(
            len(_fields['favorite_member']._parsed), 4
        )
        assert_equal(
            len(_fields['favorite_song']._parsed), 2
        )
    def test_parse_checkbox(self):
        # checkboxes sharing a name collapse into one Checkbox field
        html = '''
            <input type="checkbox" name="favorite_member" />freddie<br />
            <input type="checkbox" name="favorite_member" />brian<br />
            <input type="checkbox" name="favorite_member" />roger<br />
            <input type="checkbox" name="favorite_member" />john<br />
            <input type="checkbox" name="favorite_song" />rhapsody<br />
            <input type="checkbox" name="favorite_song" />killer<br />
        '''
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 2)
        assert_true(isinstance(_fields['favorite_member'], fields.Checkbox))
        assert_true(isinstance(_fields['favorite_song'], fields.Checkbox))
        assert_equal(
            len(_fields['favorite_member']._parsed), 4
        )
        assert_equal(
            len(_fields['favorite_song']._parsed), 2
        )
    def test_parse_select(self):
        html = '''
            <select name="instrument">
                <option value="vocals">vocals</option>
                <option value="guitar">guitar</option>
                <option value="drums">drums</option>
                <option value="bass">bass</option>
            </select>
        '''
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 1)
        assert_true(isinstance(_fields['instrument'], fields.Select))
    def test_parse_select_multi(self):
        # the "multiple" attribute switches the field class to MultiSelect
        html = '''
            <select name="instrument" multiple>
                <option value="vocals">vocals</option>
                <option value="guitar">guitar</option>
                <option value="drums">drums</option>
                <option value="bass">bass</option>
            </select>
        '''
        _fields = _parse_fields(BeautifulSoup(html))
        assert_equal(len(_fields), 1)
        assert_true(isinstance(_fields['instrument'], fields.MultiSelect))
class TestInput(unittest.TestCase):
    """Behavior of fields.Input wrapped around a text <input> tag."""

    def setUp(self):
        self.html = '<input name="brian" value="may" />'
        tag = BeautifulSoup(self.html).find('input')
        self.input = fields.Input(tag)

    def test_name(self):
        assert_equal(self.input.name, 'brian')

    def test_initial(self):
        # both the raw parsed value and the public value reflect the markup
        for observed in (self.input._value, self.input.value):
            assert_equal(observed, 'may')

    def test_value(self):
        self.input.value = 'red special'
        for observed in (self.input._value, self.input.value):
            assert_equal(observed, 'red special')

    def test_serialize(self):
        assert_equal(self.input.serialize(), {'brian': 'may'})
class TestInputBlank(unittest.TestCase):
    """An <input> without a value attribute reads back as the empty
    string while its raw value stays None."""

    def setUp(self):
        self.html = '<input name="blank" />'
        soup = BeautifulSoup(self.html)
        self.input = fields.Input(soup.find('input'))

    def test_initial(self):
        # No value attribute: raw value is None, public value is ''.
        assert_equal(self.input._value, None)
        assert_equal(self.input.value, '')

    def test_serialize(self):
        assert_equal(self.input.serialize(), {'blank': ''})
class TestTextarea(unittest.TestCase):
    """A <textarea> field takes its initial value from the tag body."""

    def setUp(self):
        self.html = '<textarea name="roger">taylor</textarea>'
        self.input = fields.Textarea(BeautifulSoup(self.html).find('textarea'))

    def test_name(self):
        assert_equal(self.input.name, 'roger')

    def test_initial(self):
        # The tag's text content seeds the value.
        assert_equal(self.input._value, 'taylor')
        assert_equal(self.input.value, 'taylor')

    def test_value(self):
        self.input.value = 'the drums'
        assert_equal(self.input._value, 'the drums')
        assert_equal(self.input.value, 'the drums')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'roger': 'taylor'}
        )
class TestTextareaBlank(unittest.TestCase):
    """An empty <textarea> yields the empty string (not None)."""

    def setUp(self):
        self.html = '<textarea name="blank"></textarea>'
        self.input = fields.Textarea(BeautifulSoup(self.html).find('textarea'))

    def test_initial(self):
        # Unlike a blank <input>, the raw value is '' rather than None.
        assert_equal(self.input._value, '')
        assert_equal(self.input.value, '')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'blank': ''}
        )
class TestSelect(unittest.TestCase):
    """A single-valued <select> with a pre-selected option.

    The raw value (``_value``) is the option index; the public value is
    the option's ``value`` attribute.
    """

    def setUp(self):
        self.html = '''
<select name="john">
<option value="tie">your mother down</option>
<option value="you're" selected>my best friend</option>
<option value="the">millionaire waltz</option>
</select>
'''
        self.input = fields.Select(BeautifulSoup(self.html).find('select'))

    def test_name(self):
        assert_equal(self.input.name, 'john')

    def test_options(self):
        assert_equal(
            self.input.options,
            ['tie', "you're", 'the']
        )

    def test_initial(self):
        # The 'selected' attribute picks index 1.
        assert_equal(self.input._value, 1)
        assert_equal(self.input.value, "you're")

    def test_value(self):
        # Assigning an option value stores its index.
        self.input.value = 'the'
        assert_equal(self.input._value, 2)
        assert_equal(self.input.value, 'the')

    def test_value_label(self):
        # Options may also be chosen by their visible label text.
        self.input.value = 'millionaire waltz'
        assert_equal(self.input._value, 2)
        assert_equal(self.input.value, 'the')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'john': "you're"}
        )
class TestSelectBlank(unittest.TestCase):
    """A <select> with no 'selected' attribute defaults to the first
    option, mirroring browser behaviour."""

    def setUp(self):
        self.html = '''
<select name="john">
<option value="tie">your mother down</option>
<option value="you're">my best friend</option>
<option value="the">millionaire waltz</option>
</select>
'''
        self.input = fields.Select(BeautifulSoup(self.html).find('select'))

    def test_name(self):
        assert_equal(self.input.name, 'john')

    def test_initial(self):
        # First option (index 0) is implicitly selected.
        assert_equal(self.input._value, 0)
        assert_equal(self.input.value, 'tie')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'john': 'tie'}
        )
class TestMultiSelect(unittest.TestCase):
    """Fixture for MultiSelect fields.

    NOTE(review): this class defines only ``setUp`` and no test methods,
    so it currently asserts nothing -- looks like a placeholder.
    """

    def setUp(self):
        self.html = '''
<select name="john" multiple>
<option value="tie">your mother down</option>
<option value="you're" selected>my best friend</option>
<option value="the">millionaire waltz</option>
</select>
'''
        self.input = fields.MultiSelect(BeautifulSoup(self.html).find('select'))
class TestMixedCase(unittest.TestCase):
    """Attribute names and values are matched case-insensitively:
    upper-case TYPE/NAME attributes still group into one field."""

    def test_upper_type(self):
        html = '''
<input type="RADIO" name="members" value="mercury" />vocals<br />
'''
        input = fields.Radio(BeautifulSoup(html).find_all('input'))
        assert_equal(input.name, 'members')

    def test_upper_name(self):
        html = '''
<input type="radio" NAME="members" value="mercury" />vocals<br />
'''
        input = fields.Radio(BeautifulSoup(html).find_all('input'))
        assert_equal(input.name, 'members')

    def test_mixed_radio_names(self):
        # 'members' and 'MEMBERS' are treated as the same field; the
        # lower-cased name wins and both options are collected.
        html = '''
<input type="radio" NAME="members" value="mercury" />vocals<br />
<input type="radio" NAME="MEMBERS" value="may" />guitar<br />
'''
        input = fields.Radio(BeautifulSoup(html).find_all('input'))
        assert_equal(input.name, 'members')
        assert_equal(
            input.options,
            ['mercury', 'may']
        )
class TestRadio(unittest.TestCase):
    """Radio group behaviour.

    Note the fixture marks *two* inputs ``checked`` (invalid for radios);
    the tests pin that the first checked option wins.
    """

    def setUp(self):
        self.html = '''
<input type="radio" name="members" value="mercury" checked />vocals<br />
<input type="radio" name="members" value="may" />guitar<br />
<input type="radio" name="members" value="taylor" />drums<br />
<input type="radio" name="members" value="deacon" checked />bass<br />
'''
        self.input = fields.Radio(BeautifulSoup(self.html).find_all('input'))

    def test_name(self):
        assert_equal(self.input.name, 'members')

    def test_options(self):
        assert_equal(
            self.input.options,
            ['mercury', 'may', 'taylor', 'deacon']
        )

    def test_initial(self):
        # First checked option ('mercury') is the initial value.
        assert_equal(self.input.value, 'mercury')

    def test_value(self):
        # Assigning an option value stores its index.
        self.input.value = 'taylor'
        assert_equal(self.input._value, 2)
        assert_equal(self.input.value, 'taylor')

    def test_value_label(self):
        # Options can also be selected by their visible label text.
        self.input.value = 'drums'
        assert_equal(self.input._value, 2)
        assert_equal(self.input.value, 'taylor')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'members': 'mercury'}
        )
class TestRadioBlank(unittest.TestCase):
    """A radio group with nothing checked serializes the empty string."""

    def setUp(self):
        self.html = '''
<input type="radio" name="member" value="mercury" />vocals<br />
<input type="radio" name="member" value="may" />guitar<br />
<input type="radio" name="member" value="taylor" />drums<br />
<input type="radio" name="member" value="deacon" />bass<br />
'''
        self.input = fields.Radio(BeautifulSoup(self.html).find_all('input'))

    def test_initial(self):
        # No 'checked' attribute anywhere: value is ''.
        assert_equal(self.input.value, '')

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'member': ''}
        )
class TestCheckbox(unittest.TestCase):
    """Checkbox group behaviour with two pre-checked options.

    Bug fixed: the class previously defined ``test_value`` twice, so the
    first definition (single-string assignment) was shadowed by the
    second and never executed.  The list-assignment variant is renamed
    ``test_value_list`` so both run.
    """

    def setUp(self):
        self.html = '''
<input type="checkbox" name="member" value="mercury" checked />vocals<br />
<input type="checkbox" name="member" value="may" />guitar<br />
<input type="checkbox" name="member" value="taylor" />drums<br />
<input type="checkbox" name="member" value="deacon" checked />bass<br />
'''
        self.input = fields.Checkbox(BeautifulSoup(self.html).find_all('input'))

    def test_name(self):
        assert_equal(self.input.name, 'member')

    def test_options(self):
        assert_equal(
            self.input.options,
            ['mercury', 'may', 'taylor', 'deacon']
        )

    def test_initial(self):
        # 'checked' attributes pre-select mercury (0) and deacon (3).
        assert_equal(
            self.input.value,
            ['mercury', 'deacon']
        )

    def test_value(self):
        # Assigning a single string yields a one-element selection.
        self.input.value = 'taylor'
        assert_equal(self.input._value, [2])
        assert_equal(self.input.value, ['taylor'])

    def test_value_list(self):
        # Assigning a list selects every named option.
        self.input.value = ['taylor', 'deacon']
        assert_equal(self.input._value, [2, 3])
        assert_equal(self.input.value, ['taylor', 'deacon'])

    def test_value_label(self):
        # Options can also be selected by their visible label text.
        self.input.value = 'drums'
        assert_equal(self.input._value, [2])
        assert_equal(self.input.value, ['taylor'])

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'member': ['mercury', 'deacon']}
        )
class TestCheckboxBlank(unittest.TestCase):
    """A checkbox group with nothing checked serializes an empty list."""

    def setUp(self):
        self.html = '''
<input type="checkbox" name="member" value="mercury" />vocals<br />
<input type="checkbox" name="member" value="may" />guitar<br />
<input type="checkbox" name="member" value="taylor" />drums<br />
<input type="checkbox" name="member" value="deacon" />bass<br />
'''
        self.input = fields.Checkbox(BeautifulSoup(self.html).find_all('input'))

    def test_initial(self):
        # No 'checked' attribute anywhere: value is [].
        assert_equal(
            self.input.value, []
        )

    def test_serialize(self):
        assert_equal(
            self.input.serialize(),
            {'member': []}
        )
class TestFileInput(unittest.TestCase):
    """File inputs accept either an open file object or a path string
    (which is opened on assignment) and serialize the file object."""

    def setUp(self):
        self.html = '<input name="song" type="file" />'
        self.input = fields.FileInput(BeautifulSoup(self.html).find('input'))

    def test_name(self):
        assert_equal(self.input.name, 'song')

    def test_value_file(self):
        # Assigning an already-open file stores it as-is.
        file = tempfile.TemporaryFile('r')
        self.input.value = file
        assert_equal(self.input._value, file)
        assert_equal(self.input.value, file)

    @mock.patch('{0}.open'.format(builtin_name))
    def test_value_name(self, mock_open):
        # Assigning a path string routes through builtin open();
        # open() is mocked so no real file named 'temp' is touched.
        file = tempfile.TemporaryFile('r')
        mock_open.return_value = file
        self.input.value = 'temp'
        assert_equal(self.input._value, file)
        assert_equal(self.input.value, file)

    def test_serialize(self):
        file = tempfile.TemporaryFile('r')
        self.input.value = file
        assert_equal(
            self.input.serialize(),
            {'song': file}
        )
| jmcarp/pyrobot | tests/test_forms.py | Python | bsd-3-clause | 15,369 | [
"Brian"
] | 65d13310b77ae6c2ae0b7d3acb213b0da30229949d2dcf38fdfd86e559f93a58 |
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Read a table dump in the UCSC gene table format and print a tab separated
list of intervals corresponding to requested features of each gene.
usage: ucsc_gene_table_to_intervals.py [options] < gene_table.txt
options:
-h, --help show this help message and exit
-rREGION, --region=REGION
Limit to region: one of coding, utr3, utr5, transcribed [default]
-e, --exons Only print intervals overlapping an exon
"""
import optparse, string, sys
def main():
    """Read a UCSC gene table on stdin and print tab-separated intervals
    (chrom, start, end[, strand]) for the requested region of each gene.

    Regions: coding (CDS), utr3, utr5, or transcribed (default).  With
    ``-e`` only the exon portions overlapping the region are printed.

    Bug fixed: the strand flag was tested with ``if strand:`` -- the
    per-row strand column, always truthy for well-formed rows -- so
    ``-s/--strand`` had no effect and the strand was printed on every
    line.  The tests now use ``options.strand`` as intended.
    """
    # Parse command line
    parser = optparse.OptionParser( usage="%prog [options] < gene_table.txt" )
    parser.add_option( "-r", "--region", dest="region", default="transcribed",
                       help="Limit to region: one of coding, utr3, utr5, transcribed [default]" )
    parser.add_option( "-e", "--exons", action="store_true", dest="exons",
                       help="Only print intervals overlapping an exon" )
    parser.add_option( "-s", "--strand", action="store_true", dest="strand",
                       help="Print strand after interval" )
    parser.add_option( "-b", "--nobin", action="store_false", dest="discard_first_column", default=True,
                       help="file doesn't contain a 'bin' column (use this for pre-hg18 files)" )
    options, args = parser.parse_args()
    assert options.region in ( 'coding', 'utr3', 'utr5', 'transcribed' ), "Invalid region argument"

    # Read table from stdin and handle each gene
    for line in sys.stdin:
        # Parse fields from gene table (post-hg18 tables carry a leading
        # 'bin' column which is dropped unless -b was given).
        fields = line.split( '\t' )
        if (options.discard_first_column): fields.pop(0)
        chrom = fields[1]
        strand = fields[2]
        tx_start = int( fields[3] )
        tx_end = int( fields[4] )
        cds_start = int( fields[5] )
        cds_end = int( fields[6] )

        # Determine the subset of the transcribed region we are interested in.
        # UTR sides swap on the minus strand.
        if options.region == 'utr3':
            if strand == '-': region_start, region_end = tx_start, cds_start
            else: region_start, region_end = cds_end, tx_end
        elif options.region == 'utr5':
            if strand == '-': region_start, region_end = cds_end, tx_end
            else: region_start, region_end = tx_start, cds_start
        elif options.region == 'coding':
            region_start, region_end = cds_start, cds_end
        else:
            region_start, region_end = tx_start, tx_end

        # If only interested in exons, print the portion of each exon overlapping
        # the region of interest, otherwise print the span of the region
        if options.exons:
            exon_starts = map( int, fields[8].rstrip( ',\n' ).split( ',' ) )
            exon_ends = map( int, fields[9].rstrip( ',\n' ).split( ',' ) )
            for start, end in zip( exon_starts, exon_ends ):
                start = max( start, region_start )
                end = min( end, region_end )
                if start < end:
                    if options.strand: print_tab_sep( chrom, start, end, strand )
                    else: print_tab_sep( chrom, start, end )
        else:
            if options.strand: print_tab_sep( chrom, region_start, region_end, strand )
            else: print_tab_sep( chrom, region_start, region_end )
def print_tab_sep( *args ):
"""Print items in `l` to stdout separated by tabs"""
print string.join( [ str( f ) for f in args ], '\t' )
if __name__ == "__main__": main()
| bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/ucsc_gene_table_to_intervals.py | Python | bsd-3-clause | 3,471 | [
"Galaxy"
] | e6f8cfa4c8209977495c47b84be57d6502013745f596e2f4b5befb7069b39e34 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
    """Principal component analysis via eigendecomposition of the
    covariance matrix.

    Returns (eigenvectors, eigenvalues, column-wise mean, centered data,
    covariance matrix).  NOTE(review): the eigenpairs are returned in
    whatever order linalg.eig produces them -- they are NOT sorted by
    eigenvalue here; callers appear to rely on downstream ordering.
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)

    ###### Sanity Check ######
    # x != x is true only for NaN, so this scans for NaNs and prints
    # their locations.  The 123 x 90 bounds are hard-coded to this
    # dataset's expected shape -- TODO confirm against Fmat dimensions.
    i=0
    n=0
    while i < 123:
        j=0
        while j < 90:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################

    print 'PCA - COV-Method used'
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
    """Cross-validated kNN classification accuracy (percent) of the
    projected data.

    Y:    projected samples (rows = 90 observations, 18 classes x 5).
    num2: k for the kNN classifier.  NOTE(review): callers pass num2=0
    on the first iteration -- verify kNN(k=0) is meaningful in PyMVPA.
    """
    #Using PYMVPA
    PCA_data = np.array(Y)
    # 18 class labels, 5 consecutive samples each, matching the row
    # order of the data matrix.
    PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
    clf = kNN(k=num2)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
    # Leave-one-out style N-fold cross validation.
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    """Project the centered data B onto the first num_PC eigenvectors.

    Returns the projected samples as rows (shape: samples x num_PC).
    NOTE(review): assumes eigvec_total columns are already ordered by
    decreasing eigenvalue -- pca() does not sort them; confirm upstream.
    """
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:num_PC]
    m_W, n_W = np.shape(W)

    #Projected Data:
    Y = (W.T)*B
    m_Y, n_Y = np.shape(Y.T)
    return Y.T
if __name__ == '__main__':
    # Load the precomputed feature matrix and run PCA on it.
    Fmat = Fmat_original

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot

    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Sweep the number of principal components (1..20); for each, sweep
    # k in the kNN classifier (0..20) and plot cross-validation accuracy.
    num_PC=1
    while num_PC <=20:
        Proj = np.zeros((90,num_PC))
        Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
        # PYMVPA:
        num=0
        cv_acc = np.zeros(21)
        while num <=20:
            cv_acc[num] = my_mvpa(Proj,num)
            num = num+1
        plot(np.arange(21),cv_acc,'-s')
        grid('True')
        hold('True')
        num_PC = num_PC+1
    legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
    ylabel('Cross-Validation Accuracy')
    xlabel('k in k-NN Classifier')
    show()
"Mayavi"
] | 8e8a8350a99a348ac225c8593cf36979df5be0a660905c57d19eb3d5afc025ea |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform fitting of molecule with arbitrary
atom orders.
This module is supposed to perform exact comparisons without the atom order
correspondence prerequisite, while molecule_structure_comparator is supposed
to do rough comparisons with the atom order correspondence prerequisite.
"""
__author__ = "Xiaohui Qu"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Xiaohui Qu"
__email__ = "xhqu1981@gmail.com"
__status__ = "Experimental"
__date__ = "Jun 7, 2013"
import re
import math
import abc
import itertools
import copy
from monty.json import MSONable
from monty.dev import requires
try:
from openbabel import openbabel as ob
from pymatgen.io.babel import BabelMolAdaptor
except ImportError:
ob = None
class AbstractMolAtomMapper(MSONable, metaclass=abc.ABCMeta):
    """
    Abstract molecular atom order mapping class. A mapping will be able to
    find the uniform atom order of two molecules that can pair the
    geometrically equivalent atoms.
    """

    @abc.abstractmethod
    def uniform_labels(self, mol1, mol2):
        """
        Pair the geometrically equivalent atoms of the molecules.

        Args:
            mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.
            mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.

        Returns:
            (list1, list2) if uniform atom order is found. list1 and list2
            are for mol1 and mol2, respectively. Their length equal
            to the number of atoms. They represents the uniform atom order
            of the two molecules. The value of each element is the original
            atom index in mol1 or mol2 of the current atom in uniform atom
            order.
            (None, None) if unform atom is not available.
        """
        pass

    @abc.abstractmethod
    def get_molecule_hash(self, mol):
        """
        Defines a hash for molecules. This allows molecules to be grouped
        efficiently for comparison.

        Args:
            mol: The molecule. OpenBabel OBMol or pymatgen Molecule object

        Returns:
            A hashable object. Examples can be string formulas, etc.
        """
        pass

    @classmethod
    def from_dict(cls, d):
        """
        Reconstruct a concrete mapper instance from its dict representation.

        Args:
            d (dict): Dict representation carrying an "@class" key naming
                the concrete mapper class.

        Returns:
            AbstractMolAtomMapper subclass instance.

        Raises:
            ValueError: If "@class" does not name a known mapper class.
        """
        for trans_modules in ['molecule_matcher']:
            # Absolute import (level=0).  The former Python 2 fallback
            # (level=-1 behind a sys.version_info check) was dead code:
            # this module already uses Python-3-only syntax.
            mod = __import__('pymatgen.analysis.' + trans_modules,
                             globals(), locals(), [d['@class']], 0)
            if hasattr(mod, d['@class']):
                class_proxy = getattr(mod, d['@class'])
                from_dict_proxy = getattr(class_proxy, "from_dict")
                return from_dict_proxy(d)
        raise ValueError("Invalid Comparator dict")
class IsomorphismMolAtomMapper(AbstractMolAtomMapper):
    """
    Pair atoms by isomorphism permutations in the OpenBabel::OBAlign class
    """

    def uniform_labels(self, mol1, mol2):
        """
        Pair the geometrically equivalent atoms of the molecules.
        Calculate RMSD on all possible isomorphism mappings and return mapping
        with the least RMSD

        Args:
            mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.
            mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.

        Returns:
            (list1, list2) if uniform atom order is found. list1 and list2
            are for mol1 and mol2, respectively. Their length equal
            to the number of atoms. They represents the uniform atom order
            of the two molecules. The value of each element is the original
            atom index in mol1 or mol2 of the current atom in uniform atom
            order.
            (None, None) if unform atom is not available.
        """
        obmol1 = BabelMolAdaptor(mol1).openbabel_mol
        obmol2 = BabelMolAdaptor(mol2).openbabel_mol

        # Different InChI hashes means topologically different molecules.
        h1 = self.get_molecule_hash(obmol1)
        h2 = self.get_molecule_hash(obmol2)
        if h1 != h2:
            return None, None

        # Enumerate every isomorphism of mol1 onto mol2.
        query = ob.CompileMoleculeQuery(obmol1)
        isomapper = ob.OBIsomorphismMapper.GetInstance(query)
        isomorph = ob.vvpairUIntUInt()
        isomapper.MapAll(obmol2, isomorph)

        sorted_isomorph = [sorted(x, key=lambda morp: morp[0])
                           for x in isomorph]
        # Convert to 1-based OpenBabel atom indices.
        label2_list = tuple([tuple([p[1] + 1 for p in x])
                             for x in sorted_isomorph])

        vmol1 = obmol1
        aligner = ob.OBAlign(True, False)
        aligner.SetRefMol(vmol1)
        least_rmsd = float("Inf")
        best_label2 = None
        label1 = list(range(1, obmol1.NumAtoms() + 1))
        # noinspection PyProtectedMember
        elements1 = InchiMolAtomMapper._get_elements(vmol1, label1)
        # Keep the permutation whose aligned RMSD is smallest.
        for label2 in label2_list:
            # noinspection PyProtectedMember
            elements2 = InchiMolAtomMapper._get_elements(obmol2, label2)
            if elements1 != elements2:
                # Element mismatch: this mapping pairs unlike atoms.
                continue
            vmol2 = ob.OBMol()
            for i in label2:
                vmol2.AddAtom(obmol2.GetAtom(i))
            aligner.SetTargetMol(vmol2)
            aligner.Align()
            rmsd = aligner.GetRMSD()
            if rmsd < least_rmsd:
                least_rmsd = rmsd
                best_label2 = copy.copy(label2)
        return label1, best_label2

    def get_molecule_hash(self, mol):
        """
        Return inchi as molecular hash
        """
        obconv = ob.OBConversion()
        obconv.SetOutFormat(str("inchi"))
        # Keep the hash independent of implicit hydrogen addition.
        obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH"))
        inchi_text = obconv.WriteString(mol)
        match = re.search(r"InChI=(?P<inchi>.+)\n", inchi_text)
        return match.group("inchi")

    def as_dict(self):
        """
        Returns:
            Jsonable dict.
        """
        return {"version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            IsomorphismMolAtomMapper
        """
        return IsomorphismMolAtomMapper()
class InchiMolAtomMapper(AbstractMolAtomMapper):
    """
    Pair atoms by inchi labels.
    """

    def __init__(self, angle_tolerance=10.0):
        """
        Args:
            angle_tolerance (float): Angle threshold to assume linear molecule. In degrees.
        """
        self._angle_tolerance = angle_tolerance
        # Fallback mapper for linear / very small molecules, where the
        # centroid-based alignment below is ill-conditioned.
        self._assistant_mapper = IsomorphismMolAtomMapper()
    def as_dict(self):
        """
        Returns:
            MSONable dict with the angle tolerance, suitable for
            round-tripping through from_dict.
        """
        return {"version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "angle_tolerance": self._angle_tolerance}
    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict Representation (as produced by as_dict).

        Returns:
            InchiMolAtomMapper
        """
        return InchiMolAtomMapper(angle_tolerance=d["angle_tolerance"])
    @staticmethod
    def _inchi_labels(mol):
        """
        Get the inchi canonical labels of the heavy atoms in the molecule

        Args:
            mol: The molecule. OpenBabel OBMol object

        Returns:
            The label mappings. List of tuple of canonical label,
            original label
            List of equivalent atoms.
        """
        obconv = ob.OBConversion()
        obconv.SetOutFormat(str("inchi"))
        # 'a' adds AuxInfo (canonical numbering); 'X DoNotAddH' keeps the
        # labels independent of implicit hydrogen addition.
        obconv.AddOption(str("a"), ob.OBConversion.OUTOPTIONS)
        obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH"))
        inchi_text = obconv.WriteString(mol)
        # Pull the InChI string, the /N: canonical-order labels, and the
        # optional /E: equivalence classes out of the AuxInfo line.
        match = re.search(r"InChI=(?P<inchi>.+)\nAuxInfo=.+"
                          r"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,"
                          r";\(\)]*)/)?", inchi_text)
        inchi = match.group("inchi")
        label_text = match.group("labels")
        eq_atom_text = match.group("eq_atoms")
        heavy_atom_labels = tuple([int(i) for i in label_text.replace(
            ';', ',').split(',')])
        eq_atoms = []
        if eq_atom_text is not None:
            # Each parenthesized group lists mutually equivalent atoms.
            eq_tokens = re.findall(r'\(((?:[0-9]+,)+[0-9]+)\)', eq_atom_text
                                   .replace(';', ','))
            eq_atoms = tuple([tuple([int(i) for i in t.split(',')])
                              for t in eq_tokens])
        return heavy_atom_labels, eq_atoms, inchi
@staticmethod
def _group_centroid(mol, ilabels, group_atoms):
"""
Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z)
"""
c1x, c1y, c1z = 0.0, 0.0, 0.0
for i in group_atoms:
orig_idx = ilabels[i - 1]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return c1x, c1y, c1z
    def _virtual_molecule(self, mol, ilabels, eq_atoms):
        """
        Create a virtual molecule by unique atoms, the centriods of the
        equivalent atoms

        Args:
            mol: The molecule. OpenBabel OBMol object
            ilables: inchi label map
            eq_atoms: equivalent atom labels

        Return:
            The virtual molecule
        """
        vmol = ob.OBMol()

        non_unique_atoms = set([a for g in eq_atoms for a in g])
        all_atoms = set(range(1, len(ilabels) + 1))
        unique_atom_labels = sorted(all_atoms - non_unique_atoms)

        # try to align molecules using unique atoms
        for i in unique_atom_labels:
            orig_idx = ilabels[i - 1]
            oa1 = mol.GetAtom(orig_idx)
            a1 = vmol.NewAtom()
            a1.SetAtomicNum(oa1.GetAtomicNum())
            a1.SetVector(oa1.GetVector())

        # try to align using centroids of the equivalent atoms
        # (at least 3 reference points are needed for a rigid alignment)
        if vmol.NumAtoms() < 3:
            for symm in eq_atoms:
                c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm)
                min_distance = float("inf")
                for i in range(1, vmol.NumAtoms() + 1):
                    va = vmol.GetAtom(i)
                    distance = math.sqrt((c1x - va.x()) ** 2 + (c1y - va.y()) ** 2
                                         + (c1z - va.z()) ** 2)
                    if distance < min_distance:
                        min_distance = distance
                # Skip centroids that (nearly) coincide with an existing
                # virtual atom; represented here as a fluorine (Z=9) marker.
                if min_distance > 0.2:
                    a1 = vmol.NewAtom()
                    a1.SetAtomicNum(9)
                    a1.SetVector(c1x, c1y, c1z)
        return vmol
    @staticmethod
    def _align_heavy_atoms(mol1, mol2, vmol1, vmol2, ilabel1, ilabel2,
                           eq_atoms):
        """
        Align the label of topologically identical atoms of second molecule
        towards first molecule

        Args:
            mol1: First molecule. OpenBabel OBMol object
            mol2: Second molecule. OpenBabel OBMol object
            vmol1: First virtual molecule constructed by centroids. OpenBabel
                OBMol object
            vmol2: First virtual molecule constructed by centroids. OpenBabel
                OBMol object
            ilabel1: inchi label map of the first molecule
            ilabel2: inchi label map of the second molecule
            eq_atoms: equivalent atom lables

        Return:
            corrected inchi labels of heavy atoms of the second molecule
        """

        nvirtual = vmol1.NumAtoms()
        nheavy = len(ilabel1)

        # Pad vmol1 with dummy hydrogens and append mol2's heavy atoms to
        # vmol2 so both virtual molecules have matching atom counts; only
        # the original virtual atoms drive the alignment, the appended
        # ones just get their coordinates transformed.
        for i in ilabel2:  # add all heavy atoms
            a1 = vmol1.NewAtom()
            a1.SetAtomicNum(1)
            a1.SetVector(0.0, 0.0, 0.0)  # useless, just to pair with vmol2
            oa2 = mol2.GetAtom(i)
            a2 = vmol2.NewAtom()
            a2.SetAtomicNum(1)
            # align using the virtual atoms, these atoms are not
            # used to align, but match by positions
            a2.SetVector(oa2.GetVector())

        aligner = ob.OBAlign(False, False)
        aligner.SetRefMol(vmol1)
        aligner.SetTargetMol(vmol2)
        aligner.Align()
        aligner.UpdateCoords(vmol2)

        # mol1's heavy atoms in canonical order.
        canon_mol1 = ob.OBMol()
        for i in ilabel1:
            oa1 = mol1.GetAtom(i)
            a1 = canon_mol1.NewAtom()
            a1.SetAtomicNum(oa1.GetAtomicNum())
            a1.SetVector(oa1.GetVector())

        # mol2's heavy atoms with the aligned coordinates.
        aligned_mol2 = ob.OBMol()
        for i in range(nvirtual + 1, nvirtual + nheavy + 1):
            oa2 = vmol2.GetAtom(i)
            a2 = aligned_mol2.NewAtom()
            a2.SetAtomicNum(oa2.GetAtomicNum())
            a2.SetVector(oa2.GetVector())

        # Atoms in an equivalence group are interchangeable; resolve each
        # one greedily to the nearest unclaimed counterpart in mol1.
        canon_label2 = list(range(1, nheavy + 1))
        for symm in eq_atoms:
            for i in symm:
                canon_label2[i - 1] = -1
        for symm in eq_atoms:
            candidates1 = list(symm)
            candidates2 = list(symm)
            for c2 in candidates2:
                distance = 99999.0
                canon_idx = candidates1[0]
                a2 = aligned_mol2.GetAtom(c2)
                for c1 in candidates1:
                    a1 = canon_mol1.GetAtom(c1)
                    d = a1.GetDistance(a2)
                    if d < distance:
                        distance = d
                        canon_idx = c1
                canon_label2[c2 - 1] = canon_idx
                candidates1.remove(canon_idx)

        canon_inchi_orig_map2 = [(canon, inchi, orig)
                                 for canon, inchi, orig in
                                 zip(canon_label2, list(range(1, nheavy + 1)),
                                     ilabel2)]
        canon_inchi_orig_map2.sort(key=lambda m: m[0])
        heavy_atom_indices2 = tuple([x[2] for x in canon_inchi_orig_map2])
        return heavy_atom_indices2
    @staticmethod
    def _align_hydrogen_atoms(mol1, mol2, heavy_indices1,
                              heavy_indices2):
        """
        Align the label of topologically identical atoms of second molecule
        towards first molecule

        Args:
            mol1: First molecule. OpenBabel OBMol object
            mol2: Second molecule. OpenBabel OBMol object
            heavy_indices1: inchi label map of the first molecule
            heavy_indices2: label map of the second molecule

        Return:
            corrected label map of all atoms of the second molecule
        """
        num_atoms = mol2.NumAtoms()
        all_atom = set(range(1, num_atoms + 1))
        # Whatever is not a heavy atom is a hydrogen.
        hydrogen_atoms1 = all_atom - set(heavy_indices1)
        hydrogen_atoms2 = all_atom - set(heavy_indices2)
        label1 = heavy_indices1 + tuple(hydrogen_atoms1)
        label2 = heavy_indices2 + tuple(hydrogen_atoms2)

        # Rebuild both molecules with heavy atoms first, then align mol2
        # onto mol1 so hydrogens can be matched by proximity.
        cmol1 = ob.OBMol()
        for i in label1:
            oa1 = mol1.GetAtom(i)
            a1 = cmol1.NewAtom()
            a1.SetAtomicNum(oa1.GetAtomicNum())
            a1.SetVector(oa1.GetVector())
        cmol2 = ob.OBMol()
        for i in label2:
            oa2 = mol2.GetAtom(i)
            a2 = cmol2.NewAtom()
            a2.SetAtomicNum(oa2.GetAtomicNum())
            a2.SetVector(oa2.GetVector())

        aligner = ob.OBAlign(False, False)
        aligner.SetRefMol(cmol1)
        aligner.SetTargetMol(cmol2)
        aligner.Align()
        aligner.UpdateCoords(cmol2)

        # Greedily pair each hydrogen in mol2 with the nearest unclaimed
        # hydrogen in mol1.
        hydrogen_label2 = []
        hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1))
        for h2 in range(len(heavy_indices2) + 1, num_atoms + 1):
            distance = 99999.0
            idx = hydrogen_label1[0]
            a2 = cmol2.GetAtom(h2)
            for h1 in hydrogen_label1:
                a1 = cmol1.GetAtom(h1)
                d = a1.GetDistance(a2)
                if d < distance:
                    distance = d
                    idx = h1
            hydrogen_label2.append(idx)
            hydrogen_label1.remove(idx)

        hydrogen_orig_idx2 = label2[len(heavy_indices2):]
        hydrogen_canon_orig_map2 = [(canon, orig) for canon, orig
                                    in zip(hydrogen_label2,
                                           hydrogen_orig_idx2)]
        hydrogen_canon_orig_map2.sort(key=lambda m: m[0])
        hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]

        canon_label1 = label1
        canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2)

        return canon_label1, canon_label2
@staticmethod
def _get_elements(mol, label):
"""
The the elements of the atoms in the specified order
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers.
"""
elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
return elements
    def _is_molecule_linear(self, mol):
        """
        Is the molecule a linear one

        Checks every atom's angle against the first two atoms; any
        deviation beyond self._angle_tolerance (degrees) means non-linear.

        Args:
            mol: The molecule. OpenBabel OBMol object.

        Returns:
            Boolean value.
        """
        if mol.NumAtoms() < 3:
            # Two atoms (or one) are trivially collinear.
            return True
        a1 = mol.GetAtom(1)
        a2 = mol.GetAtom(2)
        for i in range(3, mol.NumAtoms() + 1):
            angle = float(mol.GetAtom(i).GetAngle(a2, a1))
            # Fold the angle into [0, 90] so 0 and 180 degrees both
            # count as "on the line".
            if angle < 0.0:
                angle = -angle
            if angle > 90.0:
                angle = 180.0 - angle
            if angle > self._angle_tolerance:
                return False
        return True
    def uniform_labels(self, mol1, mol2):
        """
        Pair geometrically equivalent atoms of two topologically identical
        molecules via InChI canonical labels.

        Args:
            mol1 (Molecule): Molecule 1
            mol2 (Molecule): Molecule 2

        Returns:
            (label1, label2) atom-order lists, or (None, None) if the
            molecules are topologically different or cannot be paired.
        """
        obmol1 = BabelMolAdaptor(mol1).openbabel_mol
        obmol2 = BabelMolAdaptor(mol2).openbabel_mol

        ilabel1, iequal_atom1, inchi1 = self._inchi_labels(obmol1)
        ilabel2, iequal_atom2, inchi2 = self._inchi_labels(obmol2)

        if inchi1 != inchi2:
            return None, None  # Topoligically different

        if iequal_atom1 != iequal_atom2:
            # Same InChI must imply the same equivalence classes.
            raise Exception("Design Error! Equavilent atoms are inconsistent")

        vmol1 = self._virtual_molecule(obmol1, ilabel1, iequal_atom1)
        vmol2 = self._virtual_molecule(obmol2, ilabel2, iequal_atom2)

        if vmol1.NumAtoms() != vmol2.NumAtoms():
            return None, None

        if vmol1.NumAtoms() < 3 or self._is_molecule_linear(vmol1) \
                or self._is_molecule_linear(vmol2):
            # using isomorphism for difficult (actually simple) molecules:
            # fewer than 3 reference points (or collinear ones) cannot
            # anchor a rigid 3D alignment.
            clabel1, clabel2 = self._assistant_mapper.uniform_labels(mol1, mol2)
        else:
            heavy_atom_indices2 = self._align_heavy_atoms(obmol1, obmol2,
                                                          vmol1, vmol2, ilabel1,
                                                          ilabel2, iequal_atom1)
            clabel1, clabel2 = self._align_hydrogen_atoms(obmol1, obmol2,
                                                          ilabel1,
                                                          heavy_atom_indices2)
        if clabel1 and clabel2:
            # Final sanity check: paired atoms must be the same elements.
            elements1 = self._get_elements(obmol1, clabel1)
            elements2 = self._get_elements(obmol2, clabel2)
            if elements1 != elements2:
                return None, None

        return clabel1, clabel2
    def get_molecule_hash(self, mol):
        """
        Return inchi as molecular hash
        """
        obmol = BabelMolAdaptor(mol).openbabel_mol
        # _inchi_labels returns (labels, eq_atoms, inchi); only the InChI
        # string is needed here.
        inchi = self._inchi_labels(obmol)[2]
        return inchi
class MoleculeMatcher(MSONable):
    """
    Class to match molecules and identify whether molecules are the same.
    """

    @requires(ob,
              "BabelMolAdaptor requires openbabel to be installed with "
              "Python bindings. Please get it at http://openbabel.org "
              "(version >=3.0.0).")
    def __init__(self, tolerance=0.01, mapper=InchiMolAtomMapper()):
        """
        Args:
            tolerance (float): RMSD difference threshold whether two molecules are
                different
            mapper (AbstractMolAtomMapper): MolAtomMapper object that is able to map the atoms of two
                molecule to uniform order
        """
        self._tolerance = tolerance
        self._mapper = mapper

    def fit(self, mol1, mol2):
        """
        Fit two molecules.

        Args:
            mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object
            mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object

        Returns:
            A boolean value indicates whether two molecules are the same.
        """
        # Two molecules "match" when their best-fit RMSD is within tolerance.
        return self.get_rmsd(mol1, mol2) < self._tolerance

    def get_rmsd(self, mol1, mol2):
        """
        Get RMSD between two molecule with arbitrary atom order.

        Returns:
            RMSD if topology of the two molecules are the same
            Infinite if the topology is different
        """
        label1, label2 = self._mapper.uniform_labels(mol1, mol2)
        if label1 is None or label2 is None:
            # No atom mapping exists: topologically different molecules.
            return float("Inf")
        return self._calc_rms(mol1, mol2, label1, label2)

    @staticmethod
    def _calc_rms(mol1, mol2, clabel1, clabel2):
        """
        Calculate the RMSD.

        Args:
            mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
                object
            mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
                object
            clabel1: The atom indices that can reorder the first molecule to
                uniform atom order
            clabel1: The atom indices that can reorder the second molecule to
                uniform atom order

        Returns:
            The RMSD.
        """
        obmol1 = BabelMolAdaptor(mol1).openbabel_mol
        obmol2 = BabelMolAdaptor(mol2).openbabel_mol

        # Rebuild both molecules in the shared uniform atom order so that
        # OBAlign pairs corresponding atoms positionally.
        cmol1 = ob.OBMol()
        for i in clabel1:
            oa1 = obmol1.GetAtom(i)
            a1 = cmol1.NewAtom()
            a1.SetAtomicNum(oa1.GetAtomicNum())
            a1.SetVector(oa1.GetVector())
        cmol2 = ob.OBMol()
        for i in clabel2:
            oa2 = obmol2.GetAtom(i)
            a2 = cmol2.NewAtom()
            a2.SetAtomicNum(oa2.GetAtomicNum())
            a2.SetVector(oa2.GetVector())

        aligner = ob.OBAlign(True, False)
        aligner.SetRefMol(cmol1)
        aligner.SetTargetMol(cmol2)
        aligner.Align()
        return aligner.GetRMSD()
def group_molecules(self, mol_list):
"""
Group molecules by structural equality.
Args:
mol_list: List of OpenBabel OBMol or pymatgen objects
Returns:
A list of lists of matched molecules
Assumption: if s1=s2 and s2=s3, then s1=s3
This may not be true for small tolerances.
"""
mol_hash = [(i, self._mapper.get_molecule_hash(m))
for i, m in enumerate(mol_list)]
mol_hash.sort(key=lambda x: x[1])
# Use molecular hash to pre-group molecules.
raw_groups = tuple([tuple([m[0] for m in g]) for k, g
in itertools.groupby(mol_hash,
key=lambda x: x[1])])
group_indices = []
for rg in raw_groups:
mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]],
mol_list[p[1]]))
for p in itertools.combinations(sorted(rg), 2)]
mol_eq = set([(p[0], p[1]) for p in mol_eq_test if p[2]])
not_alone_mols = set(itertools.chain.from_iterable(mol_eq))
alone_mols = set(rg) - not_alone_mols
group_indices.extend([[m] for m in alone_mols])
while len(not_alone_mols) > 0:
current_group = {not_alone_mols.pop()}
while len(not_alone_mols) > 0:
candidate_pairs = set(
[tuple(sorted(p)) for p
in itertools.product(current_group, not_alone_mols)])
mutual_pairs = candidate_pairs & mol_eq
if len(mutual_pairs) == 0:
break
mutual_mols = set(itertools.chain
.from_iterable(mutual_pairs))
current_group |= mutual_mols
not_alone_mols -= mutual_mols
group_indices.append(sorted(current_group))
group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)
all_groups = [[mol_list[i] for i in g] for g in group_indices]
return all_groups
def as_dict(self):
"""
Returns:
MSONAble dict.
"""
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"tolerance": self._tolerance, "mapper": self._mapper.as_dict()}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
MoleculeMatcher
"""
return MoleculeMatcher(
tolerance=d["tolerance"],
mapper=AbstractMolAtomMapper.from_dict(d["mapper"]))
| mbkumar/pymatgen | pymatgen/analysis/molecule_matcher.py | Python | mit | 26,063 | [
"pymatgen"
] | 818fd7022b7489ed6fd563edabb9e00756e70be28983481e7551b7dd2955cf53 |
from __future__ import division, print_function, absolute_import
import re
import os
import logging
import json
import argparse
logger = logging.getLogger(__name__)
# TODO: It may be good to delete all site folders before running this program again
class JournalParsingWorker(object):
    """
    Works on a single "shard" of the journal.json file to
    parse the json into a tab delimited format.

    NOTE(review): ``extract_text`` concatenates ``str`` with the ``bytes``
    produced by ``.encode('utf-8')`` and ``parse_file`` writes the result to a
    file opened in ``'wb'`` mode — this only works under Python 2; verify
    before running under Python 3.
    """
    def __init__(self, input_path, output_dir, verbose):
        """
        Iterate through the file input_path, for each journal entry:
        1) create a directory for the site, if it doesn't already exist
        2) create a file in that directory for the journal entry
        NOTE: Going back to this approach because the journal.json file
              isn't sorted in any meaningful way, so pulling out all the
              journal entries for one site at a time isn't possible.
              Parse the data in the way this file works at least gets
              all the journals for each site in one location.
              We can combine journal entries into a single file at the next
              step.
        Args:
            input_path: full filename of input file.
                ex: /home/srivbane/shared/caringbridge/data/dev/journal.json
            output_dir: directory for where the parsed json shards should be saved
                ex: /home/srivbane/shared/caringbridge/data/parsed_json/
            verbose: True/False where to print progress to the log file.
        Returns: Nothing, it simply writes each journal entry to a file in an
            appropriate directory.
        """
        if verbose:
            logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
        self.input_path = input_path
        self.output_dir = output_dir
        # which fields from the journal.json file should we keep (store them in output file names)
        self.fields = ['siteId', 'userId', 'journalId', 'createdAt']
    def parse_file(self):
        """
        Primary function to call, this does all the work.

        Returns:
            Tuple (num_skipped, no_userId_count, no_journalId_count) with
            the number of entries skipped and the number that were missing
            a userId / journalId, respectively.
        """
        logger.info('Opening file: ' + self.input_path)
        # Output name mirrors the input shard: journal_x.json -> parsed_journal_x.txt
        output_path = os.path.join(self.output_dir, 'parsed_' + os.path.split(self.input_path)[-1].replace(".json", ".txt"))
        num_skipped = 0
        no_userId_count = 0
        no_journalId_count = 0
        with open(self.input_path, 'r') as fin, open(output_path, 'wb') as fout:
            for line in fin:
                # parse the json into a dictionary (one JSON object per line)
                json_dict = json.loads(line)
                # put in checks to see if journal should be skipped (i.e. deleted=True, draft=True)
                skip = self.check_skip(json_dict)
                if skip:
                    num_skipped += 1
                    continue
                # replace missing keys, if necessary
                # (u and j are booleans; adding them counts the misses)
                u, j = self.replace_missings(json_dict)
                no_userId_count += u
                no_journalId_count += j
                # pull out the data we need from the text
                keys = self.extract_keys(json_dict)
                text = self.extract_text(json_dict)
                # write results to a file: one tab-separated record per line
                output = '\t'.join(keys) + '\t' + text + "\n"
                fout.write(output)
        return num_skipped, no_userId_count, no_journalId_count
    def replace_missings(self, json_dict):
        """
        Replace missing values of json dict with the sentinel id '-1'.
        Note: python uses pass by reference, so no need to return json_dict

        Returns:
            Tuple (no_userId, no_journalId) of booleans saying which keys
            were absent before this call.
        """
        # check if journalId doesn't exist, if not make up a unique id
        no_journalId = 'journalId' not in json_dict
        if no_journalId:
            json_dict['journalId'] = '-1'
        # check if userId doesn't exist, if so make one up
        no_userId = 'userId' not in json_dict
        if no_userId:
            json_dict['userId'] = '-1'
        return no_userId, no_journalId
    def check_skip(self, json_dict):
        """
        Check to see if this journal should be skipped.

        Skips entries that lack a timestamp, a siteId, or a body; that are
        flagged deleted/draft; or whose body is the boilerplate default or
        whitespace-only.
        """
        # remove any journals without a timestamp
        if 'createdAt' not in json_dict:
            return True
        # remove any journals without a siteId
        if 'siteId' not in json_dict:
            return True
        if 'isDeleted' in json_dict:
            is_deleted = json_dict['isDeleted'] == "1"
            if is_deleted:
                return True
        if 'isDraft' in json_dict:
            is_draft = json_dict['isDraft'] == "1"
            if is_draft:
                return True
        # is there a more efficient way to perform this check?
        if 'body' in json_dict:
            # drop the auto-generated placeholder entry CaringBridge creates
            is_default = json_dict['body'].strip() == "This CaringBridge site was created just recently. Please visit again soon for a journal update."
            if is_default:
                return True
            # drop bodies that contain nothing but whitespace
            no_text = len(re.sub("\s+", "", json_dict['body'])) == 0
            if no_text:
                return True
        else:
            # no body field
            return True
        # if all these tests pass, then return false to not skip
        return False
    def extract_keys(self, json_dict):
        """
        Pull out a list of all the keys to the journal,
        in the order given by self.fields.
        """
        rval = []
        for field in self.fields:
            if field == 'createdAt':
                # timestamps are nested under MongoDB's '$date' key
                rval.append(str(json_dict[field]['$date']))
            else:
                rval.append(str(json_dict[field]))
        return rval
    def extract_text(self, json_dict):
        # write out the text in the journal entry, include title (if exists) at begining.
        text = json_dict['body'].encode('utf-8').strip()
        if 'title' in json_dict:
            text = json_dict['title'].encode('utf-8').strip() + ' ' + text
        # remove all newlines so that the result can be written on a single line of a file
        text = re.sub("\s+", ' ', text)
        return text
def main():
    """Command-line entry point.

    Parses arguments and runs a single JournalParsingWorker over one
    journal.json shard, writing a tab-delimited file to the output
    directory.
    """
    parser = argparse.ArgumentParser(description='Example for how to run the parse worker to extract journals from a journal.json file.')
    parser.add_argument('-i', '--input_path', type=str, help='Name of journal file to parse.')
    parser.add_argument('-o', '--output_dir', type=str, help='Name of output directory to create site directories.')
    parser.add_argument('--log', dest="verbose", action="store_true", help='Add this flag to have progress printed to log.')
    # BUG FIX: the default used to be True, which made the --log flag a
    # no-op (verbose was always on).  Default to False so --log actually
    # controls logging, as the help text promises.
    parser.set_defaults(verbose=False)
    args = parser.parse_args()
    print('parse_worker.py')
    print(args)
    worker = JournalParsingWorker(input_path=args.input_path, output_dir=args.output_dir, verbose=args.verbose)
    worker.parse_file()


if __name__ == "__main__":
    main()
| robert-giaquinto/text-analysis | src/parse_journal/parse_worker.py | Python | mit | 6,789 | [
"VisIt"
] | 51fa991d2c56829e7aaf6206bf97e736a4493d7ae12b135e90e006f7442b1dfb |
"""
A Tkinter based backend for piddle.
Perry A. Stoll
Created: February 15, 1999
Requires PIL for rotated string support.
Known Problems:
- Doesn't handle the interactive commands yet.
- PIL based canvas inherits lack of underlining strings from piddlePIL
You can find the latest version of this file:
via http://piddle.sourceforge.net
"""
# we depend on PIL for rotated strings so watch for changes in PIL
import Tkinter, tkFont
tk = Tkinter
import rdkit.sping.pid
__version__ = "0.3"
__date__ = "April 8, 1999"
__author__ = "Perry Stoll, perry.stoll@mail.com "
# fixups by chris lee, cwlee@artsci.wustl.edu
# $Id$
# - added drawImage scaling support
# - shifted baseline y parameter in drawString to work around font metric
# shift due to Tkinter's Canvas text_item object
# - fixed argument names so that argument keywords agreed with piddle.py (passes discipline.py)
#
#
# ToDo: for TKCanvas
# make sure that fontHeight() is returnng appropriate measure. Where is this info?
#
# $Log: pidTK.py,v $
# Revision 1.1 2002/07/12 18:34:47 glandrum
# added
#
# Revision 1.6 2000/11/03 00:56:57 clee
# fixed sizing error in TKCanvas
#
# Revision 1.5 2000/11/03 00:25:37 clee
# removed reference to "BaseTKCanvas" (should just use TKCanvas as default)
#
# Revision 1.4 2000/10/29 19:35:31 clee
# eliminated BaseTKCanvas in favor of straightforward "TKCanvas" name
#
# Revision 1.3 2000/10/29 01:57:41 clee
# - added scrollbar support to both TKCanvas and TKCanvasPIL
# - added getTKCanvas() access method to TKCanvasPIL
#
# Revision 1.2 2000/10/15 00:47:17 clee
# commit before continuing after getting pil to work as package
#
# Revision 1.1.1.1 2000/09/27 03:53:15 clee
# Simple Platform Independent Graphics
#
# Revision 1.6 2000/04/06 01:55:34 pmagwene
# - TKCanvas now uses multiple inheritance from Tkinter.Canvas and piddle.Canvas
# * for the most part works much like a normal Tkinter.Canvas object
# - TKCanvas draws rotated strings using PIL image, other objects using normal Tk calls
# - Minor fixes to FontManager and TKCanvas so can specify root window other than Tk()
# - Removed Quit/Clear buttons from default canvas
#
# Revision 1.5 2000/03/12 07:07:42 clee
# sync with 1_x
#
# Revision 1.4 2000/02/26 23:12:42 clee
# turn off compression by default on piddlePDF
# add doc string to new pil-based piddleTK
#
# Revision 1.3 2000/02/26 21:23:19 clee
# update that makes PIL based TKCanvas the default Canvas for TK.
# Updated piddletest.py. Also, added clear() methdo to piddlePIL's
# canvas it clears to "white" is this correct behavior? Not well
# specified in current documents.
#
class FontManager:
    """Translates piddle-style Font objects into Tk fonts and answers
    string/font metric queries.  Created tkFont.Font objects are cached so
    repeated queries for the same piddle font reuse one Tk font."""

    # Map piddle's generic face names onto concrete Tk font families.
    __alt_faces = {"serif": "Times", "sansserif": "Helvetica", "monospaced": "Courier"}

    def __init__(self, master):
        # master: a Tk widget/root used when instantiating tkFont.Font.
        self.master = master
        self.font_cache = {}

    # the main interface
    def stringWidth(self, s, font):
        """Return the horizontal extent of string s rendered in FONT."""
        tkfont = self.piddleToTkFont(font)
        return tkfont.measure(s)

    def fontHeight(self, font):
        """Return the total line height of FONT (ascent + descent + leading)."""
        tkfont = self.piddleToTkFont(font)
        return self._tkfontHeight(tkfont)

    def fontAscent(self, font):
        """Return the distance from baseline to the top of FONT."""
        tkfont = self.piddleToTkFont(font)
        return self._tkfontAscent(tkfont)

    def fontDescent(self, font):
        """Return the distance from baseline to the bottom of FONT."""
        tkfont = self.piddleToTkFont(font)
        return self._tkfontDescent(tkfont)

    def getTkFontString(self, font):
        """Return a string suitable to pass as the -font option to
        to a Tk widget based on the piddle-style FONT"""
        tkfont = self.piddleToTkFont(font)
        # XXX: should just return the internal tk font name?
        #       return str(tkfont)
        return ('-family %(family)s -size %(size)s '
                '-weight %(weight)s -slant %(slant)s '
                '-underline %(underline)s' % tkfont.config())

    def getTkFontName(self, font):
        """Return a the name associated with the piddle-style FONT"""
        tkfont = self.piddleToTkFont(font)
        return str(tkfont)

    def piddleToTkFont(self, font):
        """Return a tkFont instance based on the pid-style FONT"""
        if font is None:
            return ''
        # default 12 pt, "Times", non-bold, non-italic
        size = 12
        family = "Times"
        weight = "normal"
        slant = "roman"
        underline = "false"
        if font.face:
            # check if the user specified a generic face type
            # like serif or monospaced. check is case-insenstive.
            f = font.face.lower()
            if f in self.__alt_faces:
                family = self.__alt_faces[f]
            else:
                family = font.face
        size = font.size or 12
        if font.bold:
            weight = "bold"
        if font.italic:
            slant = "italic"
        if font.underline:
            underline = 'true'
        # ugh... is there a better way to do this?
        key = (family, size, weight, slant, underline)
        # check if we've already seen this font.
        if key in self.font_cache:
            # yep, don't bother creating a new one. just fetch it.
            font = self.font_cache[key]
        else:
            # nope, let's create a new tk font.
            # this way we will return info about the actual font
            # selected by Tk, which may be different than what we ask
            # for if it's not availible.
            font = tkFont.Font(self.master, family=family, size=size, weight=weight, slant=slant,
                               underline=underline)
            self.font_cache[(family, size, weight, slant, underline)] = font
        return font

    def _tkfontHeight(self, tkfont):
        # BUG FIX: this helper was missing, so fontHeight() raised
        # AttributeError.  Tk's "linespace" metric is the total height of a
        # line of text in the font (ascent + descent + internal leading).
        return tkfont.metrics("linespace")

    def _tkfontAscent(self, tkfont):
        return tkfont.metrics("ascent")

    def _tkfontDescent(self, tkfont):
        return tkfont.metrics("descent")
class TKCanvas(tk.Canvas, rdkit.sping.pid.Canvas):
    """A Tkinter.Canvas that implements the sping/piddle drawing API.

    Inherits both Tkinter.Canvas (so it can be packed/gridded like any Tk
    widget) and sping.pid.Canvas (so it honors the piddle drawing calls).
    Rotated strings are rendered through PIL; everything else uses native
    Tk canvas items.
    """

    __TRANSPARENT = ''  # transparent for Tk color

    def __init__(self,
                 size=(300, 300),
                 name="sping.TK",
                 master=None,
                 scrollingViewPortSize=None,  # a 2-tuple to define the size of the viewport
                 **kw):
        """This canvas allows you to add a tk.Canvas with a sping API for drawing.
        To add scrollbars, the simpliest method is to set the 'scrollingViewPortSize'
        equal to a (width, height) tuple that describes the visible portion
        of the canvas on screen.  This sets scrollregion=(0,0, size[0], size[1]).
        Then you can add scrollbars as you would any tk.Canvas.
        Note, because this is a subclass of tk.Canvas, you can use the normal keywords
        to specify a tk.Canvas with scrollbars, however, you should then be careful to
        set the "scrollregion" option to the same size as the 'size' passed to __init__.
        Tkinter's scrollregion option essentially makes 'size' ignored. """
        # BUG FIX: was name=size, which passed the size tuple as the canvas name.
        rdkit.sping.pid.Canvas.__init__(self, size=size, name=name)
        if scrollingViewPortSize:  # turn on ability to scroll
            kw["scrollregion"] = (0, 0, size[0], size[1])
            # BUG FIX: width/height were swapped here; the tuple is
            # (width, height), matching the handling of `size` below.
            kw["width"] = scrollingViewPortSize[0]
            kw["height"] = scrollingViewPortSize[1]
        else:
            kw["width"] = size[0]
            kw["height"] = size[1]
        apply(tk.Canvas.__init__, (self, master), kw)  # use kw to pass other tk.Canvas options
        self.config(background="white")
        self.width, self.height = size
        self._font_manager = FontManager(self)
        self._configure()
        self._item_ids = []
        self._images = []

    def _configure(self):
        # hook for subclasses; the base canvas needs no extra configuration
        pass

    def _display(self):
        # render pending items and enter the Tk event loop (blocks)
        self.flush()
        self.mainloop()

    def _quit(self):
        self.quit()

    # Hmmm...the postscript generated by this causes my Ghostscript to barf...
    def _to_ps_file(self, filename):
        self.postscript(file=filename)

    def isInteractive(self):
        # interactive callbacks (onOver/onClick/onKey) are not wired up yet
        return 0

    def onOver(self, event):
        pass

    def onClick(self, event):
        pass

    def onKey(self, event):
        pass

    def flush(self):
        tk.Canvas.update(self)

    def clear(self):
        # delete every item this canvas created (Python 2: map is eager)
        map(self.delete, self._item_ids)
        self._item_ids = []

    def _colorToTkColor(self, c):
        # piddle colors are 0..1 floats; Tk wants "#RRGGBB"
        return "#%02X%02X%02X" % (int(c.red * 255), int(c.green * 255), int(c.blue * 255))

    def _getTkColor(self, color, defaultColor):
        """Resolve a piddle color (possibly None/transparent) to a Tk color string."""
        if color is None:
            color = defaultColor
        if color is rdkit.sping.pid.transparent:
            color = self.__TRANSPARENT
        else:
            color = self._colorToTkColor(color)
        return color

    def drawLine(self, x1, y1, x2, y2, color=None, width=None):
        """Draw a straight line segment from (x1,y1) to (x2,y2)."""
        color = self._getTkColor(color, self.defaultLineColor)
        if width is None:
            width = self.defaultLineWidth
        new_item = self.create_line(x1, y1, x2, y2, fill=color, width=width)
        self._item_ids.append(new_item)

    # NYI: curve with fill
    #def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
    #              edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
    #

    def stringWidth(self, s, font=None):
        return self._font_manager.stringWidth(s, font or self.defaultFont)

    def fontAscent(self, font=None):
        return self._font_manager.fontAscent(font or self.defaultFont)

    def fontDescent(self, font=None):
        return self._font_manager.fontDescent(font or self.defaultFont)

    def drawString(self, s, x, y, font=None, color=None, angle=None):
        """Draw string s with its baseline starting at (x, y); ANGLE in
        degrees requires PIL and falls back to unrotated text without it."""
        if angle:
            try:
                self._drawRotatedString(s, x, y, font, color, angle)
                return
            except ImportError:
                print("PIL not available. Using unrotated strings.")
        # fudge factor for TK on linux (at least)
        # strings are being drawn using create_text in canvas
        y = y - self.fontHeight(font) * .28  # empirical
        #y = y - self.fontDescent(font)
        color = self._getTkColor(color, self.defaultLineColor)
        font = self._font_manager.getTkFontString(font or self.defaultFont)
        new_item = self.create_text(x, y, text=s, font=font, fill=color, anchor=Tkinter.W)
        self._item_ids.append(new_item)

    def _drawRotatedString(self, s, x, y, font=None, color=None, angle=0):
        # we depend on PIL for rotated strings so watch for changes in PIL
        try:
            import rdkit.sping.PIL.pidPIL
            from PIL import Image, ImageTk
            pp = rdkit.sping.PIL.pidPIL
        except ImportError:
            raise ImportError("Rotated strings only possible with PIL support")
        pilCan = pp.PILCanvas(size=(self.width, self.height))
        pilCan.defaultFont = self.defaultFont
        pilCan.defaultLineColor = self.defaultLineColor
        if '\n' in s or '\r' in s:
            self.drawMultiLineString(s, x, y, font, color, angle)
            return
        if not font:
            font = pilCan.defaultFont
        if not color:
            color = self.defaultLineColor
        if color == rdkit.sping.pid.transparent:
            return
        # draw into an offscreen Image
        tempsize = pilCan.stringWidth(s, font) * 1.2
        tempimg = Image.new('RGB', (tempsize, tempsize), (0, 0, 0))
        txtimg = Image.new('RGB', (tempsize, tempsize), (255, 255, 255))
        from PIL import ImageDraw
        temppen = ImageDraw.ImageDraw(tempimg)
        temppen.setink((255, 255, 255))
        pilfont = pp._pilFont(font)
        if not pilfont:
            raise ValueError("Bad font: %s" % font)
        temppen.setfont(pilfont)
        pos = [4, int(tempsize / 2 - pilCan.fontAscent(font)) - pilCan.fontDescent(font)]
        temppen.text(pos, s)
        pos[1] = int(tempsize / 2)
        # rotate
        if angle:
            from math import pi, sin, cos
            tempimg = tempimg.rotate(angle, Image.BILINEAR)
            temppen = ImageDraw.ImageDraw(tempimg)
            radians = -angle * pi / 180.0
            r = tempsize / 2 - pos[0]
            pos[0] = int(tempsize / 2 - r * cos(radians))
            pos[1] = int(pos[1] - r * sin(radians))
        ###temppen.rectangle( (pos[0],pos[1],pos[0]+2,pos[1]+2) ) # PATCH for debugging
        # colorize, and copy it in
        mask = tempimg.convert('L').point(lambda c: c)
        temppen.setink((color.red * 255, color.green * 255, color.blue * 255))
        temppen.setfill(1)
        temppen.rectangle((0, 0, tempsize, tempsize))
        txtimg.paste(tempimg, (0, 0), mask)
        ##Based on code posted by John Michelson in the PIL SIG
        transp = txtimg.convert("RGBA")
        source = transp.split()
        R, G, B, A = 0, 1, 2, 3
        mask = transp.point(lambda i: i < 255 and 255)  # use white as transparent
        source[A].paste(mask)
        transp = Image.merge(transp.mode, source)  # build a new multiband image
        self.drawImage(transp, x - pos[0], y - pos[1])

    def drawRect(self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None):
        """Draw an axis-aligned rectangle with corners (x1,y1) and (x2,y2)."""
        fillColor = self._getTkColor(fillColor, self.defaultFillColor)
        edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
        if edgeWidth is None:
            edgeWidth = self.defaultLineWidth
        new_item = self.create_rectangle(x1, y1, x2, y2, fill=fillColor, width=edgeWidth,
                                         outline=edgeColor)
        self._item_ids.append(new_item)

    # NYI:
    #def drawRoundRect(self, x1,y1, x2,y2, rx=5, ry=5,
    #                  edgeColor=None, edgeWidth=None, fillColor=None):

    def drawEllipse(self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None):
        """Draw an ellipse inscribed in the box (x1,y1)-(x2,y2)."""
        fillColor = self._getTkColor(fillColor, self.defaultFillColor)
        edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
        if edgeWidth is None:
            edgeWidth = self.defaultLineWidth
        new_item = self.create_oval(x1, y1, x2, y2, fill=fillColor, outline=edgeColor, width=edgeWidth)
        self._item_ids.append(new_item)

    def drawArc(self, x1, y1, x2, y2, startAng=0, extent=360, edgeColor=None, edgeWidth=None,
                fillColor=None):
        """Draw an arc of the ellipse inscribed in (x1,y1)-(x2,y2), starting
        at startAng degrees and sweeping `extent` degrees."""
        fillColor = self._getTkColor(fillColor, self.defaultFillColor)
        edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
        if edgeWidth is None:
            edgeWidth = self.defaultLineWidth
        new_item = self.create_arc(x1, y1, x2, y2, start=startAng, extent=extent, fill=fillColor,
                                   width=edgeWidth, outline=edgeColor)
        self._item_ids.append(new_item)

    def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
        """Draw a polygon through POINTLIST; closed=1 joins last point to first."""
        fillColor = self._getTkColor(fillColor, self.defaultFillColor)
        edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
        if edgeWidth is None:
            edgeWidth = self.defaultLineWidth
        if closed:
            # draw a closed shape
            new_item = self.create_polygon(pointlist, fill=fillColor, width=edgeWidth, outline=edgeColor)
        else:
            if fillColor == self.__TRANSPARENT:
                # draw open-ended set of lines
                d = {'fill': edgeColor, 'width': edgeWidth}
                new_item = apply(self.create_line, pointlist, d)
            else:
                # open filled shape.
                # draw it twice:
                #   once as a polygon with no edge outline with the fill color
                #   and once as an open set of lines of the appropriate color
                new_item = self.create_polygon(pointlist, fill=fillColor, outline=self.__TRANSPARENT)
                self._item_ids.append(new_item)
                d = {'fill': edgeColor, 'width': edgeWidth}
                new_item = apply(self.create_line, pointlist, d)
        self._item_ids.append(new_item)

    #def drawFigure(self, partList,
    #               edgeColor=None, edgeWidth=None, fillColor=None):
    # use default implementation

    def drawImage(self, image, x1, y1, x2=None, y2=None):
        """Draw a PIL image with its top-left corner at (x1,y1); if (x2,y2)
        are given the image is scaled to fit that box."""
        try:
            from PIL import ImageTk
        except ImportError:
            raise NotImplementedError('drawImage - require the ImageTk module')
        w, h = image.size
        if not x2:
            x2 = w + x1
        if not y2:
            y2 = h + y1
        if (w != x2 - x1) or (h != y2 - y1):  # need to scale image
            myimage = image.resize((x2 - x1, y2 - y1))
        else:
            myimage = image
        # unless I keep a copy of this PhotoImage, it seems to be garbage collected
        # and the image is removed from the display after this function.  weird
        itk = ImageTk.PhotoImage(myimage, master=self)
        new_item = self.create_image(x1, y1, image=itk, anchor=Tkinter.NW)
        self._item_ids.append(new_item)
        self._images.append(itk)
# TKCanvasPIL is only defined when the sping PIL backend is importable.
# NOTE(review): if the import fails, the except clause re-raises at module
# import time, making the whole module unusable without PIL even for users
# who only want TKCanvas — confirm this is intended.
try:
  import rdkit.sping.PIL

  class TKCanvasPIL(rdkit.sping.PIL.PILCanvas):
    """This canvas maintains a PILCanvas as its backbuffer.  Drawing calls
        are made to the backbuffer and flush() sends the image to the screen
        using TKCanvas.

        You can also save what is displayed to a file in any of the formats
        supported by PIL"""

    def __init__(self, size=(300, 300), name='TKCanvas', master=None, **kw):
      rdkit.sping.PIL.PILCanvas.__init__(self, size=size, name=name)
      # the on-screen Tk widget that the PIL backbuffer is blitted onto
      self._tkcanvas = apply(TKCanvas, (size, name, master), kw)

    def flush(self):
      rdkit.sping.PIL.PILCanvas.flush(self)  # call inherited one first
      self._tkcanvas.drawImage(self._image, 0, 0)  # self._image should be a PIL image
      self._tkcanvas.flush()

    def getTKCanvas(self):
      # expose the underlying Tk widget so callers can pack/grid it
      return self._tkcanvas
except ImportError:
  raise ImportError("TKCanvasPIL requires sping PIL Canvas, PIL may not be installed")
| rvianello/rdkit | rdkit/sping/TK/pidTK.py | Python | bsd-3-clause | 16,537 | [
"RDKit"
] | 2d6d57ba436af3974051389e6b4d0583547066764cba9f4921f4c21698db44c2 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import assert_array_equal
class TestDatafiles(object):
    """Sanity checks on the datafile re-export machinery: the
    MDAnalysisTests.datafiles module must declare a complete __all__, and
    MDAnalysis.tests.datafiles must re-export every declared name."""

    def test_import(self):
        # The import itself is the assertion: failure means the separate
        # MDAnalysisTests package is not installed.
        try:
            import MDAnalysis.tests.datafiles
        except ImportError:
            raise AssertionError("Failed to 'import MDAnalysis.tests.datafiles --- install MDAnalysisTests")

    def test_all_exports(self):
        import MDAnalysisTests.datafiles
        declared = set(MDAnalysisTests.datafiles.__all__)
        unlisted = []
        for attr in dir(MDAnalysisTests.datafiles):
            # private helpers are exempt from __all__
            if attr.startswith('_'):
                continue
            if attr not in declared:
                unlisted.append(attr)
        assert_array_equal(unlisted, [], err_msg="Variables need to be added to __all__.")

    def test_export_variables(self):
        import MDAnalysisTests.datafiles
        import MDAnalysis.tests.datafiles
        exported_names = dir(MDAnalysis.tests.datafiles)
        not_reexported = [name for name in MDAnalysisTests.datafiles.__all__
                          if name not in exported_names]
        assert_array_equal(not_reexported, [], err_msg="Variables not exported to MDAnalysis.tests.datafiles")
| alejob/mdanalysis | testsuite/MDAnalysisTests/test_datafiles.py | Python | gpl-2.0 | 2,028 | [
"MDAnalysis"
] | b668ee6b5be28e7186965209b49148fed45259d0d7761126b8a178e827021441 |
#!/usr/bin/env python
import matplotlib as m
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import numpy as np
import sys
import pdb
import os
import random
def file_len(full_path):
    """Count the number of lines in a file.

    Args:
        full_path: path of the text file to read.

    Returns:
        The number of lines in the file (0 for an empty file).
    """
    # 'with' guarantees the handle is closed even if reading raises.
    with open(full_path) as f:
        return sum(1 for _ in f)
# Program Options
xres=200   # Number of Grid points in x direction
yres=200   # Number of Grid points in y direction
#decreaseDataSet=100000 # Set 0 to load all data
decreaseDataSet=0
maxZValue=100000 # This is the maximum z value allowed. All z values higher than that will be ste to MaxZValue
#scatterColorMap=plt.cm.autumn
#define own colormap from white to black
cdict = {
  'red' : ((0., 1.0, 1.0), (1., 0.2, 0.2)),
  'green': ((0., 1.0, 1.0), (1., 0.2, 0.2)),
  'blue' : ((0., 1.0, 1.0), (1., 0.2, 0.2))
}
scatterColorMap = m.colors.LinearSegmentedColormap('my_colormap', cdict, 1024)
""" Main Function """
# NOTE(review): this script is Python 2 (print statements, integer division
# in the decimation step); run under python2 or port before using python3.
if not len(sys.argv)>=2:
  print "Usage: plotbathymetry.py Bathymetry.grd [Title]"
  exit()
# optional second argument overrides the plot title
if len(sys.argv)==3:
  title=sys.argv[2]
else:
  title=sys.argv[1]
#Check if file is NetCFD file and converge to XYZ if necessary
s = os.popen("file " + sys.argv[1] + "| grep NetCDF | wc -l").read()
tempfile=sys.argv[1]
if s=="1\n":
  # convert the NetCDF grid to a plain-text XYZ table via GMT's grd2xyz
  print "Found NetCDF file. Start grd2xyz"
  tempfile="/tmp/tempXYZ_"+str(random.randint(0,100))+".dat"
  os.popen("grd2xyz -V -S " + sys.argv[1] + " >> " + tempfile)
  sys.argv[1]=tempfile
print "Load file " + tempfile + "\n"
inp = open(sys.argv[1])
# arr[0]=x, arr[1]=y, arr[2]=z columns of the XYZ table
arr = [[],[],[]]
nr_of_lines = file_len(tempfile)
print str(nr_of_lines) + " Datapoints found."
# read line into array
counter=0
# keep every `step`-th point so at most ~decreaseDataSet points are loaded
if decreaseDataSet==0 or nr_of_lines<decreaseDataSet:
  step=1
else:
  step=nr_of_lines/decreaseDataSet
  print "Decrease dataset to " + str(decreaseDataSet) + " points."
for line in inp.readlines():
  if counter%step==0:
    # loop over the elemets, split by whitespace
    dat=line.split()
    arr[0].append(float(dat[0]))
    arr[1].append(float(dat[1]))
    # clamp z at maxZValue
    arr[2].append(min(maxZValue,float(dat[2])))
  counter=counter+1
print "Extension in X-direction: From " + str(min(arr[0])) + " to " + str(max(arr[0]))
print "Extension in Y-direction: From " + str(min(arr[1])) + " to " + str(max(arr[1]))
print "Extension in Z-direction: From " + str(min(arr[2])) + " to " + str(max(arr[2]))
#Define Grid:
xi=np.linspace(min(arr[0]),max(arr[0]),xres)
yi=np.linspace(min(arr[1]),max(arr[1]),yres)
# grid the data.
zi = griddata(arr[0],arr[1],arr[2],xi,yi)
# contour the gridded data, plotting dots at the randomly spaced data points.
fig = plt.figure()
subplt = fig.add_subplot(111, xlabel='[m]', ylabel='[m]')
subplt.contour(xi,yi,zi,30,linewidths=0.5,colors='k')
#plt.clabel(CS, inline=1, fontsize=8)
# z is plotted in centimetres (factor 100)
contf=subplt.contourf(xi,yi,zi*100,15,cmap=scatterColorMap)
# plot position of gauge stations
subplt.plot(4.521, 1.196, 'o',color='k')
subplt.text(3.821, 1.246, "Gauge 3")
subplt.plot(4.521, 1.696, 'o',color='k')
subplt.text(3.821, 1.746, "Gauge 2")
subplt.plot(4.521, 2.196, 'o',color='k')
subplt.text(3.821, 2.246, "Gauge 1")
colbar=fig.colorbar(contf) # draw colorbar
colbar.set_label('[cm]')
#plt.clim(-0.12,0.0)
# axis limits match the wave-tank dimensions (metres)
plt.xlim((0, 5.448))
plt.ylim((0, 3.402))
#plt.savefig( "BODC_plot.pdf", format='pdf' )
#plt.title(title)
plt.show()
# remove the temporary XYZ file if we created one
if s=="1\n":
  os.popen("rm " + tempfile)
"NetCDF"
] | c642a6ff8d4d9c8592249ff4365c1ac0a58afd83e11da86e3ba30f671cda0b30 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import mock
import requests
from testtools import matchers
from openstack import exceptions
from openstack import format
from openstack import resource
from openstack import session
from openstack.tests.unit import base
from openstack import utils
# Canned values shared by the resource test fixtures below.
fake_name = 'rey'
fake_id = 99
fake_attr1 = 'lana'
fake_attr2 = 'del'
# singular / plural body keys used by FakeResource
fake_resource = 'fake'
fake_resources = 'fakes'
fake_arguments = {'name': 'rey'}
# base_path contains a %(name)s substitution; fake_path is it rendered
# with fake_arguments.
fake_base_path = '/fakes/%(name)s/data'
fake_path = '/fakes/rey/data'
# a complete resource body as a service would return it
fake_data = {'id': fake_id,
             'enabled': True,
             'name': fake_name,
             'attr1': fake_attr1,
             'attr2': fake_attr2,
             'status': None}
fake_body = {fake_resource: fake_data}
class FakeResource(resource.Resource):
    """Minimal Resource subclass used as the fixture for these tests."""
    # keys used to (un)wrap singular/plural response bodies
    resource_key = fake_resource
    resources_key = fake_resources
    base_path = fake_base_path
    # enable every operation so each code path can be exercised
    allow_create = allow_retrieve = allow_update = True
    allow_delete = allow_list = allow_head = True
    # typed property: server-side string coerced to bool
    enabled = resource.prop('enabled', type=format.BoolStr)
    name = resource.prop('name')
    first = resource.prop('attr1')
    second = resource.prop('attr2')
    # 'third' may be populated from 'attr3' or its alias 'attr_three'
    third = resource.prop('attr3', alias='attr_three')
    status = resource.prop('status')
class FakeResourceNoKeys(FakeResource):
    """FakeResource variant with no body-wrapping keys, for testing the
    code paths where responses are not nested under a resource key."""
    resource_key = None
    resources_key = None
class PropTests(base.TestCase):
    """Unit tests for the resource.prop descriptor: aliasing, type
    coercion, defaults, and nested-Resource conversion."""

    def test_with_alias_and_type(self):
        class Test(resource.Resource):
            attr = resource.prop("attr1", alias="attr2", type=bool)
        t = Test(attrs={"attr2": 500})
        # Don't test with assertTrue because 500 evaluates to True.
        # Need to test that bool(500) happened and attr2 *is* True.
        self.assertIs(t.attr, True)

    def test_defaults(self):
        new_default = "new_default"

        class Test(resource.Resource):
            attr1 = resource.prop("attr1")
            attr2 = resource.prop("attr2", default=new_default)
        # unset props: None without a default, the default otherwise
        t = Test()
        self.assertIsNone(t.attr1)
        self.assertEqual(new_default, t.attr2)
        # When the default value is passed in, it is left untouched.
        # Check that attr2 is literally the same object we set as default.
        t.attr2 = new_default
        self.assertIs(new_default, t.attr2)
        not_default = 'not default'
        t2 = Test({'attr2': not_default})
        self.assertEqual(not_default, t2.attr2)
        # Assert that if the default is passed in, it overrides the previously
        # set value (bug #1425996)
        t2.attr2 = new_default
        self.assertEqual(new_default, t2.attr2)

    def test_get_without_instance(self):
        # accessing a prop on the class (not an instance) returns None
        self.assertIsNone(FakeResource.name)

    def test_set_ValueError(self):
        class Test(resource.Resource):
            attr = resource.prop("attr", type=int)
        t = Test()

        def should_raise():
            t.attr = "this is not an int"
        # the coercing int() call propagates its ValueError
        self.assertThat(should_raise, matchers.raises(ValueError))

    def test_set_TypeError(self):
        class Type(object):
            def __init__(self):
                pass

        class Test(resource.Resource):
            attr = resource.prop("attr", type=Type)
        t = Test()

        def should_raise():
            t.attr = "this type takes no args"
        # Type() takes no positional args, so coercion raises TypeError
        self.assertThat(should_raise, matchers.raises(TypeError))

    def test_resource_type(self):
        # props typed as a Resource subclass wrap bare ids into instances
        class FakestResource(resource.Resource):
            shortstop = resource.prop("shortstop", type=FakeResource)
            third_base = resource.prop("third_base", type=FakeResource)
        sot = FakestResource()
        id1 = "Ernie Banks"
        id2 = "Ron Santo"
        # assigning a plain id string yields a FakeResource with that id
        sot.shortstop = id1
        sot.third_base = id2
        resource1 = FakeResource.new(id=id1)
        self.assertEqual(resource1, sot.shortstop)
        self.assertEqual(id1, sot.shortstop.id)
        self.assertEqual(FakeResource, type(sot.shortstop))
        resource2 = FakeResource.new(id=id2)
        self.assertEqual(resource2, sot.third_base)
        self.assertEqual(id2, sot.third_base.id)
        self.assertEqual(FakeResource, type(sot.third_base))
        # assigning an existing Resource instance keeps it as-is
        sot2 = FakestResource()
        sot2.shortstop = resource1
        sot2.third_base = resource2
        self.assertEqual(resource1, sot2.shortstop)
        self.assertEqual(id1, sot2.shortstop.id)
        self.assertEqual(FakeResource, type(sot2.shortstop))
        self.assertEqual(resource2, sot2.third_base)
        self.assertEqual(id2, sot2.third_base.id)
        self.assertEqual(FakeResource, type(sot2.third_base))
        # ids arriving in a response body are also wrapped
        body = {
            "shortstop": id1,
            "third_base": id2
        }
        sot3 = FakestResource(body)
        self.assertEqual(FakeResource({"id": id1}), sot3.shortstop)
        self.assertEqual(FakeResource({"id": id2}), sot3.third_base)

    def test_set_alias_same_name(self):
        # when the alias equals the prop's attribute name, values arriving
        # under the server-side name are still readable through the prop
        class Test(resource.Resource):
            attr = resource.prop("something", alias="attr")
        val = "hey"
        args = {"something": val}
        sot = Test(args)
        self.assertEqual(val, sot._attrs["something"])
        self.assertEqual(val, sot.attr)
class HeaderTests(base.TestCase):
class Test(resource.Resource):
base_path = "/ramones"
service = "punk"
allow_create = True
allow_update = True
hey = resource.header("vocals")
ho = resource.header("guitar")
letsgo = resource.header("bass")
def test_get(self):
val = "joey"
args = {"vocals": val}
sot = HeaderTests.Test({'headers': args})
self.assertEqual(val, sot.hey)
self.assertEqual(None, sot.ho)
self.assertEqual(None, sot.letsgo)
def test_set_new(self):
args = {"vocals": "joey", "bass": "deedee"}
sot = HeaderTests.Test({'headers': args})
sot._reset_dirty()
sot.ho = "johnny"
self.assertEqual("johnny", sot.ho)
self.assertTrue(sot.is_dirty)
def test_set_old(self):
args = {"vocals": "joey", "bass": "deedee"}
sot = HeaderTests.Test({'headers': args})
sot._reset_dirty()
sot.letsgo = "cj"
self.assertEqual("cj", sot.letsgo)
self.assertTrue(sot.is_dirty)
def test_set_brand_new(self):
sot = HeaderTests.Test({'headers': {}})
sot._reset_dirty()
sot.ho = "johnny"
self.assertEqual("johnny", sot.ho)
self.assertTrue(sot.is_dirty)
self.assertEqual({'headers': {"guitar": "johnny"}}, sot)
def test_1428342(self):
sot = HeaderTests.Test({'headers':
requests.structures.CaseInsensitiveDict()})
self.assertIsNone(sot.hey)
def test_create_update_headers(self):
sot = HeaderTests.Test()
sot._reset_dirty()
sot.ho = "johnny"
sot.letsgo = "deedee"
response = mock.MagicMock()
response.body = {'id': 1}
sess = mock.MagicMock()
sess.post = mock.MagicMock(return_value=response)
sess.put = mock.MagicMock(return_value=response)
sot.create(sess)
headers = {'guitar': 'johnny', 'bass': 'deedee'}
sess.post.assert_called_with(HeaderTests.Test.base_path,
service=HeaderTests.Test.service,
headers=headers,
json={})
sot['id'] = 1
sot.letsgo = "cj"
headers = {'guitar': 'johnny', 'bass': 'cj'}
sot.update(sess)
sess.put.assert_called_with('ramones/1',
service=HeaderTests.Test.service,
headers=headers,
json={})
class ResourceTests(base.TestCase):
def setUp(self):
super(ResourceTests, self).setUp()
self.session = mock.Mock(spec=session.Session)
def assertCalledURL(self, method, url):
# call_args gives a tuple of *args and tuple of **kwargs.
# Check that the first arg in *args (the URL) has our url.
self.assertEqual(method.call_args[0][0], url)
def test_empty_id(self):
self.session.get.return_value = mock.Mock(body=fake_body)
obj = FakeResource.new(**fake_arguments)
self.assertEqual(obj, obj.get(self.session))
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_attr1, obj['attr1'])
self.assertEqual(fake_attr2, obj['attr2'])
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_not_allowed(self):
class Nope(resource.Resource):
allow_create = allow_retrieve = allow_update = False
allow_delete = allow_list = allow_head = False
nope = Nope()
def cant_create():
nope.create_by_id(1, 2)
def cant_retrieve():
nope.get_data_by_id(1, 2)
def cant_update():
nope.update_by_id(1, 2, 3)
def cant_delete():
nope.delete_by_id(1, 2)
def cant_list():
for i in nope.list(1):
pass
def cant_head():
nope.head_data_by_id(1, 2)
self.assertThat(cant_create,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_retrieve,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_update,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_delete,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_list,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_head,
matchers.raises(exceptions.MethodNotSupported))
def _test_create_by_id(self, key, response_value, response_body,
attrs, json_body):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.MagicMock()
response.body = response_body
sess = mock.MagicMock()
sess.put = mock.MagicMock(return_value=response)
sess.post = mock.MagicMock(return_value=response)
resp = FakeResource2.create_by_id(sess, attrs)
self.assertEqual(response_value, resp)
sess.post.assert_called_with(FakeResource2.base_path,
service=FakeResource2.service,
json=json_body)
r_id = "my_id"
resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id)
self.assertEqual(response_value, resp)
sess.put.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
service=FakeResource2.service,
json=json_body)
path_args = {"name": "my_name"}
resp = FakeResource2.create_by_id(sess, attrs, path_args=path_args)
self.assertEqual(response_value, resp)
sess.post.assert_called_with(FakeResource2.base_path % path_args,
service=FakeResource2.service,
json=json_body)
resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id,
path_args=path_args)
self.assertEqual(response_value, resp)
sess.put.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
service=FakeResource2.service,
json=json_body)
def test_create_without_resource_key(self):
key = None
response_value = [1, 2, 3]
response_body = response_value
attrs = {"a": 1, "b": 2, "c": 3}
json_body = attrs
self._test_create_by_id(key, response_value, response_body,
attrs, json_body)
def test_create_with_resource_key(self):
key = "my_key"
response_value = [1, 2, 3]
response_body = {key: response_value}
attrs = {"a": 1, "b": 2, "c": 3}
json_body = {key: attrs}
self._test_create_by_id(key, response_value, response_body,
attrs, json_body)
def _test_get_data_by_id(self, key, response_value, response_body):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.MagicMock()
response.body = response_body
sess = mock.MagicMock()
sess.get = mock.MagicMock(return_value=response)
r_id = "my_id"
resp = FakeResource2.get_data_by_id(sess, resource_id=r_id)
self.assertEqual(response_value, resp)
sess.get.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
service=FakeResource2.service)
path_args = {"name": "my_name"}
resp = FakeResource2.get_data_by_id(sess, resource_id=r_id,
path_args=path_args)
self.assertEqual(response_value, resp)
sess.get.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
service=FakeResource2.service)
def test_get_data_without_resource_key(self):
key = None
response_value = [1, 2, 3]
response_body = response_value
self._test_get_data_by_id(key, response_value, response_body)
def test_get_data_with_resource_key(self):
key = "my_key"
response_value = [1, 2, 3]
response_body = {key: response_value}
self._test_get_data_by_id(key, response_value, response_body)
def _test_head_data_by_id(self, key, response_value):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.MagicMock()
response.headers = response_value
sess = mock.MagicMock()
sess.head = mock.MagicMock(return_value=response)
r_id = "my_id"
resp = FakeResource2.head_data_by_id(sess, resource_id=r_id)
self.assertEqual({'headers': response_value}, resp)
sess.head.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
service=FakeResource2.service,
accept=None)
path_args = {"name": "my_name"}
resp = FakeResource2.head_data_by_id(sess, resource_id=r_id,
path_args=path_args)
self.assertEqual({'headers': response_value}, resp)
sess.head.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
service=FakeResource2.service,
accept=None)
def test_head_data_without_resource_key(self):
key = None
response_value = {"key1": "value1", "key2": "value2"}
self._test_head_data_by_id(key, response_value)
def test_head_data_with_resource_key(self):
key = "my_key"
response_value = {"key1": "value1", "key2": "value2"}
self._test_head_data_by_id(key, response_value)
def _test_update_by_id(self, key, response_value, response_body,
attrs, json_body):
class FakeResource2(FakeResource):
patch_update = True
resource_key = key
service = "my_service"
response = mock.MagicMock()
response.body = response_body
sess = mock.MagicMock()
sess.patch = mock.MagicMock(return_value=response)
r_id = "my_id"
resp = FakeResource2.update_by_id(sess, r_id, attrs)
self.assertEqual(response_value, resp)
sess.patch.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
service=FakeResource2.service,
json=json_body)
path_args = {"name": "my_name"}
resp = FakeResource2.update_by_id(sess, r_id, attrs,
path_args=path_args)
self.assertEqual(response_value, resp)
sess.patch.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
service=FakeResource2.service,
json=json_body)
def test_update_without_resource_key(self):
key = None
response_value = [1, 2, 3]
response_body = response_value
attrs = {"a": 1, "b": 2, "c": 3}
json_body = attrs
self._test_update_by_id(key, response_value, response_body,
attrs, json_body)
def test_update_with_resource_key(self):
key = "my_key"
response_value = [1, 2, 3]
response_body = {key: response_value}
attrs = {"a": 1, "b": 2, "c": 3}
json_body = {key: attrs}
self._test_update_by_id(key, response_value, response_body,
attrs, json_body)
def test_delete_by_id(self):
class FakeResource2(FakeResource):
service = "my_service"
sess = mock.MagicMock()
sess.delete = mock.MagicMock(return_value=None)
r_id = "my_id"
resp = FakeResource2.delete_by_id(sess, r_id)
self.assertIsNone(resp)
sess.delete.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
service=FakeResource2.service,
accept=None)
path_args = {"name": "my_name"}
resp = FakeResource2.delete_by_id(sess, r_id, path_args=path_args)
self.assertIsNone(resp)
sess.delete.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
service=FakeResource2.service,
accept=None)
def test_create(self):
resp = mock.Mock(body=fake_body)
self.session.post = mock.Mock(return_value=resp)
obj = FakeResource.new(name=fake_name,
enabled=True,
attr1=fake_attr1,
attr2=fake_attr2)
self.assertEqual(obj, obj.create(self.session))
self.assertFalse(obj.is_dirty)
last_req = self.session.post.call_args[1]["json"][
FakeResource.resource_key]
self.assertEqual(4, len(last_req))
self.assertTrue(last_req['enabled'])
self.assertEqual(fake_name, last_req['name'])
self.assertEqual(fake_attr1, last_req['attr1'])
self.assertEqual(fake_attr2, last_req['attr2'])
self.assertEqual(fake_id, obj.id)
self.assertTrue(obj['enabled'])
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_attr1, obj['attr1'])
self.assertEqual(fake_attr2, obj['attr2'])
self.assertEqual(True, obj.enabled)
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_get(self):
resp = mock.Mock(body=fake_body)
self.session.get = mock.Mock(return_value=resp)
obj = FakeResource.get_by_id(self.session, fake_id,
path_args=fake_arguments)
# Check that the proper URL is being built.
self.assertCalledURL(self.session.get,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_attr1, obj['attr1'])
self.assertEqual(fake_attr2, obj['attr2'])
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_get_with_headers(self):
header1 = "fake-value1"
header2 = "fake-value2"
headers = {"header1": header1,
"header2": header2}
resp = mock.Mock(body=fake_body, headers=headers)
self.session.get = mock.Mock(return_value=resp)
class FakeResource2(FakeResource):
header1 = resource.header("header1")
header2 = resource.header("header2")
obj = FakeResource2.get_by_id(self.session, fake_id,
path_args=fake_arguments,
include_headers=True)
self.assertCalledURL(self.session.get,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_attr1, obj['attr1'])
self.assertEqual(fake_attr2, obj['attr2'])
self.assertEqual(header1, obj['headers']['header1'])
self.assertEqual(header2, obj['headers']['header2'])
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
self.assertEqual(header1, obj.header1)
self.assertEqual(header2, obj.header2)
def test_head(self):
class FakeResource2(FakeResource):
header1 = resource.header("header1")
header2 = resource.header("header2")
resp = mock.Mock(headers={"header1": "one", "header2": "two"})
self.session.head = mock.Mock(return_value=resp)
obj = FakeResource2.head_by_id(self.session, fake_id,
path_args=fake_arguments)
self.assertCalledURL(self.session.head,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
self.assertEqual('one', obj['headers']['header1'])
self.assertEqual('two', obj['headers']['header2'])
self.assertEqual('one', obj.header1)
self.assertEqual('two', obj.header2)
def test_patch_update(self):
class FakeResourcePatch(FakeResource):
patch_update = True
resp = mock.Mock(body=fake_body)
self.session.patch = mock.Mock(return_value=resp)
obj = FakeResourcePatch.new(id=fake_id, name=fake_name,
attr1=fake_attr1, attr2=fake_attr2)
self.assertTrue(obj.is_dirty)
self.assertEqual(obj, obj.update(self.session))
self.assertFalse(obj.is_dirty)
self.assertCalledURL(self.session.patch,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
last_req = self.session.patch.call_args[1]["json"][
FakeResource.resource_key]
self.assertEqual(3, len(last_req))
self.assertEqual(fake_name, last_req['name'])
self.assertEqual(fake_attr1, last_req['attr1'])
self.assertEqual(fake_attr2, last_req['attr2'])
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_put_update(self):
class FakeResourcePut(FakeResource):
# This is False by default, but explicit for this test.
patch_update = False
resp = mock.Mock(body=fake_body)
self.session.put = mock.Mock(return_value=resp)
obj = FakeResourcePut.new(id=fake_id, name=fake_name,
attr1=fake_attr1, attr2=fake_attr2)
self.assertTrue(obj.is_dirty)
self.assertEqual(obj, obj.update(self.session))
self.assertFalse(obj.is_dirty)
self.assertCalledURL(self.session.put,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
last_req = self.session.put.call_args[1]["json"][
FakeResource.resource_key]
self.assertEqual(3, len(last_req))
self.assertEqual(fake_name, last_req['name'])
self.assertEqual(fake_attr1, last_req['attr1'])
self.assertEqual(fake_attr2, last_req['attr2'])
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_update_early_exit(self):
obj = FakeResource()
obj._dirty = [] # Bail out early if there's nothing to update.
self.assertIsNone(obj.update("session"))
def test_update_no_id_attribute(self):
obj = FakeResource.new(id=1, attr="value1")
obj._dirty = {"attr": "value2"}
obj.update_by_id = mock.MagicMock(return_value=dict())
# If no id_attribute is returned in the update response, make sure
# we handle the resulting KeyError.
self.assertEqual(obj, obj.update("session"))
def test_delete(self):
obj = FakeResource({"id": fake_id, "name": fake_name})
obj.delete(self.session)
self.assertCalledURL(self.session.delete,
os.path.join(fake_base_path % fake_arguments,
str(fake_id))[1:])
def _test_list(self, resource_class):
results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
for i in range(len(results)):
results[i]['id'] = fake_id + i
if resource_class.resources_key is not None:
body = {resource_class.resources_key:
self._get_expected_results()}
sentinel = {resource_class.resources_key: []}
else:
body = self._get_expected_results()
sentinel = []
self.session.get.side_effect = [mock.Mock(body=body),
mock.Mock(body=sentinel)]
objs = list(resource_class.list(self.session, path_args=fake_arguments,
paginated=True))
params = {'limit': 3, 'marker': results[-1]['id']}
self.assertEqual(params, self.session.get.call_args[1]['params'])
self.assertEqual(3, len(objs))
for obj in objs:
self.assertIn(obj.id, range(fake_id, fake_id + 3))
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_name, obj.name)
self.assertIsInstance(obj, FakeResource)
def _get_expected_results(self):
results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
for i in range(len(results)):
results[i]['id'] = fake_id + i
return results
def test_list_keyed_resource(self):
self._test_list(FakeResource)
def test_list_non_keyed_resource(self):
self._test_list(FakeResourceNoKeys)
def _test_list_call_count(self, paginated):
# Test that we've only made one call to receive all data
results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
body = mock.Mock(body={fake_resources: results})
attrs = {"get.return_value": body}
session = mock.Mock(**attrs)
list(FakeResource.list(session, params={'limit': len(results) + 1},
path_args=fake_arguments,
paginated=paginated))
# Ensure we only made one call to complete this.
self.assertEqual(1, session.get.call_count)
def test_list_bail_out(self):
# When we get less data than limit, make sure we made one call
self._test_list_call_count(True)
def test_list_nonpaginated(self):
# When we call with paginated=False, make sure we made one call
self._test_list_call_count(False)
def test_determine_limit(self):
full_page = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
last_page = [fake_data.copy()]
session = mock.MagicMock()
session.get = mock.MagicMock()
full_response = mock.MagicMock()
full_response.body = {FakeResource.resources_key: full_page}
last_response = mock.MagicMock()
last_response.body = {FakeResource.resources_key: last_page}
pages = [full_response, full_response, last_response]
session.get.side_effect = pages
# Don't specify a limit. Resource.list will determine the limit
# is 3 based on the first `full_page`.
results = list(FakeResource.list(session, path_args=fake_arguments,
paginated=True))
self.assertEqual(session.get.call_count, len(pages))
self.assertEqual(len(full_page + full_page + last_page), len(results))
def test_empty_list(self):
page = []
session = mock.Mock()
session.get = mock.Mock()
full_response = mock.Mock()
full_response.body = {FakeResource.resources_key: page}
pages = [full_response]
session.get.side_effect = pages
results = list(FakeResource.list(session, path_args=fake_arguments,
paginated=True))
self.assertEqual(session.get.call_count, len(pages))
self.assertEqual(len(page), len(results))
def test_attrs_name(self):
obj = FakeResource()
self.assertIsNone(obj.name)
del obj.name
def test_to_dict(self):
kwargs = {
'enabled': True,
'name': 'FOO',
'attr1': 'BAR',
'attr2': ['ZOO', 'BAZ'],
'status': 'Active',
'headers': {
'key': 'value'
}
}
obj = FakeResource(kwargs)
res = obj.to_dict()
self.assertIsInstance(res, dict)
self.assertTrue(res['enabled'])
self.assertEqual('FOO', res['name'])
self.assertEqual('BAR', res['attr1'])
self.assertEqual(['ZOO', 'BAZ'], res['attr2'])
self.assertEqual('Active', res['status'])
self.assertNotIn('headers', res)
def test_composite_attr_happy(self):
obj = FakeResource.existing(**{'attr3': '3'})
try:
self.assertEqual('3', obj.third)
except AttributeError:
self.fail("third was not found as expected")
def test_composite_attr_fallback(self):
obj = FakeResource.existing(**{'attr_three': '3'})
try:
self.assertEqual('3', obj.third)
except AttributeError:
self.fail("third was not found in fallback as expected")
def test_id_del(self):
class Test(resource.Resource):
id_attribute = "my_id"
attrs = {"my_id": 100}
t = Test(attrs=attrs)
self.assertEqual(attrs["my_id"], t.id)
del t.id
self.assertTrue(Test.id_attribute not in t._attrs)
def test_from_name_with_name(self):
name = "Ernie Banks"
obj = FakeResource.from_name(name)
self.assertEqual(name, obj.name)
def test_from_id_with_name(self):
name = "Sandy Koufax"
obj = FakeResource.from_id(name)
self.assertEqual(name, obj.id)
def test_from_id_with_object(self):
name = "Mickey Mantle"
obj = FakeResource.new(name=name)
new_obj = FakeResource.from_id(obj)
self.assertIs(new_obj, obj)
self.assertEqual(obj.name, new_obj.name)
def test_from_id_with_bad_value(self):
def should_raise():
FakeResource.from_id(3.14)
self.assertThat(should_raise, matchers.raises(ValueError))
def test_dirty_list(self):
class Test(resource.Resource):
attr = resource.prop("attr")
# Check if dirty after setting by prop
sot1 = Test()
self.assertFalse(sot1.is_dirty)
sot1.attr = 1
self.assertTrue(sot1.is_dirty)
# Check if dirty after setting by mapping
sot2 = Test()
sot2["attr"] = 1
self.assertTrue(sot1.is_dirty)
# Check if dirty after creation
sot3 = Test({"attr": 1})
self.assertTrue(sot3.is_dirty)
def test_update_attrs(self):
class Test(resource.Resource):
moe = resource.prop("the-attr")
larry = resource.prop("the-attr2")
curly = resource.prop("the-attr3", type=int)
shemp = resource.prop("the-attr4")
value1 = "one"
value2 = "two"
value3 = "3"
value4 = "fore"
value5 = "fiver"
sot = Test({"the-attr": value1})
sot.update_attrs({"the-attr2": value2, "notprop": value4})
self.assertTrue(sot.is_dirty)
self.assertEqual(value1, sot.moe)
self.assertEqual(value1, sot["the-attr"])
self.assertEqual(value2, sot.larry)
self.assertEqual(value4, sot.notprop)
sot._reset_dirty()
sot.update_attrs(curly=value3)
self.assertTrue(sot.is_dirty)
self.assertEqual(int, type(sot.curly))
self.assertEqual(int(value3), sot.curly)
sot._reset_dirty()
sot.update_attrs(**{"the-attr4": value5})
self.assertTrue(sot.is_dirty)
self.assertEqual(value5, sot.shemp)
def test_get_id(self):
class Test(resource.Resource):
pass
ID = "an id"
res = Test({"id": ID})
self.assertEqual(ID, resource.Resource.get_id(ID))
self.assertEqual(ID, resource.Resource.get_id(res))
def test_repr(self):
fr = FakeResource()
fr._loaded = False
fr.first = "hey"
fr.second = "hi"
fr.third = "nah"
the_repr = repr(fr)
the_repr = the_repr.replace('openstack.tests.unit.test_resource.', '')
result = eval(the_repr)
self.assertEqual(fr._loaded, result._loaded)
self.assertEqual(fr.first, result.first)
self.assertEqual(fr.second, result.second)
self.assertEqual(fr.third, result.third)
def test_id_attribute(self):
faker = FakeResource(fake_data)
self.assertEqual(fake_id, faker.id)
faker.id_attribute = 'name'
self.assertEqual(fake_name, faker.id)
faker.id_attribute = 'attr1'
self.assertEqual(fake_attr1, faker.id)
faker.id_attribute = 'attr2'
self.assertEqual(fake_attr2, faker.id)
faker.id_attribute = 'id'
self.assertEqual(fake_id, faker.id)
def test_name_attribute(self):
class Person_ES(resource.Resource):
name_attribute = "nombre"
nombre = resource.prop('nombre')
name = "Brian"
args = {'nombre': name}
person = Person_ES(args)
self.assertEqual(name, person.nombre)
self.assertEqual(name, person.name)
new_name = "Julien"
person.name = new_name
self.assertEqual(new_name, person.nombre)
self.assertEqual(new_name, person.name)
def test_boolstr_prop(self):
faker = FakeResource(fake_data)
self.assertTrue(faker.enabled)
self.assertTrue(faker['enabled'])
faker._attrs['enabled'] = False
self.assertFalse(faker.enabled)
self.assertFalse(faker['enabled'])
# should fail fast
def set_invalid():
faker.enabled = 'INVALID'
self.assertRaises(ValueError, set_invalid)
class ResourceMapping(base.TestCase):
def test__getitem(self):
value = 10
class Test(resource.Resource):
attr = resource.prop("attr")
t = Test(attrs={"attr": value})
self.assertEqual(value, t["attr"])
def test__setitem__existing_item_changed(self):
class Test(resource.Resource):
pass
t = Test()
key = "attr"
value = 1
t[key] = value
self.assertEqual(value, t._attrs[key])
self.assertTrue(key in t._dirty)
def test__setitem__existing_item_unchanged(self):
class Test(resource.Resource):
pass
key = "attr"
value = 1
t = Test(attrs={key: value})
t._reset_dirty() # Clear dirty list so this checks as unchanged.
t[key] = value
self.assertEqual(value, t._attrs[key])
self.assertTrue(key not in t._dirty)
def test__setitem__new_item(self):
class Test(resource.Resource):
pass
t = Test()
key = "attr"
value = 1
t[key] = value
self.assertEqual(value, t._attrs[key])
self.assertTrue(key in t._dirty)
def test__delitem__(self):
class Test(resource.Resource):
pass
key = "attr"
value = 1
t = Test(attrs={key: value})
del t[key]
self.assertTrue(key not in t._attrs)
self.assertTrue(key in t._dirty)
def test__len__(self):
class Test(resource.Resource):
pass
attrs = {"a": 1, "b": 2, "c": 3}
t = Test(attrs=attrs)
self.assertEqual(len(attrs.keys()), len(t))
def test__iter__(self):
class Test(resource.Resource):
pass
attrs = {"a": 1, "b": 2, "c": 3}
t = Test(attrs=attrs)
for attr in t:
self.assertEqual(attrs[attr], t[attr])
def _test_resource_serialization(self, session_method, resource_method):
attr_type = resource.Resource
class Test(resource.Resource):
allow_create = True
attr = resource.prop("attr", type=attr_type)
the_id = 123
sot = Test()
sot.attr = resource.Resource({"id": the_id})
self.assertEqual(attr_type, type(sot.attr))
def fake_call(*args, **kwargs):
attrs = kwargs["json"]
try:
json.dumps(attrs)
except TypeError as e:
self.fail("Unable to serialize _attrs: %s" % e)
return mock.Mock(body=attrs)
session = mock.Mock()
setattr(session, session_method, mock.Mock(side_effect=fake_call))
if resource_method == "create_by_id":
session.create_by_id(session, sot._attrs)
elif resource_method == "update_by_id":
session.update_by_id(session, None, sot._attrs)
def test_create_serializes_resource_types(self):
self._test_resource_serialization("post", "create_by_id")
def test_update_serializes_resource_types(self):
self._test_resource_serialization("patch", "update_by_id")
class FakeResponse(object):
    # Minimal stand-in for a transport response: exposes only ``body``.
    def __init__(self, response):
        self.body = response
class TestFind(base.TestCase):
NAME = 'matrix'
ID = 'Fishburne'
PROP = 'attribute2'
def setUp(self):
super(TestFind, self).setUp()
self.mock_session = mock.Mock()
self.mock_get = mock.Mock()
self.mock_session.get = self.mock_get
self.matrix = {'id': self.ID, 'name': self.NAME, 'prop': self.PROP}
def test_name(self):
self.mock_get.side_effect = [
exceptions.HttpException(404, 'not found'),
FakeResponse({FakeResource.resources_key: [self.matrix]})
]
result = FakeResource.find(self.mock_session, self.NAME,
path_args=fake_arguments)
self.assertEqual(self.NAME, result.name)
self.assertEqual(self.PROP, result.prop)
def test_id(self):
self.mock_get.side_effect = [
FakeResponse({FakeResource.resource_key: self.matrix})
]
result = FakeResource.find(self.mock_session, self.ID,
path_args=fake_arguments)
self.assertEqual(self.ID, result.id)
self.assertEqual(self.PROP, result.prop)
path = "fakes/rey/data/" + self.ID
self.mock_get.assert_any_call(path, service=None)
def test_id_no_retrieve(self):
self.mock_get.side_effect = [
FakeResponse({FakeResource.resources_key: [self.matrix]})
]
class NoRetrieveResource(FakeResource):
allow_retrieve = False
result = NoRetrieveResource.find(self.mock_session, self.ID,
path_args=fake_arguments)
self.assertEqual(self.ID, result.id)
self.assertEqual(self.PROP, result.prop)
def test_dups(self):
dupe = self.matrix.copy()
dupe['id'] = 'different'
self.mock_get.side_effect = [
# Raise a 404 first so we get out of the ID search and into name.
exceptions.HttpException(404, 'not found'),
FakeResponse({FakeResource.resources_key: [self.matrix, dupe]})
]
self.assertRaises(exceptions.DuplicateResource, FakeResource.find,
self.mock_session, self.NAME)
def test_id_attribute_find(self):
floater = {'ip_address': "127.0.0.1", 'prop': self.PROP}
self.mock_get.side_effect = [
FakeResponse({FakeResource.resource_key: floater})
]
FakeResource.id_attribute = 'ip_address'
FakeResource.id_attribute = 'ip_address'
result = FakeResource.find(self.mock_session, "127.0.0.1",
path_args=fake_arguments)
self.assertEqual("127.0.0.1", result.id)
self.assertEqual(self.PROP, result.prop)
FakeResource.id_attribute = 'id'
p = {'ip_address': "127.0.0.1"}
path = fake_path + "?limit=2"
self.mock_get.called_once_with(path, params=p, service=None)
def test_nada(self):
self.mock_get.side_effect = [
exceptions.HttpException(404, 'not found'),
FakeResponse({FakeResource.resources_key: []})
]
self.assertEqual(None, FakeResource.find(self.mock_session, self.NAME))
def test_no_name(self):
self.mock_get.side_effect = [
exceptions.HttpException(404, 'not found'),
FakeResponse({FakeResource.resources_key: [self.matrix]})
]
FakeResource.name_attribute = None
self.assertEqual(None, FakeResource.find(self.mock_session, self.NAME))
def test_nada_not_ignored(self):
self.mock_get.side_effect = [
exceptions.HttpException(404, 'not found'),
FakeResponse({FakeResource.resources_key: []})
]
self.assertRaises(exceptions.ResourceNotFound, FakeResource.find,
self.mock_session, self.NAME, ignore_missing=False)
class TestWaitForStatus(base.TestCase):
def __init__(self, *args, **kwargs):
super(TestWaitForStatus, self).__init__(*args, **kwargs)
self.build = FakeResponse(self.body_with_status(fake_body, 'BUILD'))
self.active = FakeResponse(self.body_with_status(fake_body, 'ACTIVE'))
self.error = FakeResponse(self.body_with_status(fake_body, 'ERROR'))
def setUp(self):
super(TestWaitForStatus, self).setUp()
self.sess = mock.MagicMock()
def body_with_status(self, body, status):
body_copy = copy.deepcopy(body)
body_copy[fake_resource]['status'] = status
return body_copy
def test_wait_for_status_nothing(self):
self.sess.get = mock.MagicMock()
sot = FakeResource.new(**fake_data)
sot.status = 'ACTIVE'
self.assertEqual(sot, resource.wait_for_status(
self.sess, sot, 'ACTIVE', [], 1, 2))
self.assertEqual([], self.sess.get.call_args_list)
def test_wait_for_status(self):
self.sess.get = mock.MagicMock()
self.sess.get.side_effect = [self.build, self.active]
sot = FakeResource.new(**fake_data)
self.assertEqual(sot, resource.wait_for_status(
self.sess, sot, 'ACTIVE', [], 1, 2))
def test_wait_for_status_timeout(self):
self.sess.get = mock.MagicMock()
self.sess.get.side_effect = [self.build, self.build]
sot = FakeResource.new(**fake_data)
self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_status,
self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)
def test_wait_for_status_failures(self):
self.sess.get = mock.MagicMock()
self.sess.get.side_effect = [self.build, self.error]
sot = FakeResource.new(**fake_data)
self.assertRaises(exceptions.ResourceFailure, resource.wait_for_status,
self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)
def test_wait_for_status_no_status(self):
class FakeResourceNoStatus(resource.Resource):
allow_retrieve = True
sot = FakeResourceNoStatus.new(id=123)
self.assertRaises(AttributeError, resource.wait_for_status,
self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)
class TestWaitForDelete(base.TestCase):
def test_wait_for_delete(self):
sess = mock.Mock()
sot = FakeResource.new(**fake_data)
sot.get = mock.MagicMock()
sot.get.side_effect = [sot, exceptions.NotFoundException(mock.Mock())]
self.assertEqual(sot, resource.wait_for_delete(sess, sot, 1, 2))
def test_wait_for_delete_fail(self):
sess = mock.Mock()
sot = FakeResource.new(**fake_data)
sot.get = mock.MagicMock(return_value=sot)
self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_delete,
sess, sot, 1, 2)
| dudymas/python-openstacksdk | openstack/tests/unit/test_resource.py | Python | apache-2.0 | 45,751 | [
"Brian",
"MOE"
] | 1d9b57ddf72afdf6e0bf2e19ce88f17f1318b5e2f004d13aa0e3daa4f26a898f |
#!/usr/bin/env python2.6
"""
Oct 5 2012
cp from parseLiteratureBlast_toTex.py
Parser Max's Blast output tab-delimited file into a table of clones that have hits matches certain keywords
Filtering criteria includes:
1/ Match score (E.g minPositives = 90%)
2/ Keywords (E.g: autologous keywords)
3/ Min sequence length (to avoid short sequences which are easier to be false negatives)
4/ Min number of samples
nknguyen at soe ucsc edu
Sep 1
"""
import os, sys, re
import immunoseq.lib.immunoseqLib as iseqlib
class Record():
def __init__(self, line):
#length identities positives eVal score tFile tId tLen qFile qId qLen tAln match qAln authors journal year title abstract fulltextUrl
items = line.strip().split('\t')
self.length = int(items[0])
self.identities = int(items[1])
self.positives = int(items[2])
self.db = items[5].split("/")[-2]
self.qId = items[9]
self.qLen = int(items[10])
self.tAln = items[11]
self.match = items[12]
self.qAln = items[13]
#self.authors = items[14]
self.title = items[17]
self.abstract = items[18]
def reformatTitle(title):
title = title.replace("_", " ")
title = title.replace(";", " ")
return title
def checkKeywords(title, keywords):
t = title.lower()
for keyword in keywords:
if re.search(keyword, t):
return True
return False
def getCloneInfo(clonestr):
#>adapt11D,adapt15D,adapt16D;111621|TRBV19|TRBJ2-1,TRBJ2-7|TRBD1-1;5,45504,1427;size=45504
items = clonestr.lstrip('>').split(';')
samples = items[0].split(',')
size = int( items[-1].lstrip('size=') )
if len(items) == 4:
sizes = items[-2].split(',')
else:
sizes = [1 for s in samples]
sample2size = {}
for i, s in enumerate(samples):
sample2size[s] = int(sizes[i])
#print sample2size
genes = items[1].split(',,')[0].split('|')
vs = genes[1].split(',')
js = genes[2].split(',')
vs = ','.join([v.lstrip("TRBV") for v in vs])
js = ','.join([j.lstrip("TRBJ") for j in js])
ds = ''
if len(genes) > 3:
ds = genes[3].split(',')
ds = ','.join([d.lstrip("TRBD") for d in ds])
return vs, js, ds, sample2size
def parsePaperInfo(paperStr):
#gnl|BL ORD ID|14836 AJ224294|Silins S.L. Submitted (12-FEB-1998) to the EMBL/GenBank/DDBJ databases. Silins S.L., Queensland Institute of Medical Research, The Bancroft Centre, 300 Herston Road, Brisbane, AUSTRALIA 4029 Silins S.L., Cross S.M., Krauer K.G., Moss D.J., Schmidt C.W., Misko I.S. "A functional link for major TCR expansions in healthy adults caused by persistent EBV-infection" J. Clin. Invest. 102(8):1551-1558(1998).|
#items = paperStr.split('"')
diseases = ["Rheumatic Heart Disease", "Autoimmune", "Ankylosing Spondylitis", "Rheumatoid Arthritis", "Reactive Arthritis", "Multiple Sclerosis", "Psoriatic Arthritis", "Spondyloarthropathy", 'Lupus', "Diabetes", "Vitiligo", "haematopoietic stem cell transplantation"]
disease2short = {"Rheumatic Heart Disease": "RHD", "Ankylosing Spondylitis":"AS", "Rheumatoid Arthritis":"RA", "Reactive Arthritis":"ReA", "Multiple Sclerosis":"MS", "Psoriatic Arthritis":"PA", "Spondyloarthropathy":"AS, SpA", 'Lupus':'SLE', 'Vitiligo': 'V', "haematopoietic stem cell transplantation": "AS"}
#title = items[1].lower()
title = paperStr.lower()
matchDiseases = []
for d in diseases:
if d.lower() in title:
if d in disease2short:
matchDiseases.append( disease2short[d] )
else:
matchDiseases.append(d)
#return items[1]
if len(matchDiseases) > 0:
return ', '.join( matchDiseases )
else:
return paperStr
#return items[1]
def checkNumSamples(title):
items = title.split(';')
samples = items[0].lstrip('>').split(',')
sample2patient={'SBC8': 'B', 'asBD':'B', 'asBR':'B', 'adaptBDdraw2':'B', 'asBDdraw2':'B',
'SBC7': '1', 'as1D':'1', 'as1R':'1',
'adapt11D': '11', 'as11R': '11'}
patients = []
for s in samples:
if s not in sample2patient:
patient = s
else:
patient = sample2patient[s]
if patient not in patients:
patients.append(patient)
return len(patients)
def checkNumPatients(title, group2sample2host, minPatientCount, minControlCount, maxPatientCount, maxControlCount):
items = title.split(';')
samples = items[0].lstrip('>').split(',')
if len(items) == 3:
sizes = [1 for s in samples]
else:
sizes = [int(size) for size in items[-2].split(',')]
#controls = ["B", "adaptBD", "asBD", "20", "as20D", "adapt20D", "adaptBDdraw2", 'asBDdraw2']
#sample2patient={'SBC8': 'B', 'asBD':'B', 'asBR':'B', 'adaptBDdraw2':'B', 'asBDdraw2':'B',
# 'SBC7': '1', 'as1D':'1', 'as1R':'1',
# 'adapt11D': '11', 'as11R': '11'}
sample2patient = {}
sample2control = {}
if 'patient' in group2sample2host:
sample2patient = group2sample2host['patient']
if 'control' in group2sample2host:
sample2control = group2sample2host['control']
patients = []
controls = []
numPatientOutofrange = 0
numControlOutofrange = 0
for i, s in enumerate(samples):
size = sizes[i]
if s in sample2patient and s not in patients:
if size >= minPatientCount and size <= maxPatientCount:
patients.append(s)
else:
numPatientOutofrange += 1
elif s in sample2control and s not in controls:
if size >= minControlCount and size <= maxControlCount:
controls.append(s)
else:
numControlOutofrange += 1
return len(patients), len(controls), numPatientOutofrange, numControlOutofrange
def readInfile(options, group2sample2host):
#infile, minPositives, minNumsams, minLen, minNumpatients, minNumcontrols, minPatientCount, minControlCount, group2sample2host):
#clones, clone2hits = readNcbiXml(options.infile, options.minPos, options.minNumSamples, options.minLen, options.minNumPatients, options.minNumControls, options.minPatientCount, options.minControlCount, group2sample2host)
#Read in blast-output file:
f = open(options.infile, 'r')
clone2hits = {} #key = cloneName, val = [ (list of hit papers, identity) ]
clones = []
f.readline()
for line in f:
#length identities positives eVal score tFile tId tLen qFile qId qLen tAln match qAln authors journal year title abstract fulltextUrl
record = Record(line)
clone = record.qId
numsams = checkNumSamples(clone)
if numsams < options.minNumSamples:
continue
numpatients, numcontrols, numPoutofrange, numCoutofrange = checkNumPatients(clone, group2sample2host, options.minPatientCount, options.minControlCount, options.maxPatientCount, options.maxControlCount)
if numpatients < options.minNumPatients or numcontrols < options.minNumControls or numPoutofrange > options.maxPatientOutofrange or numCoutofrange > options.maxControlOutofrange:
#print numpatients
#print numcontrols
continue
if record.qLen < options.minLen or record.length < record.qLen:
continue
if clone not in clones:
clones.append(clone)
#if float( record.positives )/record.qLen < options.minPos or float(record.identities)/record.qLen < options.minIden:
if float( record.positives )/record.length < options.minPos or float(record.identities)/record.length < options.minIden:
continue
if clone not in clone2hits:
clone2hits[ clone ] = [ record ]
else:
clone2hits[ clone ].append( record )
f.close()
return clones, clone2hits
def getPc(count, total):
if total == 0:
return 0
return 100.0*count/total
def getDefaultKeywords():
#autoimmuneKeywords= ['haematopoietic stem cell transplantation']
autoimmuneKeywords=['arthritis', 'ankylosing', 'spondy', 'autoreactive', 'autoantigen', 'reactive arthritis', 'rheumatoid arthritis', 'multiple sclerosis', 'self', 'cross-reactive', 'mimicry', 'synovial', 'crohn', 'psoriasis', 'inflammatory bowel disease', 'ibd', 'ulcerative colitis', 'uveitis']
#autoimmuneKeywords=['arthritis', 'ankylosing', 'spondy', 'autoreactive', 'autoantigen', 'reactive arthritis', 'rheumatoid arthritis', 'multiple sclerosis', 'self', 'cross-reactive', 'mimicry', 'synovial', 'crohn', 'psoriasis', 'inflammatory bowel disease', 'ibd', 'ulcerative colitis', 'uveitis', 'haematopoietic stem cell transplantation']
b27Keywords=['b27']
pathogenKeywords=['chlamydia', 'salmonella', 'yersinia', 'shigella', 'campylobacter', 'vipr1', 'ebv', 'epstein-barr', 'lmp2']
miceKeywords=['mice', 'murine', 'mouse']
group2keywords = {'autoimmune': autoimmuneKeywords, 'b27': b27Keywords, 'pathogen': pathogenKeywords, 'mice':miceKeywords}
return group2keywords
def printTab(clones, clone2hits, group2keywords, options, outbasename):
outfile = "%s.txt" % outbasename
fh = open(outfile, 'w')
fh.write("#MinPositives: %f; MinIdentities: %f, MinNumberOfSamples: %d; MinLen: %d\n" %(options.minPos, options.minIden, options.minNumSamples, options.minLen))
fh.write("#Keywords:\n")
for g, k in group2keywords.iteritems():
fh.write("#\t%s:\t%s\n" %(g, ','.join(k)))
cutoff = 1
numAuto = 0 #number of clones with at least one hit passed cutoff and matches one of the autoimmuneKeywords
numB27 = 0
numPathogen = 0
db2title2count = {'aai':{}, 'imgt':{}, 'pmc':{}, 'elsevier':{}}
for clone in clones:
#vs, ds, js, sample2size = getCloneInfo(clone)
items = clone.split(';')
id = '.'.join( [items[0], items[1]] )
if clone in clone2hits:
hits = clone2hits[ clone ]
matchAuto = False
matchB27 = False
matchPathogen = False
for i, hit in enumerate(hits):
if checkKeywords(hit.title, group2keywords['mice']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['mice'])): #mice studies, ignore
continue
#Check to see if any keywords matched:
if not matchAuto and ( checkKeywords(hit.title, group2keywords['autoimmune']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['autoimmune'])) ):
matchAuto = True
if not matchB27 and ( checkKeywords(hit.title, group2keywords['b27']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['b27'])) ):
matchB27 = True
if not matchPathogen and ( checkKeywords(hit.title, group2keywords['pathogen']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['pathogen'])) ):
matchPathogen = True
if matchAuto or matchB27 or matchPathogen:
fh.write("\n>%s\n" %clone)
for i, hit in enumerate(hits):
hasKeyword = False
if checkKeywords(hit.title, group2keywords['mice']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['mice'])): #mice studies, ignore
continue
for g, keywords in group2keywords.iteritems():
if checkKeywords(hit.title, keywords) or (options.abstract and checkKeywords(hit.abstract, keywords)):
hasKeyword = True
break
if hasKeyword:
fh.write("\t%d/ %s; db=%s\n" %(i, hit.title, hit.db))
fh.write("\t\t%s\n" %hit.qAln)
fh.write("\t\t%s\n" %hit.match)
fh.write("\t\t%s\n" %hit.tAln)
if hit.title not in db2title2count[hit.db]:
db2title2count[hit.db][hit.title] = 1
else:
db2title2count[hit.db][hit.title] += 1
#PRINT LENGTH OF THE CLONE:
#if i == 0 and matchAuto:
# sys.stdout.write("%d\n" % hit.qLen)
if matchAuto:
numAuto += 1
elif matchB27:
numB27 += 1
elif matchPathogen:
numPathogen += 1
for db, t2c in db2title2count.iteritems():
print "%s: %d" %(db, len(t2c.keys()))
total = len(clones)
numhits = len(clone2hits)
fh.write("\n### Summary ###\n")
fh.write("Total\tNumHits\t% hits/total\tnumAuto\t% auto/total\t% auto/hits\tnumB27\t% b27/total\t% b27/hits\tnumPathogen\t% pathogen/total\t% pathogen/hits\n")
fh.write("%d\t%d\t%f\t%d\t%f\t%f\t%d\t%f\t%f\t%d\t%f\t%f\n" %(total, numhits, getPc(numhits, total), numAuto, getPc(numAuto, total), getPc(numAuto, numhits), numB27, getPc(numB27, total), getPc(numB27, numhits), numPathogen, getPc(numPathogen, total), getPc(numPathogen, numhits)) )
fh.close()
####### LATEX TABLE ########
def myTabHeader(f, samples):
f.write("\\begin{sidewaystable}\n")
#f.write("\\begin{table}\n")
f.write("\\centering\n")
f.write("\\scalebox{0.9}{%\n")
#f.write("\\begin{tabular}{c|c|c|%s|c|c|c}\n" %( "|".join(["c" for s in samples]) ) )
f.write("\\begin{tabular}{l|l|l|%s|l|l|l}\n" %( "|".join(["l" for s in samples]) ) )
#f.write(" \\multicolumn{3}{c|}{Clones} & \\multicolumn{%d}{c|}{Samples} & \\multicolumn{3}{c}{Hits} \\\\\n" %(len(samples)) )
f.write(" \\multicolumn{3}{c|}{\\textbf{Clones}} & \\multicolumn{%d}{c|}{\\textbf{Samples}} & \\multicolumn{3}{c}{\\textbf{Hits}} \\\\\n" %(len(samples)) )
#f.write("\\cline{2-%d}\n" %( len(colnames)*2 + 1 ))
f.write("\\hline\n")
#f.write("V & CDR3 & J & %s & CDR3 & Alignment & Disease \\\\\n" %(" & ".join(samples)))
f.write("\\textbf{V} & \\textbf{CDR3} & \\textbf{J} & \\textbf{%s} & \\textbf{CDR3} & \\textbf{Alignment} & \\textbf{Disease} \\\\\n" %("} & \\textbf{".join(samples)))
f.write("\\hline\n")
def tab(f, clones, clone2hits, group2keywords, options, samples):
for clone in clones:
vs, js, ds, sample2size = getCloneInfo(clone)
if clone in clone2hits:
hits = clone2hits[clone]
hitsWithKeyword = [] #list of hits that have at least 1 keyword
for hit in hits:
if checkKeywords(hit.title, group2keywords['mice']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['mice'])): #mice studies, ignore
continue
for g, kw in group2keywords.iteritems():
if g == 'b27' or g == 'pathogen':
continue
if checkKeywords(hit.title, kw) or (options.abstract and checkKeywords(hit.abstract, kw)):
hitsWithKeyword.append(hit)
break
if len(hitsWithKeyword) == 0: #no hit with keyword
continue
seq = hits[0].qAln
numrow = len(hitsWithKeyword)
#First line
f.write("\\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & " %(numrow, vs, numrow, seq, numrow, js) ) #Write V, CDR3, J
for s in samples:
name = iseqlib.properName2name(s)
if name in sample2size:
count = sample2size[name]
f.write("\\multirow{%d}{*}{%d} & " % (numrow, count))
else:
f.write("\\multirow{%d}{*}{} & " % (numrow))
f.write("%s & %s & %s \\\\\n " %( hitsWithKeyword[0].tAln, hitsWithKeyword[0].match, parsePaperInfo(hitsWithKeyword[0].title) ))
#Other hits:
for i in xrange(1, numrow):
f.write("\\cline{%d-%d}\n" %(3 + len(samples) + 1, 3 + len(samples) + 3))
f.write(" &"*( 3 + len(samples) ) )
h = hitsWithKeyword[i]
f.write( "%s & %s & %s \\\\\n" %(h.tAln, h.match, parsePaperInfo(h.title)) )
f.write("\\hline\n")
def printTexTab(clones, clone2hits, group2keywords, options, outbasename):
outfile = "%s.tex" %outbasename
f = open(outfile, 'w')
iseqlib.writeDocumentStart(f)
samples = ['AS1', 'AS2', 'AS3', 'AS4', 'AS5', 'H1', 'H2']
myTabHeader(f, samples)
tab(f, clones, clone2hits, group2keywords, options, samples)
label = ''
captionStr = ''
#iseqlib.tableCloser(f, captionStr, label)
iseqlib.sidewaystableCloser(f, captionStr, label)
iseqlib.writeDocumentEnd(f)
f.close()
####### LATEX TABLE FORMAT 0 ===============
def myTabHeader0(f):
#f.write("\\begin{sidewaystable}\n")
f.write("\\begin{table}\n")
f.write("\\centering\n")
f.write("\\scalebox{0.4}{%\n")
f.write("\\begin{tabular}{c|c|c|c|c|c|c|c}\n")
f.write(" \\multicolumn{3}{c|}{Clones} & \\multicolumn{2}{c|}{Samples} & \\multicolumn{3}{c}{Hits} \\\\\n")
#f.write("\\cline{2-%d}\n" %( len(colnames)*2 + 1 ))
f.write("\\hline\n")
f.write("V & CDR3 & J & Name & Size & CDR3 & Alignment & Paper \\\\\n")
f.write("\\hline\n")
def tab0(f, clones, clone2hits, group2keywords, options):
for clone in clones:
vs, js, ds, sample2size = getCloneInfo(clone)
if clone in clone2hits:
hits = clone2hits[clone]
hitsWithKeyword = [] #list of hits that have at least 1 keyword
for hit in hits:
if checkKeywords(hit.title, group2keywords['mice']) or (options.abstract and checkKeywords(hit.abstract, group2keywords['mice'])): #mice studies, ignore
continue
for g, kw in group2keywords.iteritems():
if g == 'b27' or g == 'pathogen':
continue
if checkKeywords(hit.title, kw) or (options.abstract and checkKeywords(hit.abstract, kw)):
hitsWithKeyword.append(hit)
break
if len(hitsWithKeyword) == 0: #no hit with keyword
continue
seq = hits[0].qAln
samples = sorted( [iseqlib.properName(s) for s in sample2size.keys()] )
numrow = max( [len(samples), len(hitsWithKeyword)] )
f.write("\\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & " %(numrow, vs, numrow, seq, numrow, js) ) #Write V, CDR3, J
#f.write("%s & %d & %s & %s & %s \\\\\n " %( samples[0], sample2size[iseqlib.properName2name(samples[0])], hitsWithKeyword[0][4], hitsWithKeyword[0][3], hitsWithKeyword[0][0] )) #first row
f.write("%s & %d & %s & %s & %s \\\\\n " %( samples[0], sample2size[iseqlib.properName2name(samples[0])], hitsWithKeyword[0].tAln, hitsWithKeyword[0].match, parsePaperInfo(hitsWithKeyword[0].title) ))
for i in xrange(1, numrow):
f.write("\\cline{4-8}\n")
f.write(" & & & ")
if i < len(samples):
s = samples[i]
f.write(" %s & %d &" %(s, sample2size[iseqlib.properName2name(s)]) )
else:
f.write(" & & ")
if i < len(hitsWithKeyword):
h = hitsWithKeyword[i]
#f.write( "%s & %s & %s \\\\\n" %(h[4], h[3], h[0]) )
f.write( "%s & %s & %s \\\\\n" %(h.tAln, h.match, parsePaperInfo(h.title)) )
else:
f.write(" & & \\\\\n")
f.write("\\hline\n")
def printTexTab0(clones, clone2hits, group2keywords, options, outbasename):
outfile = "%s.tex" %outbasename
f = open(outfile, 'w')
iseqlib.writeDocumentStart(f)
myTabHeader(f)
tab(f, clones, clone2hits, group2keywords, options)
label = ''
captionStr = ''
iseqlib.tableCloser(f, captionStr, label)
#iseqlib.sidewaystableCloser(f, captionStr, label)
iseqlib.writeDocumentEnd(f)
f.close()
def readSample2host(file):
f = open(file, 'r')
group2sample2host = {}
for line in f:
items = line.strip().split()
if len(items) < 3:
continue
group = items[0]
sample = items[1]
host = items[2]
if group not in group2sample2host:
group2sample2host[group] = {sample:host}
else:
group2sample2host[group][sample] = host
f.close()
return group2sample2host
def addOptions(parser):
parser.add_option('-i', '--infile', dest='infile', help='Input xml file')
parser.add_option('-o', '--outdir', dest='outdir', help='Output directory')
parser.add_option('-b', '--basename', dest='basename', default='hits', help='Output files basename. Default=%default')
parser.add_option('-p', '--positive', dest='minPos', type='float', default=0.9, help='Minimum portion of positive matches. Default=%default')
parser.add_option('-I', '--identity', dest='minIden', type='float', default=0.9, help='Minimum portion of positive matches. Default=%default')
parser.add_option('-k', '--keywords', dest='keywords', default=None, help='Only hits matching at least one keyword are reported')
parser.add_option('-l', '--len', dest='minLen', type='int', default=10, help='Minimum sequence length to be included in the output. Default=%default')
parser.add_option('-s', '--samples', dest='minNumSamples', type='int', default=1, help='Minimum number of samples containing the sequence. Default=%default')
parser.add_option('--patients', dest='minNumPatients', type='int', default=0, help='Minimum number of patients containing the sequence. Default=%default')
parser.add_option('--controls', dest='minNumControls', type='int', default=0, help='Minimum number of controls containing the sequence. Default=%default')
parser.add_option('--minPatientCount', dest='minPatientCount', type='int', default=1, help='Minimum size a clone must have in a patient sample to be considered as "present" in that sample. Default=%default')
parser.add_option('--minControlCount', dest='minControlCount', type='int', default=1, help='Minimum size a clone must have in a control sample to be considered as "present" in that sample. Default=%default')
parser.add_option('--maxPatientCount', dest='maxPatientCount', type='int', default=10000000, help='Maximun size a clone must have in a patient sample to be considered as "present" in that sample. Default=%default')
parser.add_option('--maxControlCount', dest='maxControlCount', type='int', default=10000000, help='Maximun size a clone must have in a control sample to be considered as "present" in that sample. Default=%default')
parser.add_option('--maxPatientOutofrange', dest='maxPatientOutofrange', type='int', default=100, help='Max number of patients with outofrange counts allowed. Default=%default')
parser.add_option('--maxControlOutofrange', dest='maxControlOutofrange', type='int', default=100, help='Max number of controls with outofrange counts allowed. Default=%default')
parser.add_option('--sample2host', dest='sample2host', help='Optional. File contains mapping between samples and host. Format:<Group> <sample> <host>. Ex: control asBD B ')
parser.add_option('-a', '--abstract', dest='abstract', action='store_true', default=False, help='If specified, will include the abstract in the search of matching keywords')
def main():
parser = iseqlib.initOptions()
addOptions(parser)
options, args = parser.parse_args()
group2keywords = {} #key = keywordGroup, val = list of keywords
if options.keywords:
if options.keywords == '-':
group2keywords = getDefaultKeywords()
else:
group2keywords, kw2group = iseqlib.readGroup2samples(options.keywords)
group2sample2host = {}
if options.sample2host:
group2sample2host = readSample2host(options.sample2host)
#clones, clone2hits = readNcbiXml(options.infile, options.minPos, options.minNumSamples, options.minLen, options.minNumPatients, options.minNumControls, options.minPatientCount, options.minControlCount, group2sample2host)
#clones, clone2hits = readNcbiXml(options, group2sample2host)
clones, clone2hits = readInfile(options, group2sample2host)
outbasename = os.path.join(options.outdir, options.basename)
printTab(clones, clone2hits, group2keywords, options, outbasename)
printTexTab(clones, clone2hits, group2keywords, options, outbasename)
if __name__ == '__main__':
main()
| ngannguyen/immunoseq | src/parseLiteratureBlastTab_toTex.py | Python | mit | 24,929 | [
"BLAST"
] | 7366b66cf370131cc3cf6f0a264e71cb6373187b825fad2647f974f9ede1156c |
#!/usr/bin/python
# http://mcsp.wartburg.edu/zelle/python/graphics.py
# https://mcsp.wartburg.edu/zelle/python/graphics/graphics/index.html
import math
from graphics import *
# Window dimensions in pixels (landscape orientation).
XSCALE = 2550
YSCALE = 1310
# Window center point.  NOTE(review): under Python 2 this is integer
# division; both scales are even here so no precision is lost.
XCENTER = XSCALE / 2
YCENTER = YSCALE / 2
# https://en.wikipedia.org/wiki/Incircle_and_excircles_of_a_triangle#Trilinear_coordinates
# {\displaystyle \left({\frac {ax_{a}+bx_{b}+cx_{c}}{a+b+c}},{\frac {ay_{a}+by_{b}+cy_{c}}{a+b+c}}\right)={\frac {a\left(x_{a},y_{a}\right)+b\left(x_{b},y_{b}\right)+c\left(x_{c},y_{c}\right)}{a+b+c}}.}
# {ax_{a}+bx_{b}+cx_{c}}{a+b+c}},{{ay_{a}+by_{b}+cy_{c}}{a+b+c}}
def circles5(win, scale):
    """Draw a colored circle arrangement on the RBG color wheel.

    One circle at the window center, six circles (primary/secondary hues)
    whose centers sit one diameter out every 60 degrees, and six more
    circles (tertiary hues) at the centroids of each adjacent ring pair.

    win   -- GraphWin to draw into
    scale -- multiplier for the base radius of 10 units
    """
    # Primary and secondary colors of the RBG color wheel.
    red1 = color_rgb(255, 0, 0)
    green1 = color_rgb(0, 255, 0)
    blue1 = color_rgb(0, 0, 255)
    print("red1 = %s" % str(red1))
    print("green1 = %s" % str(green1))
    print("blue1 = %s" % str(blue1))
    rb_magenta1 = color_rgb(255, 0, 255)
    gb_cyan1 = color_rgb(0, 255, 255)
    rg_yellow1 = color_rgb(255, 255, 0)
    # Tertiary colors: rose, violet, azure, spring green, chartreuse, orange.
    rm_rose1 = color_rgb(255, 0, 127)
    bm_violet1 = color_rgb(127, 0, 255)
    bc_azure1 = color_rgb(0, 127, 255)
    gc_green1 = color_rgb(0, 255, 127)
    gy_chart1 = color_rgb(127, 255, 0)
    ry_orange1 = color_rgb(255, 127, 0)

    radius1 = 10 * scale
    diameter1 = radius1 * 2

    # Center circle keeps the default (black) outline.
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.setWidth(4)
    c0.draw(win)

    # Ring of six circles, centers one diameter from the window center,
    # spaced every 60 degrees, in primary/secondary hues.
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    ring_colors = [red1, rb_magenta1, blue1, gb_cyan1, green1, rg_yellow1]
    xs = []
    ys = []
    for i1 in range(npoints):
        theta1 = inc1 * i1
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        xs.append(x1)
        ys.append(y1)
        c1 = Circle(Point(x1, y1), radius1)
        c1.setOutline(ring_colors[i1])
        c1.setWidth(4)
        c1.draw(win)

    def centroid_circle(ia, ib, color):
        # Circle centered at the centroid of (window center, ring point ia,
        # ring point ib).  The original scaled every coordinate by diameter1
        # and divided it back out; that factor cancels, leaving an average.
        cx = (XCENTER + xs[ia] + xs[ib]) / 3
        cy = (YCENTER + ys[ia] + ys[ib]) / 3
        c = Circle(Point(cx, cy), radius1)
        c.setOutline(color)
        c.setWidth(4)
        c.draw(win)

    # Tertiary-colored circles between adjacent ring pairs, in the
    # original drawing order.
    centroid_circle(0, 1, rm_rose1)
    centroid_circle(2, 3, bc_azure1)
    centroid_circle(4, 5, gy_chart1)
    centroid_circle(5, 0, ry_orange1)
    centroid_circle(1, 2, bm_violet1)
    centroid_circle(3, 4, gc_green1)
def circles4(win, scale):
    """Draw a center circle, a ring of six circles one diameter out, and
    six more circles at the centroids of all adjacent ring pairs
    (default outlines, no color).

    win   -- GraphWin to draw into
    scale -- multiplier for the base radius of 10 units
    """
    radius1 = 10 * scale
    diameter1 = radius1 * 2

    # Center circle.
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.draw(win)

    # Ring of six circle centers, every 60 degrees, one diameter from center.
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    xs = []
    ys = []
    for i1 in range(npoints):
        theta1 = inc1 * i1
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        xs.append(x1)
        ys.append(y1)
        Circle(Point(x1, y1), radius1).draw(win)

    def centroid_circle(ia, ib):
        # Circle at the centroid of (window center, ring point ia, ring
        # point ib).  The original multiplied every term by diameter1 and
        # divided it back out; that factor cancels, leaving a plain average.
        cx = (XCENTER + xs[ia] + xs[ib]) / 3
        cy = (YCENTER + ys[ia] + ys[ib]) / 3
        Circle(Point(cx, cy), radius1).draw(win)

    # All six adjacent pairs, in the original drawing order.
    for ia, ib in [(0, 1), (2, 3), (4, 5), (5, 0), (1, 2), (3, 4)]:
        centroid_circle(ia, ib)
def circles3(win, scale):
    """Draw a center circle, a ring of six circles one diameter out, and
    three more circles at the centroids of alternating ring pairs.

    win   -- GraphWin to draw into
    scale -- multiplier for the base radius of 10 units
    """
    radius1 = 10 * scale
    diameter1 = radius1 * 2

    # Center circle.
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.draw(win)

    # Ring of six circle centers, every 60 degrees, one diameter from center.
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    xs = []
    ys = []
    for i1 in range(npoints):
        theta1 = inc1 * i1
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        xs.append(x1)
        ys.append(y1)
        Circle(Point(x1, y1), radius1).draw(win)

    def centroid_circle(ia, ib):
        # Circle at the centroid of (window center, ring point ia, ring
        # point ib).  The original's diameter1 scaling cancels out, so this
        # is simply the average of the three centers.
        cx = (XCENTER + xs[ia] + xs[ib]) / 3
        cy = (YCENTER + ys[ia] + ys[ib]) / 3
        Circle(Point(cx, cy), radius1).draw(win)

    # Only every other adjacent pair, in the original drawing order.
    for ia, ib in [(0, 1), (2, 3), (4, 5)]:
        centroid_circle(ia, ib)
def circles2(win, scale):
    """Draw a center circle plus a ring of six circles whose centers sit
    one diameter from the window center, spaced every 60 degrees.

    win   -- GraphWin to draw into
    scale -- multiplier for the base radius of 10 units
    """
    radius1 = 10 * scale
    diameter1 = radius1 * 2

    # Center circle.
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.draw(win)

    # Six ring circles; angle step is a full turn divided by the count.
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    for i1 in range(npoints):
        theta1 = inc1 * i1
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        Circle(Point(x1, y1), radius1).draw(win)
def circles1(win, xoffset, yoffset, scale=1.0):
    """Draw a circle at the window center plus six circles at the vertices
    of a hexagon (side 8 units, bulge 4, half-height 7) around the scaled
    and shifted offset point.

    win              -- GraphWin to draw into
    xoffset, yoffset -- unscaled offset of the hexagon from the center
    scale            -- multiplier applied to offsets and the base radius
    """
    sxoffset = xoffset * scale + XCENTER
    syoffset = yoffset * scale + YCENTER
    radius1 = 10 * scale

    # Center circle is always at the window center, not at the offset.
    Circle(Point(XCENTER, YCENTER), radius1).draw(win)

    # Hexagon-vertex offsets, in the original drawing order.
    for dx, dy in [(-4, -7), (4, -7), (8, 0), (4, 7), (-4, 7), (-8, 0)]:
        p = Point(dx * scale + sxoffset, dy * scale + syoffset)
        Circle(p, radius1).draw(win)
def main():
    """Open the drawing window, render the circle pattern, and block
    until the user clicks inside the window."""
    scale = 10.0
    win = GraphWin("circle1", XSCALE, YSCALE)
    win.setCoords(0, 0, XSCALE, YSCALE)
    circles5(win, scale)
    win.getMouse()
    win.close()
# https://math.stackexchange.com/questions/260096/find-the-coordinates-of-a-point-on-a-circle
# x = rsin(theta), y = rcos(theta)
def circle1(win, xoffset, yoffset, scale = 1.0, radius = 10.0):
    """Draw one hex at (xoffset, yoffset) plus a ring of hexes placed
    around a circle of the given radius."""
    hex1(win, xoffset, yoffset, scale)
    npoints = 100  # earlier experiments used 10 and 1
    step = (math.pi * 2) / npoints
    angle = 0.0
    for _ in range(npoints):
        hx = (math.sin(angle) * radius) + xoffset
        hy = (math.cos(angle) * radius) + yoffset
        hex1(win, hx, hy, scale)
        angle += step
# math = <module 'math' from '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload/math.so'>
# acos acos(x) Return the arc cosine (measured in radians) of x.
# acosh acosh(x) Return the inverse hyperbolic cosine of x.
# asin asin(x) Return the arc sine (measured in radians) of x.
# asinh asinh(x) Return the inverse hyperbolic sine of x.
# atan atan(x) Return the arc tangent (measured in radians) of x.
# atan2 atan2(y, x) Return the arc tangent (measured in radians) of y/x. Unlike atan(y/x), the signs of both x and y are considered.
# atanh atanh(x) Return the inverse hyperbolic tangent of x.
# ceil ceil(x) Return the ceiling of x as a float. This is the smallest integral value >= x.
# copysign copysign(x, y) Return x with the sign of y.
# cos cos(x) Return the cosine of x (measured in radians).
# cosh cosh(x) Return the hyperbolic cosine of x.
# degrees degrees(x) Convert angle x from radians to degrees.
# erf erf(x) Error function at x.
# erfc erfc(x) Complementary error function at x.
# exp exp(x) Return e raised to the power of x.
# expm1 expm1(x) Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
# fabs fabs(x) Return the absolute value of the float x.
# factorial factorial(x) -> Integral Find x!. Raise a ValueError if x is negative or non-integral.
# floor floor(x) Return the floor of x as a float. This is the largest integral value <= x.
# fmod fmod(x, y) Return fmod(x, y), according to platform C. x % y may differ.
# frexp frexp(x) Return the mantissa and exponent of x, as pair (m, e). m is a float and e is an int, such that x = m * 2.**e. If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
# fsum fsum(iterable) Return an accurate floating point sum of values in the iterable. Assumes IEEE-754 floating point arithmetic.
# gamma gamma(x) Gamma function at x.
# hypot hypot(x, y) Return the Euclidean distance, sqrt(x*x + y*y).
# isinf isinf(x) -> bool Check if float x is infinite (positive or negative).
# isnan isnan(x) -> bool Check if float x is not a number (NaN).
# ldexp ldexp(x, i) Return x * (2**i).
# lgamma lgamma(x) Natural logarithm of absolute value of Gamma function at x.
# log log(x[, base]) Return the logarithm of x to the given base. If the base not specified, returns the natural logarithm (base e) of x.
# log10 log10(x) Return the base 10 logarithm of x.
# log1p log1p(x) Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero.
# modf modf(x) Return the fractional and integer parts of x. Both results carry the sign of x and are floats.
# pow pow(x, y) Return x**y (x to the power of y).
# radians radians(x) Convert angle x from degrees to radians.
# sin sin(x) Return the sine of x (measured in radians).
# sinh sinh(x) Return the hyperbolic sine of x.
# sqrt sqrt(x) Return the square root of x.
# tan tan(x) Return the tangent of x (measured in radians).
# tanh tanh(x) Return the hyperbolic tangent of x.
# trunc trunc(x:Real) -> Integral Truncates x to the nearest Integral toward 0. Uses the __trunc__ magic method.
# math.pi = 3.14159265359
# math.e = 2.71828182846
# phi = 1.61803398875
def hex1(win, xoffset, yoffset, scale = 1.0):
    """Draw one flat-topped hexagon centered at (xoffset, yoffset),
    in grid units scaled by `scale` relative to the window center."""
    sx = xoffset * scale + XCENTER
    sy = yoffset * scale + YCENTER
    # Corner offsets of the hexagon, clockwise from the upper-left edge.
    corners = ((-4, -7), (4, -7), (8, 0), (4, 7), (-4, 7), (-8, 0))
    points = [Point(cx * scale + sx, cy * scale + sy) for cx, cy in corners]
    Polygon(*points).draw(win)
def old_main():
    """Render concentric rings of hexagons (rings 0-4 plus three labelled
    ring-5 hexes), mark the center in red, and wait for a click."""
    scale = 7.7
    win = GraphWin("hex2", XSCALE, YSCALE)
    win.setCoords(0, 0, XSCALE, YSCALE)
    # (x, y) offsets of every hex, ring by ring, in exactly the order the
    # original hand-written calls drew them.
    offsets = (
        # ring 0: center
        (0, 0),
        # ring 1: 6 hexes
        (12, 7), (12, -7), (0, -14), (-12, -7), (-12, 7), (0, 14),
        # ring 2: 12 hexes, one o'clock around to twelve o'clock
        (12, 21), (24, 14), (24, 0), (24, -14), (12, -21), (0, -28),
        (-12, -21), (-24, -14), (-24, 0), (-24, 14), (-12, 21), (0, 28),
        # ring 3: 18 hexes
        (12, 35), (24, 28), (36, 21), (36, 7), (36, -7), (36, -21),
        (24, -28), (12, -35), (0, -42), (-12, -35), (-24, -28), (-36, -21),
        (-36, -7), (-36, 7), (-36, 21), (-24, 28), (-12, 35), (0, 42),
        # ring 4: 24 hexes
        (12, 49), (24, 42), (36, 35), (48, 28), (48, 14), (48, 0),
        (48, -14), (48, -28), (36, -35), (24, -42), (12, -49), (0, -56),
        (-12, -49), (-24, -42), (-36, -35), (-48, -28), (-48, -14),
        (-48, 0), (-48, 14), (-48, 28), (-36, 35), (-24, 42), (-12, 49),
        (0, 56),
    )
    for hx, hy in offsets:
        hex1(win, hx, hy, scale)
    # Three labelled ring-5 hexes (top, lower-right axis, lower-left axis).
    for hx, hy, label in ((0, 70, "62"), (60, -35, "47"), (-60, -35, "55")):
        hex1(win, hx, hy, scale)
        t = Text(Point(XCENTER + hx * scale, YCENTER + hy * scale), label)
        t.draw(win)
    # Mark the center point in red and draw a guide line to ring 1.
    p0 = Point(XCENTER, YCENTER)
    p0.setFill("red")
    p0.setOutline("red")
    p0.draw(win)
    p1 = Point(XCENTER + 12 * scale, YCENTER + 7 * scale)
    l1 = Line(p0, p1)
    l1.setFill("red")
    l1.draw(win)
    t = Text(Point(XCENTER, YCENTER), "0")
    t.draw(win)
    win.getMouse()
    win.close()
# Script entry point: render the figure and wait for a mouse click.
main()
#
#
# __
#/ \
#\__/
#
# ____
# / \
#/ \
#\ /
# \____/
#
# 5
# __ __
# / \
# 4 3
# / 0 \ 000000
# \ /
# 1 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 1 \ 000001
# \ /
# 1 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 2 \ 000010
# \ /
# 1 \ 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 3 \ 000011
# \ /
# 1 \ 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 4 \ 000100
# \ /
# 1 / 2
# \__ __/
# 0
#
#
# 5
# ______
# / \
# 4 / \ 3
# / 61 \ 111101
# \ /
# 1 / 2
# \______/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 62 \ 111110
# \ /
# 1 \ / 2
# \__ __/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 63 \ 111111
# \ /
# 1 \ / 2
# \______/
# 0
| jtraver/dev | python/graphics/circles5.py | Python | mit | 23,699 | [
"Amber"
] | aab30632b948885eaef0c7b5e97928eb8b618e69706053f443f444cb0f85bd6d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import itertools
from pymatgen.core.lattice import Lattice
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.operations import SymmOp
class LatticeTestCase(PymatgenTest):
    """Unit tests for pymatgen.core.lattice.Lattice.

    Covers construction from parameters/matrices, fractional/Cartesian
    coordinate conversion, LLL and Niggli reduction, lattice mappings,
    distance utilities and (de)serialization.
    """

    def setUp(self):
        self.lattice = Lattice.cubic(10.0)
        self.cubic = self.lattice
        self.tetragonal = Lattice.tetragonal(10, 20)
        self.orthorhombic = Lattice.orthorhombic(10, 20, 30)
        self.monoclinic = Lattice.monoclinic(10, 20, 30, 66)
        self.hexagonal = Lattice.hexagonal(10, 20)
        self.rhombohedral = Lattice.rhombohedral(10, 77)

        family_names = ["cubic", "tetragonal", "orthorhombic", "monoclinic",
                        "hexagonal", "rhombohedral"]

        self.families = {}
        for name in family_names:
            self.families[name] = getattr(self, name)

    def test_format(self):
        self.assertEqual("[[10.000, 0.000, 0.000], [0.000, 10.000, 0.000], [0.000, 0.000, 10.000]]",
                         format(self.lattice, ".3fl"))
        self.assertEqual(
            """10.000 0.000 0.000
0.000 10.000 0.000
0.000 0.000 10.000""",
            format(self.lattice, ".3f"))
        self.assertEqual("{10.0, 10.0, 10.0, 90.0, 90.0, 90.0}",
                         format(self.lattice, ".1fp"))

    def test_init(self):
        a = 9.026
        lattice = Lattice.cubic(a)
        self.assertIsNotNone(lattice, "Initialization from new_cubic failed")
        lattice2 = Lattice([[a, 0, 0], [0, a, 0], [0, 0, a]])
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertAlmostEqual(lattice.matrix[i][j],
                                       lattice2.matrix[i][j], 5,
                                       "Inconsistent matrix from two inits!")

    def test_copy(self):
        cubic_copy = self.cubic.copy()
        self.assertTrue(cubic_copy == self.cubic)
        # A copy must not share the underlying matrix object.
        self.assertFalse(cubic_copy._matrix is self.cubic._matrix)

    def test_get_cartesian_or_frac_coord(self):
        coord = self.lattice.get_cartesian_coords([0.15, 0.3, 0.4])
        self.assertArrayAlmostEqual(coord, [1.5, 3., 4.])
        self.assertArrayAlmostEqual(
            self.tetragonal.get_fractional_coords([12.12312, 45.2134,
                                                   1.3434]),
            [1.212312, 4.52134, 0.06717])

        #Random testing that get_cart and get_frac coords reverses each other.
        rand_coord = np.random.random_sample(3)
        coord = self.tetragonal.get_cartesian_coords(rand_coord)
        fcoord = self.tetragonal.get_fractional_coords(coord)
        self.assertArrayAlmostEqual(fcoord, rand_coord)

    def test_d_hkl(self):
        cubic_copy = self.cubic.copy()
        hkl = (1,2,3)
        # Analytic d-spacing of a cubic lattice: a / sqrt(h^2 + k^2 + l^2).
        dhkl = ((hkl[0]**2 + hkl[1]**2 + hkl[2]**2)/(cubic_copy.a**2))**(-1/2)
        self.assertEqual(dhkl, cubic_copy.d_hkl(hkl))

    def test_reciprocal_lattice(self):
        recip_latt = self.lattice.reciprocal_lattice
        self.assertArrayAlmostEqual(recip_latt.matrix,
                                    0.628319 * np.eye(3), 5)
        self.assertArrayAlmostEqual(self.tetragonal.reciprocal_lattice.matrix,
                                    [[0.628319, 0., 0.], [0., 0.628319, 0],
                                     [0., 0., 0.3141590]], 5)

        #Test the crystallographic version.
        recip_latt_xtal = self.lattice.reciprocal_lattice_crystallographic
        self.assertArrayAlmostEqual(recip_latt.matrix,
                                    recip_latt_xtal.matrix * 2 * np.pi,
                                    5)

    def test_static_methods(self):
        lengths_c = [3.840198, 3.84019885, 3.8401976]
        angles_c = [119.99998575, 90, 60.00000728]
        mat_c = [[3.840198, 0.000000, 0.0000], [1.920099, 3.325710, 0.000000],
                 [0.000000, -2.217138, 3.135509]]
        #should give the lengths and angles above
        newlatt = Lattice(mat_c)
        (lengths, angles) = newlatt.lengths_and_angles
        for i in range(0, 3):
            self.assertAlmostEqual(lengths[i], lengths_c[i], 5,
                                   "Lengths incorrect!")
            self.assertAlmostEqual(angles[i], angles_c[i], 5,
                                   "Angles incorrect!")
        (lengths, angles) = \
            Lattice.from_lengths_and_angles(lengths, angles).lengths_and_angles
        for i in range(0, 3):
            self.assertAlmostEqual(lengths[i], lengths_c[i], 5,
                                   "Lengths incorrect!")
            self.assertAlmostEqual(angles[i], angles_c[i], 5,
                                   "Angles incorrect!")

    def test_attributes(self):
        """docstring for test_attributes"""
        lattice = Lattice.cubic(10.0)
        self.assertEqual(lattice.a, 10.0)
        self.assertEqual(lattice.b, 10.0)
        self.assertEqual(lattice.c, 10.0)
        self.assertAlmostEqual(lattice.volume, 1000.0)
        xyz = lattice.get_cartesian_coords([0.25, 0.35, 0.45])
        self.assertEqual(xyz[0], 2.5)
        self.assertEqual(xyz[1], 3.5)
        self.assertEqual(xyz[2], 4.5)

    def test_consistency(self):
        """
        when only lengths and angles are given for constructors, the
        internal matrix representation is ambiguous since the lattice rotation
        is not specified.
        This test makes sure that a consistent definition is specified for the
        lattice rotation when using different constructors from lengths angles
        """
        l = [3.840198, 3.84019885, 3.8401976]
        a = [119.99998575, 90, 60.00000728]
        mat1 = Lattice.from_lengths_and_angles(l, a).matrix
        mat2 = Lattice.from_parameters(l[0], l[1], l[2],
                                       a[0], a[1], a[2]).matrix
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertAlmostEqual(mat1[i][j], mat2[i][j], 5)

    def test_lattice_matricies(self):
        """
        If alpha == 90 and beta == 90, two matricies are identical.
        """

        def _identical(a, b, c, alpha, beta, gamma):
            mat1 = Lattice.from_parameters(a, b, c, alpha, beta, gamma, False).matrix
            mat2 = Lattice.from_parameters(a, b, c, alpha, beta, gamma, True).matrix
            # self.assertArrayAlmostEqual(mat1, mat2)
            return ((mat1 - mat2)**2).sum() < 1e-6

        self.assertTrue(_identical(2, 3, 4, 90, 90, 90))
        self.assertTrue(_identical(2, 3, 4, 90, 90, 80))
        self.assertTrue(_identical(2, 3, 4, 90, 90, 100))

        self.assertFalse(_identical(2, 3, 4, 100, 90, 90))
        self.assertFalse(_identical(2, 3, 4, 90, 100, 90))
        self.assertFalse(_identical(2, 3, 4, 100, 100, 100))

    def test_get_lll_reduced_lattice(self):
        lattice = Lattice([1.0, 1, 1, -1.0, 0, 2, 3.0, 5, 6])
        reduced_latt = lattice.get_lll_reduced_lattice()

        expected_ans = Lattice(np.array(
            [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -2.0, 0.0, 1.0]).reshape((3, 3)))
        self.assertAlmostEqual(
            np.linalg.det(np.linalg.solve(expected_ans.matrix,
                                          reduced_latt.matrix)),
            1)
        self.assertArrayAlmostEqual(
            sorted(reduced_latt.abc), sorted(expected_ans.abc))
        self.assertAlmostEqual(reduced_latt.volume, lattice.volume)
        latt = [7.164750, 2.481942, 0.000000,
                - 4.298850, 2.481942, 0.000000,
                0.000000, 0.000000, 14.253000]
        expected_ans = Lattice(np.array(
            [-4.298850, 2.481942, 0.000000, 2.865900, 4.963884, 0.000000,
             0.000000, 0.000000, 14.253000]))
        reduced_latt = Lattice(latt).get_lll_reduced_lattice()
        self.assertAlmostEqual(
            np.linalg.det(np.linalg.solve(expected_ans.matrix,
                                          reduced_latt.matrix)),
            1)
        self.assertArrayAlmostEqual(
            sorted(reduced_latt.abc), sorted(expected_ans.abc))

        expected_ans = Lattice([0.0, 10.0, 10.0,
                                10.0, 10.0, 0.0,
                                30.0, -30.0, 40.0])

        lattice = np.array([100., 0., 10., 10., 10., 20., 10., 10., 10.])
        lattice = lattice.reshape(3, 3)
        lattice = Lattice(lattice.T)
        reduced_latt = lattice.get_lll_reduced_lattice()
        self.assertAlmostEqual(
            np.linalg.det(np.linalg.solve(expected_ans.matrix,
                                          reduced_latt.matrix)),
            1)
        self.assertArrayAlmostEqual(
            sorted(reduced_latt.abc), sorted(expected_ans.abc))

        random_latt = Lattice(np.random.random((3, 3)))
        if np.linalg.det(random_latt.matrix) > 1e-8:
            reduced_random_latt = random_latt.get_lll_reduced_lattice()
            self.assertAlmostEqual(reduced_random_latt.volume,
                                   random_latt.volume)

    def test_get_niggli_reduced_lattice(self):
        latt = Lattice.from_parameters(3, 5.196, 2, 103 + 55 / 60,
                                       109 + 28 / 60,
                                       134 + 53 / 60)
        reduced_cell = latt.get_niggli_reduced_lattice()
        abc, angles = reduced_cell.lengths_and_angles
        self.assertAlmostEqual(abc[0], 2, 3)
        self.assertAlmostEqual(abc[1], 3, 3)
        self.assertAlmostEqual(abc[2], 3, 3)
        self.assertAlmostEqual(angles[0], 116.382855225, 3)
        self.assertAlmostEqual(angles[1], 94.769790287999996, 3)
        self.assertAlmostEqual(angles[2], 109.466666667, 3)

        mat = [[5.0, 0, 0], [0, 5.0, 0], [5.0, 0, 5.0]]
        latt = Lattice(np.dot([[1, 1, 1], [1, 1, 0], [0, 1, 1]], mat))
        reduced_cell = latt.get_niggli_reduced_lattice()
        abc, angles = reduced_cell.lengths_and_angles
        for l in abc:
            self.assertAlmostEqual(l, 5, 3)
        for a in angles:
            self.assertAlmostEqual(a, 90, 3)

        latt = Lattice([1.432950, 0.827314, 4.751000, -1.432950, 0.827314,
                        4.751000, 0.0, -1.654628, 4.751000])
        ans = [[-1.432950, -2.481942, 0.0],
               [-2.8659, 0.0, 0.0],
               [-1.432950, -0.827314, -4.751000]]
        self.assertArrayAlmostEqual(latt.get_niggli_reduced_lattice().matrix,
                                    ans)

        latt = Lattice.from_parameters(7.365450, 6.199506, 5.353878,
                                       75.542191, 81.181757, 156.396627)
        ans = [[2.578932, 0.826965, 0.000000],
               [-0.831059, 2.067413, 1.547813],
               [-0.458407, -2.480895, 1.129126]]
        self.assertArrayAlmostEqual(latt.get_niggli_reduced_lattice().matrix,
                                    np.array(ans), 5)

    def test_find_mapping(self):
        m = np.array([[0.1, 0.2, 0.3], [-0.1, 0.2, 0.7], [0.6, 0.9, 0.2]])
        latt = Lattice(m)

        op = SymmOp.from_origin_axis_angle([0, 0, 0], [2, 3, 3], 35)
        rot = op.rotation_matrix
        scale = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])

        latt2 = Lattice(np.dot(rot, np.dot(scale, m).T).T)
        (aligned_out, rot_out, scale_out) = latt2.find_mapping(latt)
        self.assertAlmostEqual(abs(np.linalg.det(rot)), 1)

        rotated = SymmOp.from_rotation_and_translation(rot_out).operate_multi(latt.matrix)

        self.assertArrayAlmostEqual(rotated, aligned_out.matrix)
        self.assertArrayAlmostEqual(np.dot(scale_out, latt2.matrix), aligned_out.matrix)
        self.assertArrayAlmostEqual(aligned_out.lengths_and_angles, latt.lengths_and_angles)
        self.assertFalse(np.allclose(aligned_out.lengths_and_angles,
                                     latt2.lengths_and_angles))

    def test_find_all_mappings(self):
        m = np.array([[0.1, 0.2, 0.3], [-0.1, 0.2, 0.7], [0.6, 0.9, 0.2]])
        latt = Lattice(m)

        op = SymmOp.from_origin_axis_angle([0, 0, 0], [2, -1, 3], 40)
        rot = op.rotation_matrix
        scale = np.array([[0, 2, 0], [1, 1, 0], [0,0,1]])

        latt2 = Lattice(np.dot(rot, np.dot(scale, m).T).T)

        for (aligned_out, rot_out, scale_out) in latt.find_all_mappings(latt2):
            self.assertArrayAlmostEqual(np.inner(latt2.matrix, rot_out),
                                        aligned_out.matrix, 5)
            self.assertArrayAlmostEqual(np.dot(scale_out, latt.matrix),
                                        aligned_out.matrix)
            self.assertArrayAlmostEqual(aligned_out.lengths_and_angles, latt2.lengths_and_angles)
            self.assertFalse(np.allclose(aligned_out.lengths_and_angles,
                                         latt.lengths_and_angles))

        latt = Lattice.orthorhombic(9, 9, 5)
        self.assertEqual(len(list(latt.find_all_mappings(latt))), 16)

        #catch the singular matrix error
        latt = Lattice.from_lengths_and_angles([1,1,1], [10,10,10])
        for l, _, _ in latt.find_all_mappings(latt, ltol=0.05, atol=11):
            self.assertTrue(isinstance(l, Lattice))

    def test_mapping_symmetry(self):
        l = Lattice.cubic(1)
        l2 = Lattice.orthorhombic(1.1001, 1, 1)
        self.assertEqual(l.find_mapping(l2, ltol=0.1), None)
        self.assertEqual(l2.find_mapping(l, ltol=0.1), None)
        l2 = Lattice.orthorhombic(1.0999, 1, 1)
        self.assertNotEqual(l2.find_mapping(l, ltol=0.1), None)
        self.assertNotEqual(l.find_mapping(l2, ltol=0.1), None)

    def test_to_from_dict(self):
        d = self.tetragonal.as_dict()
        t = Lattice.from_dict(d)
        for i in range(3):
            self.assertEqual(t.abc[i], self.tetragonal.abc[i])
            self.assertEqual(t.angles[i], self.tetragonal.angles[i])
        #Make sure old style dicts work.
        d = self.tetragonal.as_dict(verbosity=1)
        del d["matrix"]
        t = Lattice.from_dict(d)
        for i in range(3):
            self.assertEqual(t.abc[i], self.tetragonal.abc[i])
            self.assertEqual(t.angles[i], self.tetragonal.angles[i])

    def test_scale(self):
        new_volume = 10
        for (family_name, lattice) in self.families.items():
            new_lattice = lattice.scale(new_volume)
            self.assertAlmostEqual(new_lattice.volume, new_volume)
            self.assertArrayAlmostEqual(new_lattice.angles, lattice.angles)

    def test_get_wigner_seitz_cell(self):
        ws_cell = Lattice([[10, 0, 0], [0, 5, 0], [0, 0, 1]])\
            .get_wigner_seitz_cell()
        self.assertEqual(6, len(ws_cell))
        for l in ws_cell[3]:
            self.assertEqual([abs(i) for i in l], [5.0, 2.5, 0.5])

    def test_dot_and_norm(self):
        frac_basis = [[1,0,0], [0,1,0], [0,0,1]]

        for family_name, lattice in self.families.items():
            #print(family_name)
            self.assertArrayEqual(lattice.norm(lattice.matrix, frac_coords=False), lattice.abc)
            self.assertArrayEqual(lattice.norm(frac_basis), lattice.abc)
            for (i, vec) in enumerate(frac_basis):
                length = lattice.norm(vec)
                self.assertArrayEqual(length[0], lattice.abc[i])
                # We always get a ndarray.
                self.assertTrue(hasattr(length, "shape"))

        # Passing complex arrays should raise TypeError
        # (np.complex was removed in NumPy 1.24; the builtin complex is the
        # same scalar type and works on all NumPy versions.)
        with self.assertRaises(TypeError):
            lattice.norm(np.zeros(3, dtype=complex))

        # Cannot reshape the second argument.
        with self.assertRaises(ValueError):
            lattice.dot(np.zeros(6), np.zeros(8))

        # Passing vectors of different length is invalid.
        with self.assertRaises(ValueError):
            lattice.dot(np.zeros(3), np.zeros(6))

    def test_get_points_in_sphere(self):
        # This is a non-niggli representation of a cubic lattice
        latt = Lattice([[1,5,0],[0,1,0],[5,0,1]])
        # evenly spaced points array between 0 and 1
        pts = np.array(list(itertools.product(range(5), repeat=3))) / 5
        pts = latt.get_fractional_coords(pts)

        self.assertEqual(len(latt.get_points_in_sphere(
            pts, [0, 0, 0], 0.20001)), 7)
        self.assertEqual(len(latt.get_points_in_sphere(
            pts, [0.5, 0.5, 0.5], 1.0001)), 552)

    def test_get_all_distances(self):
        fcoords = np.array([[0.3, 0.3, 0.5],
                            [0.1, 0.1, 0.3],
                            [0.9, 0.9, 0.8],
                            [0.1, 0.0, 0.5],
                            [0.9, 0.7, 0.0]])
        lattice = Lattice.from_lengths_and_angles([8, 8, 4],
                                                  [90, 76, 58])
        expected = np.array([[0.000, 3.015, 4.072, 3.519, 3.245],
                             [3.015, 0.000, 3.207, 1.131, 4.453],
                             [4.072, 3.207, 0.000, 2.251, 1.788],
                             [3.519, 1.131, 2.251, 0.000, 3.852],
                             [3.245, 4.453, 1.788, 3.852, 0.000]])
        output = lattice.get_all_distances(fcoords, fcoords)
        self.assertArrayAlmostEqual(output, expected, 3)
        #test just one input point
        output2 = lattice.get_all_distances(fcoords[0], fcoords)
        self.assertArrayAlmostEqual(output2, [expected[0]], 2)
        #test distance when initial points are not in unit cell
        f1 = [0, 0, 17]
        f2 = [0, 0, 10]
        self.assertEqual(lattice.get_all_distances(f1, f2)[0, 0], 0)

    def test_monoclinic(self):
        lengths, angles = self.monoclinic.lengths_and_angles
        self.assertNotAlmostEqual(angles[1], 90)
        self.assertAlmostEqual(angles[0], 90)
        self.assertAlmostEqual(angles[2], 90)

    def test_is_hexagonal(self):
        self.assertFalse(self.cubic.is_hexagonal())
        self.assertFalse(self.tetragonal.is_hexagonal())
        self.assertFalse(self.orthorhombic.is_hexagonal())
        self.assertFalse(self.monoclinic.is_hexagonal())
        self.assertFalse(self.rhombohedral.is_hexagonal())
        self.assertTrue(self.hexagonal.is_hexagonal())

    def test_get_distance_and_image(self):
        dist, image = self.cubic.get_distance_and_image([0, 0, 0.1],
                                                        [0, 0., 0.9])
        self.assertAlmostEqual(dist, 2)
        self.assertArrayAlmostEqual(image, [0, 0, -1])

    def test_get_distance_and_image_strict(self):
        # Brute-force check of minimum-image distances against a full
        # enumeration of lattice translations in [-3, 3]^3.
        for count in range(10):
            lengths = [np.random.randint(1, 100) for i in range(3)]
            lattice = [np.random.rand(3) * lengths[i]
                       for i in range(3)]
            lattice = Lattice(np.array(lattice))

            f1 = np.random.rand(3)
            f2 = np.random.rand(3)

            scope = list(range(-3, 4))
            min_image_dist = (float("inf"), None)
            for image in itertools.product(scope, scope, scope):
                cart = lattice.get_cartesian_coords(f1 - (f2 + image))
                dist = np.dot(cart, cart) ** 0.5
                if dist < min_image_dist[0]:
                    min_image_dist = (dist, image)

            pmg_result = lattice.get_distance_and_image(f1, f2)
            self.assertGreaterEqual(min_image_dist[0] + 1e-7, pmg_result[0])
            if abs(min_image_dist[0] - pmg_result[0]) < 1e-12:
                self.assertArrayAlmostEqual(min_image_dist[1], pmg_result[1])

    def test_lll_basis(self):
        a = np.array([1., 0.1, 0.])
        b = np.array([0., 2., 0.])
        c = np.array([0., 0., 3.])

        l1 = Lattice([a, b, c])
        l2 = Lattice([a + b, b + c, c])

        ccoords = np.array([[1, 1, 2], [2, 2, 1.5]])
        l1_fcoords = l1.get_fractional_coords(ccoords)
        l2_fcoords = l2.get_fractional_coords(ccoords)

        self.assertArrayAlmostEqual(l1.matrix, l2.lll_matrix)
        self.assertArrayAlmostEqual(np.dot(l2.lll_mapping, l2.matrix),
                                    l1.matrix)

        self.assertArrayAlmostEqual(np.dot(l2_fcoords, l2.matrix),
                                    np.dot(l1_fcoords, l1.matrix))

        lll_fcoords = l2.get_lll_frac_coords(l2_fcoords)

        self.assertArrayAlmostEqual(lll_fcoords, l1_fcoords)
        self.assertArrayAlmostEqual(l1.get_cartesian_coords(lll_fcoords),
                                    np.dot(lll_fcoords, l2.lll_matrix))

        self.assertArrayAlmostEqual(l2.get_frac_coords_from_lll(lll_fcoords),
                                    l2_fcoords)
# Allow running this test module directly.
if __name__ == '__main__':
    import unittest
    unittest.main()
| gpetretto/pymatgen | pymatgen/core/tests/test_lattice.py | Python | mit | 20,579 | [
"pymatgen"
] | 1aa8dfba3b269141a7ad7e3a781bc6cd4129bcfe5a3d84232611f89b87645a16 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
@utx.skipIfMissingFeatures("ROTATION")
class Rotation(ut.TestCase):
    """Tests for per-axis rotational degrees of freedom of particles."""

    # Shared simulation system: unit box, zero Verlet skin, fixed time step.
    s = espressomd.System(box_l=[1.0, 1.0, 1.0])
    s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
    s.cell_system.skin = 0
    s.time_step = 0.01

    def test_langevin(self):
        """Applies langevin thermostat and checks that correct axes get
        thermalized"""
        s = self.s
        s.thermostat.set_langevin(gamma=1, kT=1, seed=42)
        # Try all 8 combinations of per-axis rotation flags.
        for x in 0, 1:
            for y in 0, 1:
                for z in 0, 1:
                    s.part.clear()
                    # Fresh particle with identity orientation and zero
                    # angular velocity/torque.
                    s.part.add(id=0, pos=(0, 0, 0), rotation=(x, y, z),
                               quat=(1, 0, 0, 0), omega_body=(0, 0, 0),
                               torque_lab=(0, 0, 0))
                    s.integrator.run(500)
                    self.validate(x, 0)
                    self.validate(y, 1)
                    self.validate(z, 2)

    def validate(self, rotate, coord):
        """Check that omega_body[coord] was thermalized iff rotation about
        that body axis is enabled."""
        if rotate:
            # self.assertNotEqual(self.s.part[0].torque_body[coord],0)
            self.assertNotEqual(self.s.part[0].omega_body[coord], 0)
        else:
            # self.assertEqual(self.s.part[0].torque_body[coord],0)
            self.assertEqual(self.s.part[0].omega_body[coord], 0)

    @utx.skipIfMissingFeatures("EXTERNAL_FORCES")
    def test_axes_changes(self):
        """Verifies that rotation axes in body and space frame stay the same
        and other axes don't"""
        s = self.s
        s.part.clear()
        # Constant external torque about all axes drives the rotation.
        s.part.add(id=0, pos=(0.9, 0.9, 0.9), ext_torque=(1, 1, 1))
        s.thermostat.turn_off()
        for dir in 0, 1, 2:
            # Reset orientation
            s.part[0].quat = [1, 0, 0, 0]

            # Enable rotation in a single direction
            rot = [0, 0, 0]
            rot[dir] = 1
            s.part[0].rotation = rot

            s.integrator.run(30)

            s.integrator.run(100)

            # Check other axes:
            for axis in [1, 0, 0], [0, 1, 0], [0, 0, 1]:
                if rot == axis:
                    # The axis for which rotation is on should coincide in body
                    # and space frame
                    self.assertAlmostEqual(
                        np.dot(rot, s.part[0].convert_vector_body_to_space(rot)), 1, places=8)
                else:
                    # For non-rotation axis, body and space frame should differ
                    self.assertLess(
                        np.dot(axis, s.part[0].convert_vector_body_to_space(axis)), 0.95)

    def test_frame_conversion_and_rotation(self):
        """Round-trips vectors between body and space frames before and
        after explicit particle rotations."""
        s = self.s
        s.part.clear()
        p = s.part.add(pos=np.random.random(3), rotation=(1, 1, 1))

        # Space and body frame co-incide?
        np.testing.assert_allclose(
            np.copy(p.director), p.convert_vector_body_to_space((0, 0, 1)), atol=1E-10)

        # Random vector should still co-incide
        v = (1., 5.5, 17)
        np.testing.assert_allclose(
            v, p.convert_vector_space_to_body(v), atol=1E-10)
        np.testing.assert_allclose(
            v, p.convert_vector_body_to_space(v), atol=1E-10)

        # Particle rotation
        p.rotate((1, 2, 0), np.pi / 4)
        # Check angle for director
        self.assertAlmostEqual(
            np.arccos(np.dot(p.director, (0, 0, 1))), np.pi / 4, delta=1E-10)
        # Check other vector
        v = (5, -7, 3)
        v_r = p.convert_vector_body_to_space(v)
        # Rotation preserves vector length.
        self.assertAlmostEqual(np.dot(v, v), np.dot(v_r, v_r), delta=1e-10)
        np.testing.assert_allclose(
            p.convert_vector_space_to_body(v_r), v, atol=1E-10)

        # Rotation axis should co-incide
        np.testing.assert_allclose(
            (1, 2, 0), p.convert_vector_body_to_space((1, 2, 0)))

        # Check rotation axis with all elements set
        p.rotate(axis=(-5, 2, 17), angle=1.)
        v = (5, -7, 3)
        v_r = p.convert_vector_body_to_space(v)
        self.assertAlmostEqual(np.dot(v, v), np.dot(v_r, v_r), delta=1e-10)
        np.testing.assert_allclose(
            p.convert_vector_space_to_body(v_r), v, atol=1E-10)
# Run the test suite when executed directly.
if __name__ == "__main__":
    ut.main()
| psci2195/espresso-ffans | testsuite/python/rotation_per_particle.py | Python | gpl-3.0 | 4,942 | [
"ESPResSo"
] | 3eef171ee7ad394438b97e1c15bc73c8d7de51bb5bdf8849fe6042aefc355348 |
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
from http import cookies
import dbSession
import pymysql
import ghShared
import ghLists
import ghNames
import dbShared
from jinja2 import Environment, FileSystemLoader
import ghObjectSchematic
def getQualityData(conn, schematicID):
    """Return experimentation quality data for a schematic.

    The result is a list with one sub-list per experimentation property;
    each sub-list holds [statName, normalizedWeight] pairs, where the
    weight is statWeight / weightTotal.

    conn -- open DB-API connection (pymysql)
    schematicID -- schematic to look up; treated as untrusted input
    """
    qualityData = []
    propertyData = []
    expGroup = ''
    expProp = ''
    expCursor = conn.cursor()
    # Parameterized query: schematicID originates from the request and must
    # never be concatenated into the SQL string (SQL injection).
    expCursor.execute('SELECT tSchematicQualities.expQualityID, expProperty, expGroup, statName, statWeight, weightTotal FROM tSchematicQualities INNER JOIN tSchematicResWeights ON tSchematicQualities.expQualityID = tSchematicResWeights.expQualityID WHERE schematicID=%s ORDER BY expGroup, expProperty, statName;', [schematicID])
    expRow = expCursor.fetchone()
    while (expRow != None):
        if (expGroup != expRow[2]):
            expGroup = expRow[2]
        if (expProp != expRow[1]):
            # Property changed: flush the accumulated stat list.
            if len(propertyData) > 0:
                qualityData.append(propertyData)
            propertyData = []
            expProp = expRow[1]
        propertyData.append([expRow[3],(expRow[4]*100.0)/(expRow[5]*100.0)])
        expRow = expCursor.fetchone()
    # Flush the final property group; guard avoids appending an empty
    # list when the schematic has no quality rows at all.
    if len(propertyData) > 0:
        qualityData.append(propertyData)
    expCursor.close()
    return qualityData
# Get a list item for a component ingredient
def getComponentLink(cn, objectPath, ingredientType):
compCursor = cn.cursor()
compCursor.execute('SELECT schematicID, schematicName, complexity, xpAmount, (SELECT imageName FROM tSchematicImages tsi WHERE tsi.schematicID=tSchematic.schematicID AND tsi.imageType=1) AS schemImage FROM tSchematic WHERE objectPath="' + objectPath + '" OR objectGroup="' + objectPath + '";')
compRow = compCursor.fetchone()
tempStr = ''
schemImageName = ''
schemName = ''
if (compRow != None):
# use first image for slot
schemName = compRow[1]
if (compRow[4] != None):
schemImageName = compRow[4]
else:
schemImageName = 'none.jpg'
while compRow != None:
# Add each potential component
if tempStr.find('href') > -1:
tempStr += ' or '
tempStr += '<a href="' + compRow[0] + '">' + compRow[1] + '</a>'
compRow = compCursor.fetchone()
else:
tempStr = objectPath[objectPath.rfind('/')+1:-4].replace('_',' ')
return "/images/schematics/{0}|{1}|{2}".format(schemImageName, tempStr, schemName)
# Helper function to look up profession for schematic since its not a property of schem but inferrable by skill group
def getProfession(conn, skillGroup):
profCursor = conn.cursor()
profCursor.execute('SELECT profID FROM tSkillGroup WHERE skillGroup=%s;', [skillGroup])
profRow = profCursor.fetchone()
if profRow != None:
return profRow[0]
else:
return 0
profCursor.close()
def main():
    """CGI entry point: render the schematic detail page (or editor).

    Reads identity/session state from cookies or the query string, loads
    the requested schematic (ID taken from PATH_INFO) plus its
    ingredients, quality data and usage list, then renders the Jinja2
    template.  ?forceOp=edit switches to the editor template when the
    user has sufficient reputation.
    """
    # Get current url
    try:
        url = os.environ['SCRIPT_NAME']
    except KeyError:
        url = ''
    uiTheme = ''
    schematicID = ''
    schemImageAttempt = ''
    schemHTML = '<h2>That schematic does not exist.</h2>'
    form = cgi.FieldStorage()
    # Get Cookies (fall back to form/query parameters when absent)
    useCookies = 1
    C = cookies.SimpleCookie()
    try:
        C.load(os.environ['HTTP_COOKIE'])
    except KeyError:
        useCookies = 0
    if useCookies:
        try:
            currentUser = C['userID'].value
        except KeyError:
            currentUser = ''
        try:
            loginResult = C['loginAttempt'].value
        except KeyError:
            loginResult = 'success'
        try:
            sid = C['gh_sid'].value
        except KeyError:
            sid = form.getfirst('gh_sid', '')
        try:
            uiTheme = C['uiTheme'].value
        except KeyError:
            uiTheme = ''
        try:
            schemImageAttempt = C['schemImageAttempt'].value
        except KeyError:
            schemImageAttempt = ''
        try:
            galaxy = C['galaxy'].value
        except KeyError:
            galaxy = form.getfirst('galaxy', ghShared.DEFAULT_GALAXY)
    else:
        currentUser = ''
        loginResult = form.getfirst('loginAttempt', '')
        sid = form.getfirst('gh_sid', '')
        schemImageAttempt = form.getfirst('schemImageAttempt', '')
        galaxy = form.getfirst('galaxy', ghShared.DEFAULT_GALAXY)
    forceOp = form.getfirst('forceOp', '')
    # escape input to prevent sql injection
    sid = dbShared.dbInsertSafe(sid)
    # Get a session
    logged_state = 0
    linkappend = ''
    disableStr = ''
    if loginResult == None:
        loginResult = 'success'
    sess = dbSession.getSession(sid)
    if (sess != ''):
        logged_state = 1
        currentUser = sess
        if (uiTheme == ''):
            uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
        if (useCookies == 0):
            # No cookie support: propagate the session id via the URL.
            linkappend = 'gh_sid=' + sid
    else:
        disableStr = ' disabled="disabled"'
        if (uiTheme == ''):
            uiTheme = 'crafter'
    # The schematic ID is the first non-empty PATH_INFO segment.
    path = []
    s = None
    if 'PATH_INFO' in os.environ:
        path = os.environ['PATH_INFO'].split('/')[1:]
        path = [p for p in path if p != '']
    favHTML = ''
    canEdit = False
    canAdd = False
    profession = ''
    if len(path) > 0:
        schematicID = dbShared.dbInsertSafe(path[0])
        url = url + '/' + schematicID
    try:
        conn = dbShared.ghConn()
    except Exception:
        # NOTE(review): errorstr is never rendered and conn stays unbound
        # here, so the dbShared calls below would raise NameError if the
        # connection actually failed -- confirm intended handling.
        errorstr = "Error: could not connect to database"
    # Lookup reputation for edit tool option
    stats = dbShared.getUserStats(currentUser, galaxy).split(",")
    userReputation = int(stats[2])
    admin = dbShared.getUserAdmin(conn, currentUser, galaxy)
    canAdd = userReputation >= ghShared.MIN_REP_VALS['ADD_SCHEMATIC'] or admin
    if (schematicID != 'index') and (schematicID != 'home'):
        # Build the schematic object
        cursor = conn.cursor()
        if (cursor):
            cursor.execute('SELECT schematicName, complexity, xpAmount, (SELECT imageName FROM tSchematicImages tsi WHERE tsi.schematicID=tSchematic.schematicID AND tsi.imageType=1) AS schemImage, galaxy, enteredBy, craftingTab, skillGroup, objectType FROM tSchematic WHERE schematicID=%s;', [schematicID])
            row = cursor.fetchone()
            if (row != None):
                # main schematic data
                if (row[3] != None):
                    schemImageName = row[3]
                else:
                    schemImageName = 'none.jpg'
                s = ghObjectSchematic.schematic()
                s.schematicID = schematicID
                s.schematicName = row[0]
                s.complexity = row[1]
                s.xpAmount = row[2]
                s.schematicImage = schemImageName
                s.galaxy = row[4]
                s.enteredBy = row[5]
                s.craftingTab = row[6]
                s.skillGroup = row[7]
                s.objectType = row[8]
                profession = getProfession(conn, s.skillGroup)
                # Ingredient list: resources (type 0) resolve through the
                # resource group/type tables; everything else resolves to
                # component schematic links.
                ingCursor = conn.cursor()
                ingCursor.execute('SELECT ingredientName, ingredientType, ingredientObject, ingredientQuantity, res.resName, containerType FROM tSchematicIngredients LEFT JOIN (SELECT resourceGroup AS resID, groupName AS resName, containerType FROM tResourceGroup UNION ALL SELECT resourceType, resourceTypeName, containerType FROM tResourceType) res ON ingredientObject = res.resID WHERE schematicID="' + schematicID + '" ORDER BY ingredientType, ingredientQuantity DESC;')
                ingRow = ingCursor.fetchone()
                while (ingRow != None):
                    tmpObject = ingRow[2]
                    tmpObject = tmpObject.replace('shared_','')
                    if (ingRow[1] == 0):
                        tmpImage = '/images/resources/{0}.png'.format(ingRow[5])
                        # resource
                        if (ingRow[4] != None):
                            tmpName = ingRow[4]
                            tmpLink = '<a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/' + ingRow[2] + '">' + tmpName + '</a>'
                        else:
                            # NOTE(review): tmpName is not assigned on this
                            # branch -- it reuses the previous iteration's
                            # value (or raises NameError on the first row);
                            # confirm intended fallback.
                            tmpLink = '<a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/' + ingRow[2] + '">' + tmpName + '</a>'
                    else:
                        # component
                        results = getComponentLink(conn, tmpObject, ingRow[1]).split('|')
                        tmpLink = results[1]
                        tmpImage = results[0]
                        tmpName = results[2]
                    s.ingredients.append(ghObjectSchematic.schematicIngredient(ingRow[0], ingRow[1], tmpObject, ingRow[3], ingRow[4], tmpLink, tmpImage, tmpName))
                    ingRow = ingCursor.fetchone()
                ingCursor.close()
                # schematic quality data: group sorted stat-weight rows
                # into property and group objects as they change.
                expGroup = ''
                expProp = ''
                qg = None
                qp = None
                expCursor = conn.cursor()
                expCursor.execute('SELECT tSchematicQualities.expQualityID, expProperty, expGroup, statName, statWeight, weightTotal FROM tSchematicQualities INNER JOIN tSchematicResWeights ON tSchematicQualities.expQualityID = tSchematicResWeights.expQualityID WHERE schematicID="' + schematicID + '" ORDER BY expGroup, expProperty, statName;')
                expRow = expCursor.fetchone()
                while (expRow != None):
                    if expRow[1] != expProp:
                        if qp != None:
                            qg.properties.append(qp)
                            qp = None
                        qp = ghObjectSchematic.schematicQualityProperty(expRow[1], expRow[5])
                        expProp = expRow[1]
                    if expRow[2] != expGroup:
                        if qg != None:
                            s.qualityGroups.append(qg)
                            qg = None
                        qg = ghObjectSchematic.schematicQualityGroup(expRow[2])
                        expGroup = expRow[2]
                    sw = ghObjectSchematic.schematicStatWeight(expRow[0], expRow[3], expRow[4], expRow[5])
                    qp.statWeights.append(sw)
                    expRow = expCursor.fetchone()
                # Flush the trailing property/group after the last row.
                if qp != None:
                    qg.properties.append(qp)
                if qg != None:
                    s.qualityGroups.append(qg)
                expCursor.close()
                # Get list of schematics this one can be used in
                useCursor = conn.cursor()
                useCursor.execute('SELECT tSchematicIngredients.schematicID, s2.schematicName FROM tSchematicIngredients INNER JOIN tSchematic ON tSchematicIngredients.ingredientObject = tSchematic.objectPath OR tSchematicIngredients.ingredientObject = tSchematic.objectGroup INNER JOIN tSchematic s2 ON tSchematicIngredients.schematicID=s2.schematicID WHERE tSchematic.schematicID = "' + schematicID + '" AND s2.galaxy IN (0, ' + str(galaxy) + ') GROUP BY tSchematicIngredients.schematicID;')
                useRow = useCursor.fetchone()
                while (useRow != None):
                    s.schematicsUsedIn.append([useRow[0], useRow[1]])
                    useRow = useCursor.fetchone()
                useCursor.close()
                # Favorite toggle state for logged-in users.
                if logged_state > 0:
                    favCursor = conn.cursor()
                    # NOTE(review): favSQL is concatenated from currentUser/
                    # schematicID/galaxy -- confirm these are pre-escaped.
                    favSQL = ''.join(('SELECT itemID FROM tFavorites WHERE favType=4 AND userID="', currentUser, '" AND favGroup="', schematicID, '" AND galaxy=', galaxy))
                    favCursor.execute(favSQL)
                    favRow = favCursor.fetchone()
                    if favRow != None:
                        favHTML = ' <div class="inlineBlock" style="width:3%;float:left;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, 4, \''+ schematicID +'\', $(\'#galaxySel\').val());"><img src="/images/favorite16On.png" /></a></div>'
                    else:
                        favHTML = ' <div class="inlineBlock" style="width:3%;float:left;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, 4, \''+ schematicID +'\', $(\'#galaxySel\').val());"><img src="/images/favorite16Off.png" /></a></div>'
                    favCursor.close()
                # Edit allowed for admins, the original submitter, or (for
                # galaxy-specific schematics) high-reputation users.
                if admin or currentUser == s.enteredBy or (s.galaxy != 0 and userReputation >= ghShared.MIN_REP_VALS['EDIT_OTHER_SCHEMATIC']):
                    canEdit = True
            cursor.close()
    conn.close()
    pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
    # Emit the CGI header followed by the rendered template.
    print('Content-type: text/html\n')
    env = Environment(loader=FileSystemLoader('templates'))
    env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
    env.globals['MOBILE_PLATFORM'] = ghShared.getMobilePlatform(os.environ['HTTP_USER_AGENT'])
    groupListShort = ""
    if forceOp == 'edit':
        groupListShort=ghLists.getResourceGroupListShort()
        template = env.get_template('schematiceditor.html')
    else:
        template = env.get_template('schematics.html')
    print(template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), professionList=ghLists.getProfessionList(galaxy), schematicTabList=ghLists.getSchematicTabList(), objectTypeList=ghLists.getObjectTypeList(), noenergyTypeList=ghLists.getOptionList('SELECT resourceType, resourceTypeName FROM tResourceType WHERE resourceCategory != "energy" ORDER BY resourceTypeName;'), resourceGroupList=ghLists.getResourceGroupList(), resourceGroupListShort=groupListShort, statList=ghLists.getStatList(), schematicID=schematicID, schematic=s, favHTML=favHTML, canEdit=canEdit, profession=profession, canAdd=canAdd, enableCAPTCHA=ghShared.RECAPTCHA_ENABLED, siteidCAPTCHA=ghShared.RECAPTCHA_SITEID))
if __name__ == "__main__":
    main()
| pwillworth/galaxyharvester | html/schematics.py | Python | gpl-3.0 | 12,698 | [
"Galaxy"
] | 15127fc3e1c2c872a5e7351a31831979873ead8e0af3d9a7bf316efe4f4ef6a9 |
"""feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('scripting.com')
['http://delong.typepad.com/sdj/atom.xml',
'http://delong.typepad.com/sdj/index.rdf',
'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.371"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib
import urllib
import urlparse
import re
import sys
import robotparser
import threading
class TimeoutError(Exception):
    """Raised by the ``timelimit`` decorator when a call takes too long."""
    pass
def timelimit(timeout):
    """borrowed from web.py

    Decorator factory: run the wrapped function in a daemon thread and
    wait at most *timeout* seconds.  Raises TimeoutError if the call is
    still running, and re-raises (Python 2 style) any exception the call
    itself raised.

    NOTE(review): the worker thread is not stopped on timeout; it keeps
    running in the background until the call finishes on its own.
    """
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    # Daemon thread so a hung fetch cannot block exit.
                    self.setDaemon(True)
                    self.start()
                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        # Capture the full exc_info for re-raise below.
                        self.error = sys.exc_info()
            c = Dispatch()
            c.join(timeout)
            if c.isAlive():
                raise TimeoutError, 'took too long'
            if c.error:
                raise c.error[0], c.error[1]
            return c.result
        return _2
    return _1
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
xmlrpclib = None
# Compatibility shim for ancient Pythons: build dict() from a list of
# (key, value) pairs when the builtin is missing/falsy.
# NOTE(review): on any modern Python `dict` is truthy, so this never runs.
if not dict:
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
def _debuglog(message):
    # Print diagnostics only when the module-level _debug flag is set
    # (via --debug on the command line).
    if _debug: print message
class URLGatekeeper:
    """a class to track robots.txt rules across multiple servers

    Wraps all HTTP fetches for the module: advertises a feedfinder
    user-agent, caches one RobotFileParser per domain, and refuses
    fetches that robots.txt disallows.
    """
    def __init__(self):
        self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
        self.urlopener = urllib.FancyURLopener()
        self.urlopener.version = "feedfinder/" + __version__ + " " + self.urlopener.version + " +http://www.aaronsw.com/2002/feedfinder/"
        _debuglog(self.urlopener.version)
        self.urlopener.addheaders = [('User-agent', self.urlopener.version)]
        # Make robotparser's own robots.txt fetches use the same agent.
        robotparser.URLopener.version = self.urlopener.version
        robotparser.URLopener.addheaders = self.urlopener.addheaders
    def _getrp(self, url):
        # Return (and cache) the robots.txt parser for url's domain.
        protocol, domain = urlparse.urlparse(url)[:2]
        if self.rpcache.has_key(domain):
            return self.rpcache[domain]
        baseurl = '%s://%s' % (protocol, domain)
        robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
        _debuglog('fetching %s' % robotsurl)
        rp = robotparser.RobotFileParser(robotsurl)
        try:
            rp.read()
        except:
            # Unreadable robots.txt: fall through with an empty parser
            # (which allows everything).
            pass
        self.rpcache[domain] = rp
        return rp
    def can_fetch(self, url):
        # True if robots.txt for url's host allows our user agent.
        rp = self._getrp(url)
        allow = rp.can_fetch(self.urlopener.version, url)
        _debuglog("gatekeeper of %s says %s" % (url, allow))
        return allow
    @timelimit(10)
    def get(self, url, check=True):
        # Fetch url (at most 10s via the timelimit decorator).  Returns
        # '' when robots.txt disallows the fetch or on any error.
        if check and not self.can_fetch(url): return ''
        try:
            return self.urlopener.open(url).read()
        except:
            return ''
# Module-wide singleton used by all fetch helpers below.
_gatekeeper = URLGatekeeper()
class BaseParser(sgmllib.SGMLParser):
    """Common SGML parsing: honors <base href> and accumulates candidate
    links in ``self.links`` (subclasses decide what counts as a link)."""
    def __init__(self, baseuri):
        sgmllib.SGMLParser.__init__(self)
        self.links = []
        self.baseuri = baseuri
    def normalize_attrs(self, attrs):
        # Decode numeric character references, trim whitespace, and
        # lowercase attribute names (plus rel/type values, which are
        # case-insensitive in HTML).
        def cleanattr(v):
            v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
            v = v.strip()
            # NOTE(review): this chain originally unescaped HTML entities
            # (&lt; &gt; &apos; &quot; &amp;) but the entity names appear
            # to have been lost in transit -- as written each replace()
            # is a no-op; restore the entity forms.
            v = v.replace('<', '<').replace('>', '>').replace(''', "'").replace('"', '"').replace('&', '&')
            return v
        attrs = [(k.lower(), cleanattr(v)) for k, v in attrs]
        attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs]
        return attrs
    def do_base(self, attrs):
        # Track <base href> so later links resolve against it.
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.baseuri = attrsD['href']
    def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
    """Collects feed URIs advertised via <link rel="alternate">
    autodiscovery tags with a known feed MIME type."""
    # MIME types commonly used to advertise RSS/Atom feeds.
    FEED_TYPES = ('application/rss+xml',
                  'text/xml',
                  'application/atom+xml',
                  'application/x.atom+xml',
                  'application/x-atom+xml')
    def do_link(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('rel'): return
        # rel may hold several space-separated tokens.
        rels = attrsD['rel'].split()
        if 'alternate' not in rels: return
        if attrsD.get('type') not in self.FEED_TYPES: return
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
class ALinkParser(BaseParser):
    """Collects the href of every <a> tag (candidate feed links),
    resolved against the page's base URI."""
    def start_a(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
def makeFullURI(uri):
    """Normalize *uri* to a full http(s) URI.

    Strips surrounding whitespace, rewrites the feed:// scheme to
    http://, and prefixes bare host names with http://.
    """
    cleaned = uri.strip()
    if cleaned.startswith('feed://'):
        cleaned = 'http://' + cleaned[len('feed://'):]
    if cleaned.startswith('http://') or cleaned.startswith('https://'):
        return cleaned
    return 'http://' + cleaned
def getLinks(data, baseuri):
    # Return feed URIs advertised via <link rel="alternate"> tags in data.
    p = LinkParser(baseuri)
    p.feed(data)
    return p.links
def getALinks(data, baseuri):
    # Return the target of every <a> tag in data, resolved against baseuri.
    p = ALinkParser(baseuri)
    p.feed(data)
    return p.links
def getLocalLinks(links, baseuri):
    """Return the subset of *links* on the same server as *baseuri*.

    The prefix comparison is case-insensitive on both sides.
    (Removed the original's unused local ``urilen``.)
    """
    baseuri = baseuri.lower()
    return [l for l in links if l.lower().startswith(baseuri)]
def isFeedLink(link):
    """True if *link* ends with a feed-style extension.

    Bug fix: the original compared ``link[-4:]`` against a tuple that
    included the five-character '.atom' suffix, so '.atom' links could
    never match -- contradicting the module docstring, which lists
    '.atom' among the recognized endings.  endswith() with a tuple
    handles all suffix lengths.
    """
    return link.lower().endswith(('.rss', '.rdf', '.xml', '.atom'))
def isXMLRelatedLink(link):
    """Return a score counting feed-ish substrings in *link* (0 = none)."""
    lowered = link.lower()
    return sum(lowered.count(token) for token in ('rss', 'rdf', 'xml', 'atom'))
# Matches the body of a <newLocation> pseudo-redirect element.
r_brokenRedirect = re.compile('<newLocation[^>]*>(.*?)</newLocation>', re.S)
def tryBrokenRedirect(data):
    """Return the target of a <newLocation> pseudo-redirect, or None."""
    if '<newLocation' not in data:
        return None
    targets = r_brokenRedirect.findall(data)
    if targets:
        return targets[0].strip()
    return None
def couldBeFeedData(data):
    """Heuristic: nonzero if *data* looks like feed XML, 0 for HTML or
    anything without a feed root tag."""
    lowered = data.lower()
    if '<html' in lowered:
        return 0
    return sum(lowered.count(tag) for tag in ('<rss', '<rdf', '<feed'))
def isFeed(uri):
    # Minimal verification: fetch uri (robots.txt permitting) and check
    # that the payload looks like feed XML.  Returns a truthy count or 0;
    # non-http(s) schemes are rejected outright.
    _debuglog('seeing if %s is a feed' % uri)
    protocol = urlparse.urlparse(uri)
    if protocol[0] not in ('http', 'https'): return 0
    data = _gatekeeper.get(uri)
    return couldBeFeedData(data)
def sortFeeds(feed1Info, feed2Info):
    # Python 2 cmp()-style comparator: orders Syndic8 feed-info dicts by
    # descending headlines_rank (most popular first).
    return cmp(feed2Info['headlines_rank'], feed1Info['headlines_rank'])
def getFeedsFromSyndic8(uri):
    """Query the Syndic8 XML-RPC service for feeds matching *uri*.

    Returns the data URLs of syndicated feeds sorted by popularity.
    Best-effort: any failure (network error, missing xmlrpclib, bad
    response) yields an empty list.
    """
    feeds = []
    try:
        server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
        feedids = server.syndic8.FindFeeds(uri)
        infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank','status','dataurl'])
        infolist.sort(sortFeeds)
        feeds = [f['dataurl'] for f in infolist if f['status']=='Syndicated']
        _debuglog('found %s feeds through Syndic8' % len(feeds))
    except Exception:
        # Narrowed from a bare except: the lookup stays best-effort but
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return feeds
def feeds(uri, all=False, querySyndic8=False, _recurs=None):
    """Return the list of feed URIs discovered for *uri*.

    Discovery escalates through the steps listed in the module
    docstring; with all=True every step runs and results accumulate,
    otherwise the first step that finds feeds wins.  _recurs tracks
    URIs already visited through <newLocation> pseudo-redirects to
    prevent infinite loops.
    """
    if _recurs is None: _recurs = [uri]
    fulluri = makeFullURI(uri)
    try:
        data = _gatekeeper.get(fulluri, check=False)
    except:
        return []
    # is this already a feed?
    if couldBeFeedData(data):
        return [fulluri]
    newuri = tryBrokenRedirect(data)
    if newuri and newuri not in _recurs:
        _recurs.append(newuri)
        return feeds(newuri, all=all, querySyndic8=querySyndic8, _recurs=_recurs)
    # nope, it's a page, try LINK tags first
    _debuglog('looking for LINK tags')
    try:
        outfeeds = getLinks(data, fulluri)
    except:
        outfeeds = []
    _debuglog('found %s feeds through LINK tags' % len(outfeeds))
    # Keep only candidates that actually serve feed data.
    outfeeds = filter(isFeed, outfeeds)
    if all or not outfeeds:
        # no LINK tags, look for regular <A> links that point to feeds
        _debuglog('no LINK tags, looking at A tags')
        try:
            links = getALinks(data, fulluri)
        except:
            links = []
        locallinks = getLocalLinks(links, fulluri)
        # look for obvious feed links on the same server
        outfeeds.extend(filter(isFeed, filter(isFeedLink, locallinks)))
        if all or not outfeeds:
            # look harder for feed links on the same server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, locallinks)))
        if all or not outfeeds:
            # look for obvious feed links on another server
            outfeeds.extend(filter(isFeed, filter(isFeedLink, links)))
        if all or not outfeeds:
            # look harder for feed links on another server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, links)))
    if all or not outfeeds:
        _debuglog('no A tags, guessing')
        suffixes = [ # filenames used by popular software:
            'atom.xml', # blogger, TypePad
            'index.atom', # MT, apparently
            'index.rdf', # MT
            'rss.xml', # Dave Winer/Manila
            'index.xml', # MT
            'index.rss' # Slash
        ]
        outfeeds.extend(filter(isFeed, [urlparse.urljoin(fulluri, x) for x in suffixes]))
    if (all or not outfeeds) and querySyndic8:
        # still no luck, search Syndic8 for feeds (requires xmlrpclib)
        _debuglog('still no luck, searching Syndic8')
        outfeeds.extend(getFeedsFromSyndic8(uri))
    # De-duplicate (guarded for pre-set() Pythons).
    if hasattr(__builtins__, 'set') or __builtins__.has_key('set'):
        outfeeds = list(set(outfeeds))
    return outfeeds
getFeeds = feeds # backwards-compatibility
def feed(uri):
    """Return the first feed discovered for *uri*, or None.

    #todo: give preference to certain feed formats
    """
    found = feeds(uri)
    return found[0] if found else None
##### test harness ######
def test():
    # Walks Mark Pilgrim's chained "Atom autodiscovery test" pages,
    # verifying each page yields exactly one feed that links back to the
    # page; follows <link rel="next"> to the next test case.
    uri = 'http://cnn.com'
    failed = []
    count = 0
    while 1:
        data = _gatekeeper.get(uri)
        if data.find('Atom autodiscovery test') == -1: break
        sys.stdout.write('.')
        sys.stdout.flush()
        count += 1
        links = getLinks(data, uri)
        if not links:
            print '\n*** FAILED ***', uri, 'could not find link'
            failed.append(uri)
        elif len(links) > 1:
            print '\n*** FAILED ***', uri, 'found too many links'
            failed.append(uri)
        else:
            atomdata = urllib.urlopen(links[0]).read()
            if atomdata.find('<link rel="alternate"') == -1:
                print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
                failed.append(uri)
            else:
                # The feed's alternate link must point back at the page.
                backlink = atomdata.split('href="').pop().split('"')[0]
                if backlink != uri:
                    print '\n*** FAILED ***', uri, 'retrieved wrong feed'
                    failed.append(uri)
        if data.find('<link rel="next" href="') == -1: break
        uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
    print
    print count, 'tests executed,', len(failed), 'failed'
if __name__ == '__main__':
    # CLI usage: feedfinder.py [--debug] [uri | test]
    args = sys.argv[1:]
    if args and args[0] == '--debug':
        _debug = 1
        args.pop(0)
    if args:
        uri = args[0]
    else:
        uri = 'http://diveintomark.org/'
    # The magic uri 'test' runs the autodiscovery test harness instead.
    if uri == 'test':
        test()
    else:
print "\n".join(getFeeds(uri)) | cantino/newspaper | newspaper/packages/feedfinder.py | Python | mit | 12,920 | [
"Brian"
] | 26e23e04c85c44314872a7afd5e4b21518c5db07b7975a23859f7abe960e3ec1 |
#!/usr/bin/env python
import pylab as pl
def readLammps(fn):
    """Parse a LAMMPS dump file and return per-timestep atom data.

    Returns a pylab array of shape (nsteps, natoms, ncols), where each
    atom row holds the float columns of the dump (velocity components
    here).  Header layout assumed: 'ITEM: TIMESTEP' marker, atom count
    3 lines below it, atom rows starting 9 lines below it.
    """
    fh = open(fn)
    lines = fh.readlines()
    fh.close()
    steps = []
    for kl in range(len(lines)):
        if lines[kl].startswith('ITEM: TIMESTEP'):
            step = []
            Na = int(lines[kl+3])
            for ka in range(Na):
                # list(map(...)) so this also works on Python 3, where
                # map() returns a lazy iterator (the original appended
                # map objects, breaking pl.array on Py3).
                v = list(map(float, lines[kl+9+ka].split()))
                step.append(v)
            steps.append(step)
            # (The original's `kl += 9+Na` was a no-op -- the for-loop
            # rebinds kl each iteration -- and has been removed.)
    return pl.array(steps)
def readMark1(fn):
    """Parse a mark1 xyz-style velocity file into per-timestep atom data.

    The first line holds the atom count; every 'Atoms. Timestep:' marker
    is followed by one row per atom whose first column (species label)
    is dropped.  Returns a pylab array of shape (nsteps, natoms, ncols).
    """
    fh = open(fn)
    lines = fh.readlines()
    fh.close()
    Na = int(lines[0])
    steps = []
    for kl in range(len(lines)):
        if lines[kl].startswith('Atoms. Timestep:'):
            step = []
            for ka in range(Na):
                # list(map(...)) keeps Python 3 compatibility (map() is
                # lazy there); [1:] skips the leading species label.
                v = list(map(float, lines[kl+1+ka].split()[1:]))
                step.append(v)
            steps.append(step)
            # (Removed the original's no-op `kl += 1+Na`.)
    return pl.array(steps)
# Load matching velocity trajectories produced by the two codes.
L = readLammps('lammps.vel')
M = readMark1('mark1.vel')
# Mean speed magnitude over all steps/atoms, used to normalize the
# per-component differences below.
U0 = pl.sqrt((L**2).sum(2)).mean()
pl.figure()
for kx in range(3):
    pl.subplot(3,1,kx+1)
    for ka in range(L.shape[1]):
        # Relative per-atom difference between the two codes for this
        # velocity component.
        rel_diff = (L[:,ka,kx]-M[:,ka,kx])/U0
        pl.plot( rel_diff,label='lammps-mark1')
    yl = pl.ylim()
    # Make sure zero is always visible on the y-axis.
    pl.ylim([min(yl[0],0.0),max(yl[1],0.0)])
    pl.title( '%s-velocity'%('xyz'[kx]) )
pl.tight_layout()
pl.show() | yorzh86/Step1 | scripts/compareVelocity.py | Python | gpl-2.0 | 1,115 | [
"LAMMPS"
] | e954bcd35f8e4f5bb6cdf3b8f2721e2a95006b2b41c67ee7a0470fea7e4bfbcc |
# Copyright (C) 2009-2013 Sebastian Rahlf <basti at redtoad dot de>
#
# This program is release under the BSD License. You can find the full text of
# the license in the LICENSE file.
from lxml import etree, objectify
from amazonproduct.contrib.cart import Cart, Item
from amazonproduct.errors import AWSError
from amazonproduct.processors import BaseProcessor
from amazonproduct.processors import ITEMS_PAGINATOR, RELATEDITEMS_PAGINATOR
from amazonproduct.processors._lxml import SearchPaginator
from amazonproduct.processors._lxml import RelatedItemsPaginator
class SelectiveClassLookup(etree.CustomElementClassLookup):
    """
    Lookup mechanism for XML elements to ensure that ItemIds (like
    ASINs) are always StringElements and evaluated as such.
    Thanks to Brian Browning for pointing this out.
    """
    # Tags whose content must always be treated as plain strings.
    _STRING_TAGS = ('ItemId', 'ASIN')
    # pylint: disable-msg=W0613
    def lookup(self, node_type, document, namespace, name):
        if name not in self._STRING_TAGS:
            # Returning None defers to the fallback lookup.
            return None
        return objectify.StringElement
class Processor (BaseProcessor):
    """
    Response processor using ``lxml.objectify``. It uses a custom lookup
    mechanism for XML elements to ensure that ItemIds (such as ASINs) are
    always StringElements and evaluated as such.
    ..warning:: This processors does not run on Google App Engine!
    http://code.google.com/p/googleappengine/issues/detail?id=18
    """
    # pylint: disable-msg=R0903
    paginators = {
        ITEMS_PAGINATOR: SearchPaginator,
        RELATEDITEMS_PAGINATOR: RelatedItemsPaginator,
    }
    def __init__(self):
        # Parser with a selective lookup so ItemId/ASIN elements are
        # plain strings; everything else falls back to the standard
        # objectify element classes.
        self._parser = etree.XMLParser()
        lookup = SelectiveClassLookup()
        lookup.set_fallback(objectify.ObjectifyElementClassLookup())
        self._parser.set_element_class_lookup(lookup)
    def parse(self, fp):
        """
        Parses a file-like object containing the Amazon XML response and
        returns the objectified root element.  Raises AWSError when the
        response contains an <Error> element.
        """
        tree = objectify.parse(fp, self._parser)
        root = tree.getroot()
        #~ from lxml import etree
        #~ print etree.tostring(tree, pretty_print=True)
        try:
            # Namespaced response: search for errors via the default
            # namespace mapping.
            nspace = root.nsmap[None]
            errors = root.xpath('//aws:Error', namespaces={'aws': nspace})
        except KeyError:
            # No default namespace declared on the root element.
            errors = root.xpath('//Error')
        for error in errors:
            raise AWSError(
                code=error.Code.text,
                msg=error.Message.text,
                xml=root)
        return root
    @classmethod
    def parse_cart(cls, node):
        """
        Returns an instance of :class:`amazonproduct.contrib.Cart` based on
        information extracted from ``node``.
        """
        cart = Cart()
        # TODO This is probably not the safest way to get <Cart>
        root = node.Cart
        cart.cart_id = root.CartId.pyval
        cart.hmac = root.HMAC.pyval
        def parse_item(item_node):
            # Build a Cart Item from one <CartItem> element.
            item = Item()
            item.item_id = item_node.CartItemId.pyval
            item.asin = item_node.ASIN.pyval
            item.seller = item_node.SellerNickname.pyval
            item.quantity = item_node.Quantity.pyval
            item.title = item_node.Title.pyval
            item.product_group = item_node.ProductGroup.pyval
            item.price = (
                item_node.Price.Amount.pyval,
                item_node.Price.CurrencyCode.pyval)
            item.total = (
                item_node.ItemTotal.Amount.pyval,
                item_node.ItemTotal.CurrencyCode.pyval)
            return item
        try:
            for item_node in root.CartItems.CartItem:
                cart.items.append(parse_item(item_node))
            cart.url = root.PurchaseURL.pyval
            # NOTE(review): unlike the amounts above these are not .pyval
            # -- confirm whether callers expect raw lxml elements here.
            cart.subtotal = (root.SubTotal.Amount, root.SubTotal.CurrencyCode)
        except AttributeError:
            # An empty cart has no CartItems/PurchaseURL/SubTotal.
            cart.url = None
            cart.subtotal = None
        return cart
| prats226/python-amazon-product-api-0.2.8 | amazonproduct/processors/objectify.py | Python | bsd-3-clause | 3,842 | [
"Brian"
] | f4c92fdef0932a306838b270f7766b711b99d755c89312fe0a3973d3f6710108 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import re
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "Repos having notes that contain a substring"
#-------------------------------------------------------------------------
class MatchesRegexpOf(Rule):
    """Rule matching notes whose text matches a regular expression."""
    labels = [ _('Regular expression:')]
    name = _('Notes containing <regular expression>')
    description = _("Matches notes that contain text "
                    "which matches a regular expression")
    category = _('General filters')
    def __init__(self, list):
        # The parameter is named `list` (shadowing the builtin) to keep
        # the Rule base-class interface unchanged.
        Rule.__init__(self, list)
        try:
            self.match = re.compile(list[0], re.I|re.U|re.L)
        except Exception:
            # Narrowed from a bare except: an invalid or missing pattern
            # still falls back to the match-everything empty pattern, but
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            self.match = re.compile('')
    def apply(self, db, note):
        """Apply the filter; True if the note's text matches.

        NOTE(review): re.match() anchors at the start of the text even
        though the rule name says "containing" -- re.search() may be
        intended; confirm against sibling regexp rules before changing.
        """
        text = note.get()
        if self.match.match(text) is not None:
            return True
        return False
| arunkgupta/gramps | gramps/gen/filters/rules/note/_matchesregexpof.py | Python | gpl-2.0 | 2,164 | [
"Brian"
] | 7fc95fb80429e566c395101b9b220d8ef104fb523176d85db47a729999d63b66 |
from __future__ import absolute_import
from six import string_types
import json
import decimal
import datetime
from pathlib import Path
from plotly.io._utils import validate_coerce_fig_to_dict, validate_coerce_output_type
from _plotly_utils.optional_imports import get_module
from _plotly_utils.basevalidators import ImageUriValidator
# Orca configuration class
# ------------------------
# Orca configuration class
# ------------------------
class JsonConfig(object):
    """Holds the module-wide default JSON engine selection."""

    _valid_engines = ("json", "orjson", "auto")

    def __init__(self):
        # Start with automatic engine selection.
        self._default_engine = "auto"

    @property
    def default_engine(self):
        """The JSON engine used when none is passed explicitly."""
        return self._default_engine

    @default_engine.setter
    def default_engine(self, val):
        if val not in JsonConfig._valid_engines:
            raise ValueError(
                "Supported JSON engines include {valid}\n"
                "    Received {val}".format(valid=JsonConfig._valid_engines, val=val)
            )
        # Fail fast if orjson was requested but is not importable.
        if val == "orjson":
            self.validate_orjson()
        self._default_engine = val

    @classmethod
    def validate_orjson(cls):
        """Raise ValueError unless the orjson package is importable."""
        orjson = get_module("orjson")
        if orjson is None:
            raise ValueError("The orjson engine requires the orjson package")


config = JsonConfig()
def coerce_to_strict(const):
    """
    This is used to ultimately *encode* into strict JSON, see `encode`

    Maps the non-strict JSON constants Infinity/-Infinity/NaN to None;
    every other value passes through unchanged.
    """
    non_strict = ("Infinity", "-Infinity", "NaN")
    return None if const in non_strict else const
def to_json_plotly(plotly_object, pretty=False, engine=None):
    """
    Convert a plotly/Dash object to a JSON string representation
    Parameters
    ----------
    plotly_object:
        A plotly/Dash object represented as a dict, graph_object, or Dash component
    pretty: bool (default False)
        True if JSON representation should be pretty-printed, False if
        representation should be as compact as possible.
    engine: str (default None)
        The JSON encoding engine to use. One of:
          - "json" for an engine based on the built-in Python json module
          - "orjson" for a faster engine that requires the orjson package
          - "auto" for the "orjson" engine if available, otherwise "json"
        If not specified, the default engine is set to the current value of
        plotly.io.json.config.default_engine.
    Returns
    -------
    str
        Representation of input object as a JSON string
    See Also
    --------
    to_json : Convert a plotly Figure to JSON with validation
    """
    orjson = get_module("orjson", should_load=True)
    # Determine json engine: resolve "auto" to orjson when available.
    if engine is None:
        engine = config.default_engine
    if engine == "auto":
        if orjson is not None:
            engine = "orjson"
        else:
            engine = "json"
    elif engine not in ["orjson", "json"]:
        raise ValueError("Invalid json engine: %s" % engine)
    # Optional third-party modules consulted by the cleaning step
    # (loaded lazily; may be None when not installed).
    modules = {
        "sage_all": get_module("sage.all", should_load=False),
        "np": get_module("numpy", should_load=False),
        "pd": get_module("pandas", should_load=False),
        "image": get_module("PIL.Image", should_load=False),
    }
    # Dump to a JSON string and return
    # --------------------------------
    if engine == "json":
        opts = {}
        if pretty:
            opts["indent"] = 2
        else:
            # Remove all whitespace
            opts["separators"] = (",", ":")
        from _plotly_utils.utils import PlotlyJSONEncoder
        return json.dumps(plotly_object, cls=PlotlyJSONEncoder, **opts)
    elif engine == "orjson":
        JsonConfig.validate_orjson()
        opts = orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
        if pretty:
            opts |= orjson.OPT_INDENT_2
        # Plotly graph objects expose their dict form via to_plotly_json.
        try:
            plotly_object = plotly_object.to_plotly_json()
        except AttributeError:
            pass
        # Try without cleaning first: succeeds for plain JSON-able input.
        try:
            return orjson.dumps(plotly_object, option=opts).decode("utf8")
        except TypeError:
            pass
        # Fall back to a full cleaning pass for exotic value types.
        cleaned = clean_to_json_compatible(
            plotly_object, numpy_allowed=True, datetime_allowed=True, modules=modules,
        )
        return orjson.dumps(cleaned, option=opts).decode("utf8")
def to_json(fig, validate=True, pretty=False, remove_uids=True, engine=None):
    """
    Convert a figure to a JSON string representation
    Parameters
    ----------
    fig:
        Figure object or dict representing a figure
    validate: bool (default True)
        True if the figure should be validated before being converted to
        JSON, False otherwise.
    pretty: bool (default False)
        True if JSON representation should be pretty-printed, False if
        representation should be as compact as possible.
    remove_uids: bool (default True)
        True if trace UIDs should be omitted from the JSON representation
    engine: str (default None)
        The JSON encoding engine to use. One of:
          - "json" for an engine based on the built-in Python json module
          - "orjson" for a faster engine that requires the orjson package
          - "auto" for the "orjson" engine if available, otherwise "json"
        If not specified, the default engine is set to the current value of
        plotly.io.json.config.default_engine.
    Returns
    -------
    str
        Representation of figure as a JSON string
    See Also
    --------
    to_json_plotly : Convert an arbitrary plotly graph_object or Dash component to JSON
    """
    # Validate figure
    # ---------------
    fig_dict = validate_coerce_fig_to_dict(fig, validate)
    # Remove trace uid
    # ----------------
    if remove_uids:
        for trace in fig_dict.get("data", []):
            trace.pop("uid", None)
    # Delegate the actual serialization to the generic encoder.
    return to_json_plotly(fig_dict, pretty=pretty, engine=engine)
def write_json(fig, file, validate=True, pretty=False, remove_uids=True, engine=None):
    """
    Convert a figure to JSON and write it to a file or writeable
    object
    Parameters
    ----------
    fig:
        Figure object or dict representing a figure
    file: str or writeable
        A string representing a local file path or a writeable object
        (e.g. a pathlib.Path object or an open file descriptor)
    validate: bool (default True)
        True if the figure should be validated before being converted to
        JSON, False otherwise.
    pretty: bool (default False)
        True if JSON representation should be pretty-printed, False if
        representation should be as compact as possible.
    remove_uids: bool (default True)
        True if trace UIDs should be omitted from the JSON representation
    engine: str (default None)
        The JSON encoding engine to use. One of:
        - "json" for an engine based on the built-in Python json module
        - "orjson" for a faster engine that requires the orjson package
        - "auto" for the "orjson" engine if available, otherwise "json"
        If not specified, the default engine is set to the current value of
        plotly.io.json.config.default_engine.
    Returns
    -------
    None
    """
    # Get JSON string
    # ---------------
    # Pass through validate argument and let to_json handle validation logic
    json_str = to_json(
        fig, validate=validate, pretty=pretty, remove_uids=remove_uids, engine=engine
    )
    # Try to cast `file` as a pathlib object `path`.
    # ----------------------------------------------
    if isinstance(file, string_types):
        # Use the standard Path constructor to make a pathlib object.
        path = Path(file)
    elif isinstance(file, Path):
        # `file` is already a Path object.
        path = file
    else:
        # We could not make a Path object out of file. Either `file` is an open file
        # descriptor with a `write()` method or it's an invalid object.
        path = None
    # Open file
    # ---------
    if path is None:
        # We previously failed to make sense of `file` as a pathlib object.
        # Attempt to write to `file` as an open file descriptor.
        try:
            file.write(json_str)
            return
        except AttributeError:
            pass
        raise ValueError(
            """
The 'file' argument '{file}' is not a string, pathlib.Path object, or file descriptor.
""".format(
                file=file
            )
        )
    else:
        # We previously succeeded in interpreting `file` as a pathlib object.
        # Now we can use `write_text()`.
        path.write_text(json_str)
def from_json_plotly(value, engine=None):
    """
    Parse JSON string using the specified JSON engine
    Parameters
    ----------
    value: str or bytes
        A JSON string or bytes object
    engine: str (default None)
        The JSON decoding engine to use. One of:
        - if "json", parse JSON using built in json module
        - if "orjson", parse using the faster orjson module, requires the orjson
            package
        - if "auto" use orjson module if available, otherwise use the json module
        If not specified, the default engine is set to the current value of
        plotly.io.json.config.default_engine.
    Returns
    -------
    dict
    See Also
    --------
    from_json : Construct a figure from a JSON string
    """
    # orjson is an optional dependency; None when it isn't installed.
    orjson = get_module("orjson", should_load=True)
    # Validate value
    # --------------
    if not isinstance(value, (string_types, bytes)):
        raise ValueError(
            """
from_json_plotly requires a string or bytes argument but received value of type {typ}
    Received value: {value}""".format(
                typ=type(value), value=value
            )
        )
    # Determine json engine
    if engine is None:
        engine = config.default_engine
    if engine == "auto":
        if orjson is not None:
            engine = "orjson"
        else:
            engine = "json"
    elif engine not in ["orjson", "json"]:
        raise ValueError("Invalid json engine: %s" % engine)
    if engine == "orjson":
        JsonConfig.validate_orjson()
        # orjson handles bytes input natively
        value_dict = orjson.loads(value)
    else:
        # decode bytes to str for built-in json module
        if isinstance(value, bytes):
            value = value.decode("utf-8")
        value_dict = json.loads(value)
    return value_dict
def from_json(value, output_type="Figure", skip_invalid=False, engine=None):
    """
    Construct a figure object from its JSON string representation.

    Parameters
    ----------
    value: str or bytes
        String or bytes object containing the JSON representation of a figure
    output_type: type or str (default 'Figure')
        The output figure type or type name.
        One of: graph_objs.Figure, 'Figure', graph_objs.FigureWidget, 'FigureWidget'
    skip_invalid: bool (default False)
        When False, invalid figure properties raise an exception; when True
        they are silently ignored.
    engine: str (default None)
        JSON decoding engine to use. One of:
          - if "json", parse JSON using built in json module
          - if "orjson", parse using the faster orjson module, requires the
            orjson package
          - if "auto" use orjson module if available, otherwise use json
        Defaults to plotly.io.json.config.default_engine when omitted.

    Raises
    ------
    ValueError
        if value is not a string, or if skip_invalid=False and value contains
        invalid figure properties

    Returns
    -------
    Figure or FigureWidget
    """
    # Decode the JSON payload into a plain dict first.
    fig_dict = from_json_plotly(value, engine=engine)

    # Resolve the requested output class, then build the figure from the dict.
    figure_cls = validate_coerce_output_type(output_type)
    return figure_cls(fig_dict, skip_invalid=skip_invalid)
def read_json(file, output_type="Figure", skip_invalid=False, engine=None):
    """
    Construct a figure from the JSON contents of a local file or readable
    Python object

    Parameters
    ----------
    file: str or readable
        A string containing the path to a local file or a read-able Python
        object (e.g. a pathlib.Path object or an open file descriptor)
    output_type: type or str (default 'Figure')
        The output figure type or type name.
        One of: graph_objs.Figure, 'Figure', graph_objs.FigureWidget, 'FigureWidget'
    skip_invalid: bool (default False)
        False if invalid figure properties should result in an exception.
        True if invalid figure properties should be silently ignored.
    engine: str (default None)
        The JSON decoding engine to use. One of:
        - if "json", parse JSON using built in json module
        - if "orjson", parse using the faster orjson module, requires the orjson
            package
        - if "auto" use orjson module if available, otherwise use the json module
        If not specified, the default engine is set to the current value of
        plotly.io.json.config.default_engine.

    Returns
    -------
    Figure or FigureWidget
    """
    # Try to cast `file` as a pathlib object `path`.
    # (The previous revision also computed an unused `file_is_str` flag here;
    # it was dead code and has been removed.)
    if isinstance(file, string_types):
        # Use the standard Path constructor to make a pathlib object.
        path = Path(file)
    elif isinstance(file, Path):
        # `file` is already a Path object.
        path = file
    else:
        # We could not make a Path object out of file. Either `file` is an open
        # file descriptor with a `read()` method or it's an invalid object.
        path = None

    # Read file contents into JSON string
    # -----------------------------------
    if path is not None:
        json_str = path.read_text()
    else:
        json_str = file.read()

    # Construct and return figure
    # ---------------------------
    return from_json(
        json_str, skip_invalid=skip_invalid, output_type=output_type, engine=engine
    )
def clean_to_json_compatible(obj, **kwargs):
    """Recursively convert *obj* into values a JSON engine can encode.

    Keyword arguments (all optional):
      numpy_allowed: bool -- if True, numeric numpy arrays may pass through
          unconverted (the caller's engine serializes them natively).
      datetime_allowed: bool -- if True, datetime objects may pass through;
          otherwise they are converted to ISO-8601 strings.
      modules: dict -- lazily-imported optional modules keyed by
          "sage_all", "np", "pd", "image" (value is None when unavailable).
    """
    # Try handling value as a scalar value that we have a conversion for.
    # Return immediately if we know we've hit a primitive value
    # Bail out fast for simple scalar types
    if isinstance(obj, (int, float, string_types)):
        return obj
    if isinstance(obj, dict):
        return {k: clean_to_json_compatible(v, **kwargs) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        if obj:
            # Must process list recursively even though it may be slow
            return [clean_to_json_compatible(v, **kwargs) for v in obj]
        # NOTE: an empty list/tuple deliberately falls through to the generic
        # handling below and is ultimately returned unchanged at the bottom.
    # unpack kwargs
    numpy_allowed = kwargs.get("numpy_allowed", False)
    datetime_allowed = kwargs.get("datetime_allowed", False)
    modules = kwargs.get("modules", {})
    sage_all = modules["sage_all"]
    np = modules["np"]
    pd = modules["pd"]
    image = modules["image"]
    # Sage
    if sage_all is not None:
        if obj in sage_all.RR:
            return float(obj)
        elif obj in sage_all.ZZ:
            return int(obj)
    # numpy
    if np is not None:
        if obj is np.ma.core.masked:
            return float("nan")
        elif isinstance(obj, np.ndarray):
            if numpy_allowed and obj.dtype.kind in ("b", "i", "u", "f"):
                return np.ascontiguousarray(obj)
            elif obj.dtype.kind == "M":
                # datetime64 array
                return np.datetime_as_string(obj).tolist()
            elif obj.dtype.kind == "U":
                return obj.tolist()
            elif obj.dtype.kind == "O":
                # Treat object array as a lists, continue processing
                obj = obj.tolist()
        elif isinstance(obj, np.datetime64):
            return str(obj)
    # pandas
    if pd is not None:
        if obj is pd.NaT:
            return None
        elif isinstance(obj, (pd.Series, pd.DatetimeIndex)):
            if numpy_allowed and obj.dtype.kind in ("b", "i", "u", "f"):
                return np.ascontiguousarray(obj.values)
            elif obj.dtype.kind == "M":
                if isinstance(obj, pd.Series):
                    dt_values = obj.dt.to_pydatetime().tolist()
                else:  # DatetimeIndex
                    dt_values = obj.to_pydatetime().tolist()
                if not datetime_allowed:
                    # Note: We don't need to handle dropping timezones here because
                    # numpy's datetime64 doesn't support them and pandas's tz_localize
                    # above drops them.
                    for i in range(len(dt_values)):
                        dt_values[i] = dt_values[i].isoformat()
                return dt_values
    # datetime and date
    try:
        # Need to drop timezone for scalar datetimes. Don't need to convert
        # to string since engine can do that
        obj = obj.to_pydatetime()
    except (TypeError, AttributeError):
        pass
    if not datetime_allowed:
        try:
            return obj.isoformat()
        except (TypeError, AttributeError):
            pass
    elif isinstance(obj, datetime.datetime):
        return obj
    # Try .tolist() convertible, do not recurse inside
    try:
        return obj.tolist()
    except AttributeError:
        pass
    # Do best we can with decimal
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    # PIL
    if image is not None and isinstance(obj, image.Image):
        return ImageUriValidator.pil_image_to_uri(obj)
    # Plotly
    try:
        obj = obj.to_plotly_json()
    except AttributeError:
        pass
    # Recurse into lists and dictionaries
    if isinstance(obj, dict):
        return {k: clean_to_json_compatible(v, **kwargs) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        if obj:
            # Must process list recursively even though it may be slow
            return [clean_to_json_compatible(v, **kwargs) for v in obj]
    return obj
| plotly/plotly.py | packages/python/plotly/plotly/io/_json.py | Python | mit | 18,159 | [
"ORCA"
] | 19a18119281b67400fda518fae4a56bf05ca0fcd8b186cf85c68e256e73eb729 |
#!/usr/bin/env python
""" runs checkTransformationIntegrity from ValidateOutputDataAgent on selected Tranformation
"""
from __future__ import print_function
import sys
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
# Require at least one transformation ID on the command line.
if len( sys.argv ) < 2:
  print('Usage: dirac-transformation-verify-outputdata transID [transID] [transID]')
  sys.exit()
else:
  # All positional arguments are transformation IDs.
  transIDs = [int( arg ) for arg in sys.argv[1:]]
# DIRAC imports must come after parseCommandLine() above, hence the
# non-top-of-file placement.
from DIRAC.TransformationSystem.Agent.ValidateOutputDataAgent import ValidateOutputDataAgent
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
# Instantiate the agent standalone (outside the agent framework) so its
# integrity check can be driven directly from this script.
agent = ValidateOutputDataAgent( 'Transformation/ValidateOutputDataAgent',
                                 'Transformation/ValidateOutputDataAgent',
                                 'dirac-transformation-verify-outputdata' )
agent.initialize()
client = TransformationClient()
# Run the output-data integrity check for each requested transformation.
for transID in transIDs:
  agent.checkTransformationIntegrity( transID )
| chaen/DIRAC | TransformationSystem/scripts/dirac-transformation-verify-outputdata.py | Python | gpl-3.0 | 968 | [
"DIRAC"
] | cb2ac12b1af4cde54d21be3f1d8315cd177a1ac309f0c23555117a7603a6a1da |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import glob
import shutil
import os
# try:
# from urllib.parse import urlparse
# except ImportError:
# from urlparse import urlparse
import logging
log = logging.getLogger(__name__)
def copy_output_file_to_dataset(dir_name, input_dir, dt_type=None):
    """
    Copy the dataset files into the ctb explorer output directory.

    :param dir_name: the target output directory for the ctb_explorer dataset
    :param input_dir: path of the input ``.dat`` file; its sibling
        ``<name>_files`` directory holds the files to copy
    :param dt_type: the type of input dataset ("neo4jdb" or "jbrowser");
        when omitted/falsy nothing is copied
    :return: True when a copy was attempted, False when dt_type is missing
    """
    # "foo.dat" -> "foo_files": the sibling directory holding the payload.
    dt_loc = input_dir.rpartition('/')[2].replace(".dat", "_files")
    if dt_type:
        if dt_type == "neo4jdb":
            # neo4j payloads live one level deeper, under a "neo4jdb" subdir.
            src_files = glob.glob(os.path.dirname(input_dir) + '/{}/{}'.format(dt_loc, dt_type) + "/*")
        else:
            src_files = glob.glob(os.path.dirname(input_dir) + '/{}'.format(dt_loc) + "/*")
    else:
        return False
    for file_name in src_files:
        if os.path.isfile(file_name):
            try:
                shutil.copy2(file_name, dir_name)
            except shutil.Error as e:
                log.debug('Error: %s' % e)
            # eg. source or destination doesn't exist
            except IOError as e:
                log.debug('Error: %s' % e.strerror)
        elif os.path.isdir(file_name):
            # Copy the tree to an explicit destination path. The previous
            # implementation did os.chdir(dir_name) and copied to a relative
            # path, mutating the process-wide working directory as a side
            # effect; an explicit os.path.join avoids that.
            try:
                dest = os.path.join(dir_name, os.path.basename(file_name.rstrip('/')))
                shutil.copytree(file_name, dest)
            except shutil.Error as e:
                log.debug('Error: %s' % e)
            # eg. source or destination doesn't exist
            except IOError as e:
                log.debug('Error: %s' % e.strerror)
    return True
class BuildCtbExplorerRunner(object):
    """Drives the construction of a combat-tb explorer dataset."""

    def __init__(self, args=None):
        """
        Initializes an object to run CtbRunner in Galaxy.

        Stores the parsed command-line arguments plus shortcuts to the
        four input/output locations they describe.
        """
        self.args = args
        self.output_neo4jdb = args.output_neo4jdb
        self.output_jbrowser = args.output_jbrowser
        self.input_neo4jdb = args.input_neo4jdb
        self.input_jbrowser = args.input_jbrowser

    def build_ctb_explorer(self):
        """
        Copy both datasets into place and report what was built.

        :rtype: boolean
        """
        # Copy the neo4j payload first; the jbrowser copy is skipped
        # entirely when it fails (short-circuit, as before).
        neo4j_ok = copy_output_file_to_dataset(
            self.output_neo4jdb, self.input_neo4jdb, dt_type="neo4jdb")
        if not (neo4j_ok and copy_output_file_to_dataset(
                self.output_jbrowser, self.input_jbrowser, dt_type="jbrowser")):
            return False
        # Copy the jbrowser input data file to the outputdir as index.html
        # @TODO: investigate alternatives
        try:
            shutil.copy2(self.input_jbrowser,
                         os.path.join(self.output_jbrowser, 'index.html'))
        except shutil.Error as e:
            log.debug('Error: %s' % e)
        # eg. source or destination doesn't exist
        except IOError as e:
            log.debug('Error: %s' % e.strerror)
        print("CTB Report run time: %s" % str(datetime.date.today()))
        print("Neo4jDB - Input: %s" % str(self.args.input_neo4jdb))
        print("JBrowser - Input: %s" % str(self.args.input_jbrowser))
        return True
def main():
    """Parse CLI arguments, create the output directories, and build the dataset."""
    parser = argparse.ArgumentParser(description="Tool used to build a combat-tb explorer dataset")
    parser.add_argument('--output_neo4jdb')
    parser.add_argument('--output_jbrowser')
    parser.add_argument('--input_neo4jdb')
    parser.add_argument('--input_jbrowser')
    args = parser.parse_args()
    ctb_explorer_runner = BuildCtbExplorerRunner(args)
    # make the output directory (neo4j)
    if not os.path.exists(args.output_neo4jdb):
        os.makedirs(args.output_neo4jdb)
    # make the output directory (jbrowser)
    if not os.path.exists(args.output_jbrowser):
        os.makedirs(args.output_jbrowser)
    status = ctb_explorer_runner.build_ctb_explorer()
    # Bug fix: build_ctb_explorer() returns True/False and never None, so the
    # previous `if status is None` check could never report failure.
    if not status:
        exit(1)
if status is None:
exit(1)
# Script entry point when executed directly (e.g. as a Galaxy tool wrapper).
if __name__ == "__main__":
    main()
| SANBI-SA/tools-sanbi-uwc | tools/build_ctb_explorer/build_ctb_explorer.py | Python | gpl-3.0 | 4,075 | [
"Galaxy"
] | 981d6209c82a85db2b8a1ed1f6b4a4b13808b9b55ded1a37fe6754470a8ff771 |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tool to inspect a model."""
import os
import time
from typing import Text, Tuple, List
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from tensorflow_examples.lite.model_maker.third_party.efficientdet import inference
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
from tensorflow.python.client import timeline # pylint: disable=g-direct-tensorflow-import
# Command-line flags controlling which model to inspect and how to run it.
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model.')
flags.DEFINE_string('logdir', '/tmp/deff/', 'log directory.')
flags.DEFINE_string('runmode', 'dry', 'Run mode: {freeze, bm, dry}')
flags.DEFINE_string('trace_filename', None, 'Trace file name.')
flags.DEFINE_integer('threads', 0, 'Number of threads.')
flags.DEFINE_integer('bm_runs', 10, 'Number of benchmark runs.')
flags.DEFINE_string('tensorrt', None, 'TensorRT mode: {None, FP32, FP16, INT8}')
flags.DEFINE_bool('delete_logdir', True, 'Whether to delete logdir.')
flags.DEFINE_bool('freeze', False, 'Freeze graph.')
flags.DEFINE_bool('use_xla', False, 'Run with xla optimization.')
flags.DEFINE_integer('batch_size', 1, 'Batch size for inference.')
flags.DEFINE_string('ckpt_path', None, 'checkpoint dir used for eval.')
flags.DEFINE_string('export_ckpt', None, 'Path for exporting new models.')
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')
flags.DEFINE_string('input_image', None, 'Input image path for inference.')
flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.')
# For video.
flags.DEFINE_string('input_video', None, 'Input video path for inference.')
flags.DEFINE_string('output_video', None,
                    'Output video path. If None, play it online instead.')
# For visualization.
flags.DEFINE_integer('line_thickness', None, 'Line thickness for box.')
flags.DEFINE_integer('max_boxes_to_draw', 100, 'Max number of boxes to draw.')
flags.DEFINE_float('min_score_thresh', 0.4, 'Score threshold to show box.')
flags.DEFINE_string('nms_method', 'hard', 'nms method, hard or gaussian.')
# For saved model.
flags.DEFINE_string('saved_model_dir', '/tmp/saved_model',
                    'Folder path for saved model.')
flags.DEFINE_string('tflite_path', None, 'Path for exporting tflite file.')
# Parsed flag namespace used throughout this script.
FLAGS = flags.FLAGS
class ModelInspector(object):
  """A simple helper class for inspecting a model.

  Wraps TF1 graph construction, checkpoint restore, freezing, SavedModel
  export/inference, video inference, and benchmarking behind the single
  `run_model(runmode, ...)` entry point.
  """
  def __init__(self,
               model_name: Text,
               logdir: Text,
               tensorrt: Text = False,
               use_xla: bool = False,
               ckpt_path: Text = None,
               export_ckpt: Text = None,
               saved_model_dir: Text = None,
               tflite_path: Text = None,
               batch_size: int = 1,
               hparams: Text = '',
               **kwargs):  # pytype: disable=annotation-type-mismatch
    # NOTE(review): `tensorrt` is annotated Text but defaults to False; the
    # code only tests its truthiness and passes it on as a precision mode.
    self.model_name = model_name
    self.logdir = logdir
    self.tensorrt = tensorrt
    self.use_xla = use_xla
    self.ckpt_path = ckpt_path
    self.export_ckpt = export_ckpt
    self.saved_model_dir = saved_model_dir
    self.tflite_path = tflite_path
    model_config = hparams_config.get_detection_config(model_name)
    model_config.override(hparams)  # Add custom overrides
    model_config.is_training_bn = False
    model_config.image_size = utils.parse_image_size(model_config.image_size)
    # If batch size is 0, then build a graph with dynamic batch size.
    self.batch_size = batch_size or None
    self.labels_shape = [batch_size, model_config.num_classes]
    # A hack to make flag consistent with nms configs.
    if kwargs.get('score_thresh', None):
      model_config.nms_configs.score_thresh = kwargs['score_thresh']
    if kwargs.get('nms_method', None):
      model_config.nms_configs.method = kwargs['nms_method']
    if kwargs.get('max_output_size', None):
      model_config.nms_configs.max_output_size = kwargs['max_output_size']
    height, width = model_config.image_size
    if model_config.data_format == 'channels_first':
      self.inputs_shape = [batch_size, 3, height, width]
    else:
      self.inputs_shape = [batch_size, height, width, 3]
    self.model_config = model_config
  def build_model(self, inputs: tf.Tensor) -> List[tf.Tensor]:
    """Build model with inputs and labels and print out model stats."""
    logging.info('start building model')
    cls_outputs, box_outputs = inference.build_model(
        self.model_name,
        inputs,
        **self.model_config)
    # Write to tfevent for tensorboard.
    train_writer = tf.summary.FileWriter(self.logdir)
    train_writer.add_graph(tf.get_default_graph())
    train_writer.flush()
    # Flatten the per-level class and box outputs into a single list.
    all_outputs = list(cls_outputs.values()) + list(box_outputs.values())
    return all_outputs
  def export_saved_model(self, **kwargs):
    """Export a saved model for inference."""
    tf.enable_resource_variables()
    driver = inference.ServingDriver(
        self.model_name,
        self.ckpt_path,
        batch_size=self.batch_size,
        use_xla=self.use_xla,
        model_params=self.model_config.as_dict(),
        **kwargs)
    driver.build()
    driver.export(self.saved_model_dir, self.tflite_path, self.tensorrt)
  def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
    """Perform inference for the given saved model."""
    driver = inference.ServingDriver(
        self.model_name,
        self.ckpt_path,
        batch_size=self.batch_size,
        use_xla=self.use_xla,
        model_params=self.model_config.as_dict(),
        **kwargs)
    driver.load(self.saved_model_dir)
    # Serving time batch size should be fixed.
    batch_size = self.batch_size or 1
    all_files = list(tf.io.gfile.glob(image_path_pattern))
    print('all_files=', all_files)
    # Ceiling division: the last batch may be partial and is padded below.
    num_batches = (len(all_files) + batch_size - 1) // batch_size
    for i in range(num_batches):
      batch_files = all_files[i * batch_size:(i + 1) * batch_size]
      height, width = self.model_config.image_size
      images = [Image.open(f) for f in batch_files]
      if len(set([m.size for m in images])) > 1:
        # Resize only if images in the same batch have different sizes.
        # NOTE(review): PIL's Image.resize expects a (width, height) tuple;
        # resize(height, width) passes `width` as the resample argument --
        # looks wrong, confirm against PIL docs.
        images = [m.resize(height, width) for m in images]
      raw_images = [np.array(m) for m in images]
      size_before_pad = len(raw_images)
      if size_before_pad < batch_size:
        # Pad the final partial batch with zero images so shapes match.
        padding_size = batch_size - size_before_pad
        raw_images += [np.zeros_like(raw_images[0])] * padding_size
      detections_bs = driver.serve_images(raw_images)
      for j in range(size_before_pad):
        img = driver.visualize(raw_images[j], detections_bs[j], **kwargs)
        img_id = str(i * batch_size + j)
        output_image_path = os.path.join(output_dir, img_id + '.jpg')
        Image.fromarray(img).save(output_image_path)
        print('writing file to %s' % output_image_path)
  def saved_model_benchmark(self,
                            image_path_pattern,
                            trace_filename=None,
                            **kwargs):
    """Perform inference for the given saved model."""
    driver = inference.ServingDriver(
        self.model_name,
        self.ckpt_path,
        batch_size=self.batch_size,
        use_xla=self.use_xla,
        model_params=self.model_config.as_dict(),
        **kwargs)
    driver.load(self.saved_model_dir)
    raw_images = []
    all_files = list(tf.io.gfile.glob(image_path_pattern))
    if len(all_files) < self.batch_size:
      # Repeat the file list so at least batch_size images are available.
      all_files = all_files * (self.batch_size // len(all_files) + 1)
    raw_images = [np.array(Image.open(f)) for f in all_files[:self.batch_size]]
    driver.benchmark(raw_images, trace_filename)
  def saved_model_video(self, video_path: Text, output_video: Text, **kwargs):
    """Perform video inference for the given saved model."""
    import cv2  # pylint: disable=g-import-not-at-top
    driver = inference.ServingDriver(
        self.model_name,
        self.ckpt_path,
        batch_size=1,
        use_xla=self.use_xla,
        model_params=self.model_config.as_dict())
    driver.load(self.saved_model_dir)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
      print('Error opening input video: {}'.format(video_path))
    out_ptr = None
    if output_video:
      # Mirror the input's frame size for the output writer.
      frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
      out_ptr = cv2.VideoWriter(output_video,
                                cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25,
                                (frame_width, frame_height))
    while cap.isOpened():
      # Capture frame-by-frame
      ret, frame = cap.read()
      if not ret:
        break
      raw_frames = [np.array(frame)]
      detections_bs = driver.serve_images(raw_frames)
      new_frame = driver.visualize(raw_frames[0], detections_bs[0], **kwargs)
      if out_ptr:
        # write frame into output file.
        out_ptr.write(new_frame)
      else:
        # show the frame online, mainly used for real-time speed test.
        cv2.imshow('Frame', new_frame)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
          break
  def inference_single_image(self, image_image_path, output_dir, **kwargs):
    # Thin wrapper over InferenceDriver for one image (or glob pattern).
    driver = inference.InferenceDriver(self.model_name, self.ckpt_path,
                                       self.model_config.as_dict())
    driver.inference(image_image_path, output_dir, **kwargs)
  def build_and_save_model(self):
    """build and save the model into self.logdir."""
    with tf.Graph().as_default(), tf.Session() as sess:
      # Build model with inputs and labels.
      inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
      outputs = self.build_model(inputs)
      # Run the model
      inputs_val = np.random.rand(*self.inputs_shape).astype(float)
      labels_val = np.zeros(self.labels_shape).astype(np.int64)
      labels_val[:, 0] = 1
      if self.ckpt_path:
        # Load the true weights if available.
        inference.restore_ckpt(sess, self.ckpt_path,
                               self.model_config.moving_average_decay,
                               self.export_ckpt)
      else:
        sess.run(tf.global_variables_initializer())
      # Run a single train step.
      sess.run(outputs, feed_dict={inputs: inputs_val})
      all_saver = tf.train.Saver(save_relative_paths=True)
      all_saver.save(sess, os.path.join(self.logdir, self.model_name))
      tf_graph = os.path.join(self.logdir, self.model_name + '_train.pb')
      with tf.io.gfile.GFile(tf_graph, 'wb') as f:
        f.write(sess.graph_def.SerializeToString())
  def eval_ckpt(self):
    """build and save the model into self.logdir."""
    with tf.Graph().as_default(), tf.Session() as sess:
      # Build model with inputs and labels.
      inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
      self.build_model(inputs)
      inference.restore_ckpt(sess, self.ckpt_path,
                             self.model_config.moving_average_decay,
                             self.export_ckpt)
  def freeze_model(self) -> Tuple[Text, Text]:
    """Freeze model and convert them into tflite and tf graph."""
    with tf.Graph().as_default(), tf.Session() as sess:
      inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
      outputs = self.build_model(inputs)
      if self.ckpt_path:
        # Load the true weights if available.
        inference.restore_ckpt(sess, self.ckpt_path,
                               self.model_config.moving_average_decay,
                               self.export_ckpt)
      else:
        # Load random weights if not checkpoint is not available.
        self.build_and_save_model()
        checkpoint = tf.train.latest_checkpoint(self.logdir)
        logging.info('Loading checkpoint: %s', checkpoint)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint)
      # export frozen graph.
      output_node_names = [node.op.name for node in outputs]
      graphdef = tf.graph_util.convert_variables_to_constants(
          sess, sess.graph_def, output_node_names)
      tf_graph = os.path.join(self.logdir, self.model_name + '_frozen.pb')
      tf.io.gfile.GFile(tf_graph, 'wb').write(graphdef.SerializeToString())
      # export savaed model.
      output_dict = {'class_predict_%d' % i: outputs[i] for i in range(5)}
      output_dict.update({'box_predict_%d' % i: outputs[5+i] for i in range(5)})
      signature_def_map = {
          'serving_default':
              tf.saved_model.predict_signature_def(
                  {'input': inputs},
                  output_dict,
              )
      }
      output_dir = os.path.join(self.logdir, 'savedmodel')
      b = tf.saved_model.Builder(output_dir)
      b.add_meta_graph_and_variables(
          sess,
          tags=['serve'],
          signature_def_map=signature_def_map,
          assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
          clear_devices=True)
      b.save()
      logging.info('Model saved at %s', output_dir)
      return graphdef
  def benchmark_model(self,
                      warmup_runs,
                      bm_runs,
                      num_threads,
                      trace_filename=None):
    """Benchmark model."""
    if self.tensorrt:
      print('Using tensorrt ', self.tensorrt)
      # graphdef is only defined on the TensorRT path; it is only read
      # below under the same `if self.tensorrt:` condition.
      graphdef = self.freeze_model()
    if num_threads > 0:
      print('num_threads for benchmarking: {}'.format(num_threads))
      sess_config = tf.ConfigProto(
          intra_op_parallelism_threads=num_threads,
          inter_op_parallelism_threads=1)
    else:
      sess_config = tf.ConfigProto()
    sess_config.graph_options.rewrite_options.dependency_optimization = 2
    if self.use_xla:
      sess_config.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_2)
    with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
      inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
      output = self.build_model(inputs)
      img = np.random.uniform(size=self.inputs_shape)
      sess.run(tf.global_variables_initializer())
      if self.tensorrt:
        fetches = [inputs.name] + [i.name for i in output]
        goutput = self.convert_tr(graphdef, fetches)
        inputs, output = goutput[0], goutput[1:]
      if not self.use_xla:
        # Don't use tf.group because XLA removes the whole graph for tf.group.
        output = tf.group(*output)
      else:
        output = tf.add_n([tf.reduce_sum(x) for x in output])
      output_name = [output.name]
      input_name = inputs.name
      graphdef = tf.graph_util.convert_variables_to_constants(
          sess, sess.graph_def, output_name)
    with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
      tf.import_graph_def(graphdef, name='')
      for i in range(warmup_runs):
        start_time = time.time()
        sess.run(output_name, feed_dict={input_name: img})
        logging.info('Warm up: {} {:.4f}s'.format(i, time.time() - start_time))
      print('Start benchmark runs total={}'.format(bm_runs))
      start = time.perf_counter()
      for i in range(bm_runs):
        sess.run(output_name, feed_dict={input_name: img})
      end = time.perf_counter()
      inference_time = (end - start) / bm_runs
      print('Per batch inference time: ', inference_time)
      print('FPS: ', self.batch_size / inference_time)
      if trace_filename:
        # One extra traced run to dump a Chrome-trace timeline.
        run_options = tf.RunOptions()
        run_options.trace_level = tf.RunOptions.FULL_TRACE
        run_metadata = tf.RunMetadata()
        sess.run(
            output_name,
            feed_dict={input_name: img},
            options=run_options,
            run_metadata=run_metadata)
        logging.info('Dumping trace to %s', trace_filename)
        trace_dir = os.path.dirname(trace_filename)
        if not tf.io.gfile.exists(trace_dir):
          tf.io.gfile.makedirs(trace_dir)
        with tf.io.gfile.GFile(trace_filename, 'w') as trace_file:
          trace = timeline.Timeline(step_stats=run_metadata.step_stats)
          trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
  def convert_tr(self, graph_def, fetches):
    """Convert to TensorRT."""
    from tensorflow.python.compiler.tensorrt import trt  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    converter = trt.TrtGraphConverter(
        nodes_denylist=[t.split(':')[0] for t in fetches],
        input_graph_def=graph_def,
        precision_mode=self.tensorrt)
    infer_graph = converter.convert()
    goutput = tf.import_graph_def(infer_graph, return_elements=fetches)
    return goutput
  def run_model(self, runmode, **kwargs):
    """Run the model on devices."""
    if runmode == 'dry':
      self.build_and_save_model()
    elif runmode == 'freeze':
      self.freeze_model()
    elif runmode == 'ckpt':
      self.eval_ckpt()
    elif runmode == 'saved_model_benchmark':
      self.saved_model_benchmark(
          kwargs['input_image'],
          trace_filename=kwargs.get('trace_filename', None))
    elif runmode in ('infer', 'saved_model', 'saved_model_infer',
                     'saved_model_video'):
      # Collect only the visualization options the caller actually set.
      config_dict = {}
      if kwargs.get('line_thickness', None):
        config_dict['line_thickness'] = kwargs.get('line_thickness')
      if kwargs.get('max_boxes_to_draw', None):
        config_dict['max_boxes_to_draw'] = kwargs.get('max_boxes_to_draw')
      if kwargs.get('min_score_thresh', None):
        config_dict['min_score_thresh'] = kwargs.get('min_score_thresh')
      if runmode == 'saved_model':
        self.export_saved_model(**config_dict)
      elif runmode == 'infer':
        self.inference_single_image(kwargs['input_image'],
                                    kwargs['output_image_dir'], **config_dict)
      elif runmode == 'saved_model_infer':
        self.saved_model_inference(kwargs['input_image'],
                                   kwargs['output_image_dir'], **config_dict)
      elif runmode == 'saved_model_video':
        self.saved_model_video(kwargs['input_video'], kwargs['output_video'],
                               **config_dict)
    elif runmode == 'bm':
      self.benchmark_model(
          warmup_runs=5,
          bm_runs=kwargs.get('bm_runs', 10),
          num_threads=kwargs.get('threads', 0),
          trace_filename=kwargs.get('trace_filename', None))
    else:
      # NOTE(review): "Unkown" typo in this user-facing message (runtime
      # string, intentionally left unchanged here).
      raise ValueError('Unkown runmode {}'.format(runmode))
def main(_):
  """Build a ModelInspector from FLAGS and dispatch the requested runmode."""
  # Optionally wipe the previous log directory for a clean run.
  if tf.io.gfile.exists(FLAGS.logdir) and FLAGS.delete_logdir:
    logging.info('Deleting log dir ...')
    tf.io.gfile.rmtree(FLAGS.logdir)
  inspector = ModelInspector(
      model_name=FLAGS.model_name,
      logdir=FLAGS.logdir,
      tensorrt=FLAGS.tensorrt,
      use_xla=FLAGS.use_xla,
      ckpt_path=FLAGS.ckpt_path,
      export_ckpt=FLAGS.export_ckpt,
      saved_model_dir=FLAGS.saved_model_dir,
      tflite_path=FLAGS.tflite_path,
      batch_size=FLAGS.batch_size,
      hparams=FLAGS.hparams,
      score_thresh=FLAGS.min_score_thresh,
      max_output_size=FLAGS.max_boxes_to_draw,
      nms_method=FLAGS.nms_method)
  inspector.run_model(
      FLAGS.runmode,
      input_image=FLAGS.input_image,
      output_image_dir=FLAGS.output_image_dir,
      input_video=FLAGS.input_video,
      output_video=FLAGS.output_video,
      line_thickness=FLAGS.line_thickness,
      max_boxes_to_draw=FLAGS.max_boxes_to_draw,
      min_score_thresh=FLAGS.min_score_thresh,
      nms_method=FLAGS.nms_method,
      bm_runs=FLAGS.bm_runs,
      threads=FLAGS.threads,
      trace_filename=FLAGS.trace_filename)
if __name__ == '__main__':
  logging.set_verbosity(logging.WARNING)
  # This script relies on TF1-style graph mode throughout.
  tf.enable_v2_tensorshape()
  tf.disable_eager_execution()
  app.run(main)
| tensorflow/examples | tensorflow_examples/lite/model_maker/third_party/efficientdet/model_inspect.py | Python | apache-2.0 | 20,632 | [
"Gaussian"
] | 6bd41e9801c1bdc3143f5ffa7ce61849adf9819628ffb17246e79d0195e3ba6f |
import cv2
import numpy as np
import matplotlib.pyplot as plt
'''
Starting with a user-specified bounding box (the box with the smallest measure within which all the points lie) around
the object to be segmented, the algorithm estimates the color distribution of the target object and that of the
background using a Gaussian mixture model (a probabilistic model for representing the presence of sub-populations within
an overall population).
This is used to construct a Markov random field (a set of random variables whose future states depend only upon the
current state) over the pixel labels, with an energy function (mathematical optimization) that prefers connected
regions having the same label, and running a graph cut based optimization to infer their values.
As this estimate is likely to be more accurate than the original, taken from the bounding box, this two-step procedure
is repeated until convergence.
'''
# Load the target image and start with an all-zeros (background) label mask,
# one label per pixel.
img = cv2.imread('bradco.jpg')
mask = np.zeros(img.shape[:2], np.uint8)

# Scratch arrays used internally by grabCut for the background/foreground
# Gaussian mixture models; shape (1, 65) is required by the OpenCV API.
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

# Initial bounding box (x, y, w, h) assumed to contain the object; run 5
# refinement iterations starting from the rectangle.
rect = (0, 0, 300, 300)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# Labels 0 and 2 are (definite/probable) background -> 0; everything else is
# foreground -> 1.
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
# Zero out background pixels across all colour channels.
img = img * mask2[:, :, np.newaxis]

# NOTE(review): OpenCV loads images as BGR while matplotlib expects RGB, so
# the displayed colours are presumably swapped — acceptable for this demo.
plt.imshow(img)
plt.colorbar()
plt.show()
| RyanChinSang/ECNG3020-ORSS4SCVI | BETA/TestCode/OpenCV/TUT-GrabCutFgExtr.py | Python | gpl-3.0 | 1,337 | [
"Gaussian"
] | 0393d8d20dd960ec492d38002625c4356848099c42a3e88bd3e6f58ed6867152 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
FParser printer
The FParserPrinter converts single sympy expressions into a single FParser.
"""
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from sympy import ccode
# Dictionary mapping a sympy function name to the corresponding FParser
# function name. Used in FParserPrinter._print_Function(self), inherited
# from CodePrinter, to translate function calls.
known_functions = {
    "Abs": "abs",
    "sin": "sin",
    "cos": "cos",
    "tan": "tan",
    "asin": "asin",
    "acos": "acos",
    "atan": "atan",
    "atan2": "atan2",
    "exp": "exp",
    "log": "log",
    "erf": "erf",
    "sinh": "sinh",
    "cosh": "cosh",
    "tanh": "tanh",
    "asinh": "asinh",
    "acosh": "acosh",
    "atanh": "atanh",
    "floor": "floor",
    "ceiling": "ceil",
}
class FParserPrinter(CodePrinter):
    """A printer to convert python expressions to FParser expressions"""
    printmethod = "_fparser"

    # Printer settings consumed by the CodePrinter base class.
    _default_settings = {
        'order': None,
        'human': False,
        'full_prec': 'auto',
        'precision': 15,
    }

    # overwrite some operators (FParser uses single char and/or)
    _operators = {
        'and': '&',
        'or': '|',
        'not': '!',
    }

    def __init__(self, **kwargs):
        """Register function mappings supplied by user"""
        # CodePrinter expects the settings as a single dict argument.
        CodePrinter.__init__(self, kwargs)
        self.known_functions = dict(known_functions)

    def _rate_index_position(self, p):
        """function to calculate score based on position among indices
        This method is used to sort loops in an optimized order, see
        CodePrinter._sort_optimized()
        """
        return p*5

    def _format_code(self, lines):
        # No extra formatting; FParser output is a plain expression string.
        return lines

    def _get_statement(self, codestring):
        # Statements are terminated with a semicolon.
        return "%s;" % codestring

    def _get_loop_opening_ending(self, indices):
        # FParser has no loop construct, so emit nothing.
        return '',''

    def _print_Pow(self, expr):
        # Special-case reciprocal, square root and powers of two; fall back
        # to FParser's '^' operator otherwise.
        PREC = precedence(expr)
        if expr.exp == -1:
            return '1/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        elif expr.base == 2:
            return 'exp2(%s)' % self._print(expr.exp)
        else:
            return '%s^%s' % (self.parenthesize(expr.base, PREC),
                              self.parenthesize(expr.exp, PREC))

    def _print_BaseScalar(self, expr):
        """
        Print simple variable names instead of R.variable_name
        see sympy/sympy/vector/scalar.py
        """
        index, system = expr._id
        return system._variable_names[index]

    def _print_Rational(self, expr):
        # Keep rationals as exact integer division, e.g. 1/3.
        p, q = int(expr.p), int(expr.q)
        return '%d/%d' % (p, q)

    def _print_Indexed(self, expr):
        raise TypeError("FParserPrinter does not support array indices")

    def _print_Idx(self, expr):
        raise TypeError("FParserPrinter does not support array indices")

    def _print_Exp1(self, expr):
        # Euler's number; FParser has no named constant for it.
        return 'exp(1)'

    #def _print_Pi(self, expr):
    #    return '3.14159265359'

    # TODO: we need a more elegant way to deal with infinity in FParser
    def _print_Float(self, expr):
        # Clamp infinities to very large finite magnitudes (see TODO above).
        if expr == float("inf"):
            return "1e200"
        elif expr == float("-inf"):
            return "-1e200"
        else:
            return CodePrinter._print_Float(self, expr)

    def _print_Infinity(self, expr):
        return '1e200'

    def _print_NegativeInfinity(self, expr):
        return '-1e200'

    def _print_Piecewise(self, expr):
        # Emit nested FParser if(cond, value, ...) calls, one per branch.
        ecpairs = ["if(%s,%s" % (self._print(c), self._print(e))
                   for e, c in expr.args[:-1]]
        if expr.args[-1].cond == True:
            # A trailing True condition serves as the default branch.
            ecpairs.append("%s" % self._print(expr.args[-1].expr))
        else:
            # there is no default value, so we generate an invalid expression
            # that will fail at runtime
            ecpairs.append("if(%s,%s,0/0)" %
                           (self._print(expr.args[-1].cond),
                            self._print(expr.args[-1].expr)))
        return ",".join(ecpairs) + ")" * (len(ecpairs)-1)
def fparser(expr, assign_to=None, **kwargs):
    r"""Convert a sympy expression into an FParser expression string.

    Parameters
    ==========
    expr : sympy.core.Expr
        the sympy expression to convert
    assign_to : optional
        variable name assigned to the result in the generated code
    precision : optional
        the precision for numbers such as pi [default=15]

    Examples
    ========
    >>> from sympy import symbols, sin
    >>> x = symbols("x")
    >>> fparser(sin(x), assign_to="s")
    's = sin(x);'
    """
    printer = FParserPrinter(**kwargs)
    result = printer.doprint(expr, assign_to)
    # The generated code string is the last element of doprint's return value.
    return result[-1]
def print_fparser(expr, **kwargs):
    """Prints an FParser representation of the given expression."""
    code = fparser(expr, **kwargs)
    print(str(code))
def build_hit(expr, name, **kwargs):
    """
    Create a hit node containing a ParsedFunction of the given expression

    Inputs:
        expr[sympy.core.Expr]: The sympy expression to convert
        name[str]: The name of the input file block to create
        kwargs: Key, value pairs for val, vals input parameters (defaults to 1.0) if not provided
    """
    import pyhit

    # Free symbols other than the spatial coordinates and time become
    # ParsedFunction variables with a default value of 1.0.
    reserved = set(['R.x', 'R.y', 'R.z', 't'])
    if hasattr(expr, 'free_symbols'):
        user_symbols = set(str(s) for s in expr.free_symbols) - reserved
    else:
        user_symbols = set()
    for sym in user_symbols:
        kwargs.setdefault(sym, 1.)

    root = pyhit.Node(None, name)
    root['type'] = 'ParsedFunction'
    root['value'] = "'{}'".format(str(fparser(expr)))

    if kwargs:
        root['vars'] = "'{}'".format(' '.join(kwargs.keys()))
        root['vals'] = "'{}'".format(' '.join(str(v) for v in kwargs.values()))
    return root
def print_hit(*args, **kwargs):
    """Prints a hit block containing a ParsedFunction of the given expression"""
    node = build_hit(*args, **kwargs)
    rendered = node.render()
    print(rendered)
| harterj/moose | python/mms/fparser.py | Python | lgpl-2.1 | 6,200 | [
"MOOSE"
] | 6d066ea19eb0facc6140350d99e43034aef4c42b491f6ae4ac99ed51de1d855b |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.autoname import named
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import Adam
# This is a simple Gaussian mixture model.
#
# The example demonstrates how to pass named.Objects() from a global model to
# a local model implemented as a helper function.
def model(data, k):
    """Global model: GMM parameters plus one local_model site per datum."""
    latent = named.Object("latent")
    # Mixture weights, component locations and component scales.
    latent.probs.param_(torch.ones(k) / k, constraint=constraints.simplex)
    latent.locs.param_(torch.zeros(k))
    latent.scales.param_(torch.ones(k), constraint=constraints.positive)
    # Observe all the data, giving each observation its own named object.
    latent.local = named.List()
    for datum in data:
        site = latent.local.add()
        local_model(site, latent.probs, latent.locs, latent.scales, obs=datum)
def local_model(latent, ps, locs, scales, obs=None):
    """Sample a mixture component id, then a Normal observation from it."""
    component = latent.id.sample_(dist.Categorical(ps))
    return latent.x.sample_(dist.Normal(locs[component], scales[component]), obs=obs)
def guide(data, k):
    """Global guide: one local_guide per datum, mirroring the model's sites."""
    latent = named.Object("latent")
    latent.local = named.List()
    for _ in data:
        local_guide(latent.local.add(), k)
def local_guide(latent, k):
    """The local guide simply guesses this datum's category assignment."""
    # Learnable per-datum assignment weights (Categorical normalizes them).
    uniform = torch.ones(k) / k
    latent.probs.param_(uniform, constraint=constraints.positive)
    latent.id.sample_(dist.Categorical(latent.probs))
def main(args):
    """Fit the mixture with SVI, printing losses and learned parameters."""
    pyro.set_rng_seed(0)
    optimizer = Adam({"lr": 0.1})
    if args.jit:
        elbo = JitTrace_ELBO()
    else:
        elbo = Trace_ELBO()
    svi = SVI(model, guide, optimizer, loss=elbo)
    data = torch.tensor([0.0, 1.0, 2.0, 20.0, 30.0, 40.0])
    k = 2
    print("Step\tLoss")
    running_loss = 0.0
    for step in range(args.num_epochs):
        # Report the loss accumulated over each window of 10 steps.
        if step and step % 10 == 0:
            print("{}\t{:0.5g}".format(step, running_loss))
            running_loss = 0.0
        running_loss += svi.step(data, k=k)
    print("Parameters:")
    for name, value in sorted(pyro.get_param_store().items()):
        print("{} = {}".format(name, value.detach().cpu().numpy()))
if __name__ == "__main__":
    # This example is pinned to a specific pyro release.
    assert pyro.__version__.startswith("1.7.0")
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument("-n", "--num-epochs", default=200, type=int)
    # --jit selects the PyTorch-JIT-compiled ELBO implementation.
    parser.add_argument("--jit", action="store_true")
    args = parser.parse_args()
    main(args)
| uber/pyro | examples/contrib/autoname/mixture.py | Python | apache-2.0 | 2,572 | [
"Gaussian"
] | dbfac4db4815c49c194efc743c7e76ea6395770130aa176a81d5df965298fcbc |
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
import argparse
from argparse import ArgumentParser
import os
from Bio import SeqIO
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_GENOMES, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.pipeline.run_info import ALLOWED_CONTIG_NAME_CHARS
from bcbio.galaxy import loc
from fabric.api import *
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
# Sub-directory layout created under <genomes>/<name>/<build>/.
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
# Public S3 bucket holding the ERCC spike-in fasta/gtf archives.
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def gff3_to_gtf(gff3_file):
    """Convert a GFF3 annotation file to GTF next to the input file.

    Returns the .gtf path; reuses an existing conversion when present.
    """
    # Output dialect telling gffutils to format attributes GTF-style.
    dialect = {'field separator': '; ',
               'fmt': 'gtf',
               'keyval separator': ' ',
               'leading semicolon': False,
               'multival separator': ',',
               'quoted GFF2 values': True,
               'order': ['gene_id', 'transcript_id'],
               'repeated keys': False,
               'trailing semicolon': True}
    out_file = os.path.splitext(gff3_file)[0] + ".gtf"
    if file_exists(out_file):
        # Already converted on a previous run.
        return out_file
    print "Converting %s to %s." %(gff3_file, out_file)
    # In-memory feature database makes the parent lookups below cheap.
    db = gffutils.create_db(gff3_file, ":memory:")
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
                # GTF needs transcript_id/gene_id; derive them from the
                # GFF3 Parent chain (exon -> transcript -> gene).
                transcript_id = feature["Parent"][0]
                gene_id = db[transcript_id]["Parent"][0]
                attr = {"transcript_id": transcript_id, "gene_id": gene_id}
                attributes = gffutils.attributes.Attributes(attr)
                feature.attributes = attributes
                print >> out_handle, feature
    return out_file
def _index_w_command(dir_name, command, ref_file, ext=None):
    """Run an indexing command template against ref_file.

    The index is written into a <build>/<dir_name> directory that is a
    sibling of ref_file's directory; returns the index path. This function
    is monkey-patched over cloudbiolinux's implementation (see __main__).
    """
    base = os.path.splitext(os.path.basename(ref_file))[0]
    if ext is not None:
        base += ext
    build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
    out_dir = os.path.join(build_path, dir_name)
    index_path = os.path.join(out_dir, base)
    if not env.safe_exists(out_dir):
        env.safe_run("mkdir %s" % out_dir)
    cmd = command.format(ref_file=ref_file, index_name=index_path)
    subprocess.check_call(cmd, shell=True)
    return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
    """Create <genome_dir>/<name>/<build>/seq (plus rnaseq when a GTF is
    supplied) and return the build directory path."""
    name_dir = os.path.join(genome_dir, name)
    build_dir = os.path.join(name_dir, build)
    for subdir in (name_dir, build_dir, os.path.join(build_dir, SEQ_DIR)):
        safe_makedir(subdir)
    if gtf:
        safe_makedir(os.path.join(build_dir, RNASEQ_DIR))
    return build_dir
def install_fasta_file(build_dir, fasta, build):
    """Copy the genome fasta into <build_dir>/seq/<build>.fa, sanitizing
    record names on the way; no-op when the target already exists."""
    out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
    if os.path.exists(out_file):
        return out_file
    with open(out_file, "w") as out_handle:
        cleaned = (_clean_rec_name(rec) for rec in SeqIO.parse(fasta, "fasta"))
        SeqIO.write(cleaned, out_handle, "fasta")
    return out_file
def _clean_rec_name(rec):
    """Clean illegal characters in input fasta file which cause problems downstream.
    """
    # Replace every disallowed character with an underscore and drop the
    # free-text description so only the sanitized id remains.
    rec.id = "".join(c if c in ALLOWED_CONTIG_NAME_CHARS else "_"
                     for c in rec.id)
    rec.description = ""
    return rec
def install_gtf_file(build_dir, gtf, build):
    """Copy the transcriptome GTF to its canonical rnaseq location,
    skipping the copy when the target already exists."""
    target = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
    if os.path.exists(target):
        return target
    shutil.copyfile(gtf, target)
    return target
def install_srna(species, gtf):
    """Set up small RNA-seq support: install the srna GTF and extract the
    species-specific mirbase precursor/miRNA files into the srnaseq dir."""
    safe_makedir(SRNASEQ_DIR)
    out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
    if not os.path.exists(out_file):
        shutil.copyfile(gtf, out_file)
    try:
        from seqcluster import install
    except ImportError:
        raise ImportError("install seqcluster first, please.")
    with chdir(SRNASEQ_DIR):
        hairpin, miRNA = install._install_mirbase()
        # Pull this species' precursors, converting RNA (U) to DNA (T).
        cmd = ("grep -A 2 {species} {hairpin} | grep -v '\-\-$' | tr U T > hairpin.fa")
        do.run(cmd.format(**locals()), "set precursor.")
        # Pull this species' miRNA structure records.
        cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
        do.run(cmd.format(**locals()), "set miRNA.")
        # The raw mirbase download is no longer needed once extracted.
        shutil.rmtree("mirbase")
    return out_file
def append_ercc(gtf_file, fasta_file):
    """Append ERCC spike-in sequences and annotations in place to the
    installed genome fasta and transcriptome GTF."""
    ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
    tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
    # Download the gzipped fasta, then decompress-append it onto fasta_file.
    append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
    print append_fa_cmd.format(**locals())
    subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
    ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
    tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
    # Same treatment for the matching GTF annotations.
    append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
    print append_gtf_cmd.format(**locals())
    subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
if __name__ == "__main__":
    description = ("Set up a custom genome for bcbio-nextgen. This will "
                   "place the genome under name/build in the genomes "
                   "directory in your bcbio-nextgen installation.")
    parser = ArgumentParser(description=description)
    parser.add_argument("-c", "--cores", default=1,
                        help="number of cores to use")
    parser.add_argument("-f", "--fasta", required=True,
                        help="FASTA file of the genome.")
    parser.add_argument("--gff3", default=False, action='store_true',
                        help="File is a GFF3 file.")
    parser.add_argument("-g", "--gtf", default=None,
                        help="GTF file of the transcriptome")
    parser.add_argument("-n", "--name", required=True,
                        help="Name of organism, for example Hsapiens.")
    parser.add_argument("-b", "--build", required=True,
                        help="Build of genome, for example hg19.")
    parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
                        default=["seq"], help="Space separated list of indexes to make")
    parser.add_argument("--ercc", action='store_true', default=False,
                        help="Add ERCC spike-ins.")
    parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
    parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
    args = parser.parse_args()
    # smallRNA setup needs both the mirbase species and its GTF together.
    if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
        raise ValueError("--mirbase and --srna_gtf both need a value.")
    # Run fabric commands on the local machine with the requested cores.
    env.hosts = ["localhost"]
    env.cores = args.cores
    os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
    # Pull in cloudbiolinux and patch its indexing helper with ours.
    cbl = get_cloudbiolinux(REMOTES)
    sys.path.insert(0, cbl["dir"])
    genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
    # monkey patch cloudbiolinux to use this indexing command instead
    genomes = getattr(genomemod, 'genomes')
    genomes._index_w_command = _index_w_command
    fabmod = __import__("cloudbio", fromlist=["fabutils"])
    fabutils = getattr(fabmod, 'fabutils')
    fabutils.configure_runsudo(env)
    system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
    with open(system_config) as in_handle:
        # NOTE(review): yaml.load without an explicit Loader — fine for a
        # trusted local config, but yaml.safe_load would be safer.
        config = yaml.load(in_handle)
    env.picard_home = config_utils.get_program("picard", config, ptype="dir")
    genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
    args.fasta = os.path.abspath(args.fasta)
    args.gtf = os.path.abspath(args.gtf) if args.gtf else None
    if args.gff3:
        args.gtf = gff3_to_gtf(args.gtf)
    # always make a sequence dictionary
    if "seq" not in args.indexes:
        args.indexes.append("seq")
    env.system_install = genome_dir
    prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
    print "Creating directories using %s as the base." % (genome_dir)
    build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
    os.chdir(build_dir)
    print "Genomes will be installed into %s." % (build_dir)
    fasta_file = install_fasta_file(build_dir, args.fasta, args.build)
    print "Installed genome as %s." % (fasta_file)
    if args.gtf:
        if "bowtie2" not in args.indexes:
            args.indexes.append("bowtie2")
        gtf_file = install_gtf_file(build_dir, args.gtf, args.build)
        print "Installed GTF as %s." % (gtf_file)
    if args.ercc:
        # NOTE(review): gtf_file is only bound when --gtf was supplied, so
        # --ercc without --gtf would raise NameError here — confirm intent.
        print "Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file)
        append_ercc(gtf_file, fasta_file)
    indexed = {}
    for index in args.indexes:
        print "Creating the %s index." % (index)
        index_fn = genomes.get_index_fn(index)
        if not index_fn:
            print "Do not know how to make the index %s, skipping." % (index)
            continue
        indexed[index] = index_fn(fasta_file)
    indexed["samtools"] = fasta_file
    if args.gtf:
        # NOTE(review): the bare string below is a no-op statement —
        # presumably meant to be a print.
        "Preparing transcriptome."
        with chdir(os.path.join(build_dir, os.pardir)):
            cmd = ("{sys.executable} {prepare_tx} --cores {args.cores} --genome-dir {genome_dir} --gtf {gtf_file} {args.name} {args.build}")
            subprocess.check_call(cmd.format(**locals()), shell=True)
    if args.mirbase:
        # NOTE(review): another no-op bare string, see above.
        "Preparing smallRNA data."
        with chdir(os.path.join(build_dir)):
            install_srna(args.mirbase, args.srna_gtf)
    base_dir = os.path.normpath(os.path.dirname(fasta_file))
    resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
    print "Dumping genome resources to %s." % resource_file
    resource_dict = {"version": 1}
    if args.gtf:
        # Relative paths recorded for downstream bcbio RNA-seq pipelines.
        transcripts = ["rnaseq", "transcripts"]
        mask = ["rnaseq", "transcripts_mask"]
        index = ["rnaseq", "transcriptome_index", "tophat"]
        dexseq = ["rnaseq", "dexseq"]
        refflat = ["rnaseq", "refflat"]
        rRNA_fa = ["rnaseq", "rRNA_fa"]
        resource_dict = tz.update_in(resource_dict, transcripts,
                                     lambda x: "../rnaseq/ref-transcripts.gtf")
        resource_dict = tz.update_in(resource_dict, mask,
                                     lambda x: "../rnaseq/ref-transcripts-mask.gtf")
        resource_dict = tz.update_in(resource_dict, index,
                                     lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
        resource_dict = tz.update_in(resource_dict, refflat,
                                     lambda x: "../rnaseq/ref-transcripts.refFlat")
        resource_dict = tz.update_in(resource_dict, dexseq,
                                     lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
        resource_dict = tz.update_in(resource_dict, rRNA_fa,
                                     lambda x: "../rnaseq/rRNA.fa")
    if args.mirbase:
        srna_gtf = ["srnaseq", "srna-transcripts"]
        srna_mirbase = ["srnaseq", "mirbase"]
        resource_dict = tz.update_in(resource_dict, srna_gtf,
                                     lambda x: "../srnaseq/srna-transcripts.gtf")
        resource_dict = tz.update_in(resource_dict, srna_mirbase,
                                     lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
    with file_transaction(resource_file) as tx_resource_file:
        with open(tx_resource_file, "w") as out_handle:
            out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
    print "Updating Galaxy .loc files."
    galaxy_base = os.path.join(_get_data_dir(), "galaxy")
    for index, index_file in indexed.items():
        loc.update_loc_file(galaxy_base, index, args.build, index_file)
| gifford-lab/bcbio-nextgen | scripts/bcbio_setup_genome.py | Python | mit | 12,079 | [
"Galaxy"
] | cd23c67547ff222b2c4f3af48fedc690922a7a0e180ff9c29159093167ace797 |
#!/usr/bin/env python
"""
Originally written by Kelly Vincent
pretty output and additional picard wrappers by Ross Lazarus for rgenetics
Runs all available wrapped Picard tools.
usage: picard_wrapper.py [options]
code Ross wrote licensed under the LGPL
see http://www.gnu.org/copyleft/lesser.html
"""
import optparse, os, sys, subprocess, tempfile, shutil, time, logging
# HTML boilerplate used to assemble the Galaxy-facing report pages; the
# prefix takes the generating program name, galhtmlattr takes tool name
# and a timestamp.
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
galhtmlattr = """Galaxy tool %s run at %s</b><br/>"""
galhtmlpostfix = """</div></body></html>\n"""
def stop_err( msg ):
    """Report msg on stderr and terminate the process.

    Exits with status 0 (bare sys.exit), matching the original behavior —
    old Galaxy detects tool failure from stderr content, not exit status.
    """
    sys.stderr.write(msg + '\n')
    sys.exit()
def timenow():
    """Return the current local time formatted as dd/mm/yyyy hh:mm:ss."""
    now = time.localtime()
    return time.strftime('%d/%m/%Y %H:%M:%S', now)
class PicardBase():
    """
    simple base class with some utilities for Picard
    adapted and merged with Kelly Vincent's code april 2011 Ross
    lots of changes...
    """

    def __init__(self, opts=None, arg0=None):
        """ common stuff needed at init for a picard tool
        """
        assert opts <> None, 'PicardBase needs opts at init'
        self.opts = opts
        if self.opts.outdir == None:
            self.opts.outdir = os.getcwd() # fixmate has no html file eg so use temp dir
        assert self.opts.outdir <> None, '## PicardBase needs a temp directory if no output directory passed in'
        self.picname = self.baseName(opts.jar)
        if self.picname.startswith('picard'):
            self.picname = opts.picard_cmd # special case for some tools like replaceheader?
        self.progname = self.baseName(arg0)
        self.version = '0.002'
        self.delme = [] # list of files to destroy
        self.title = opts.title
        self.inputfile = opts.input
        # best-effort creation of the output and temp directories
        try:
            os.makedirs(opts.outdir)
        except:
            pass
        try:
            os.makedirs(opts.tmpdir)
        except:
            pass
        self.log_filename = os.path.join(self.opts.outdir, '%s.log' % self.picname)
        self.metricsOut = os.path.join(opts.outdir, '%s.metrics.txt' % self.picname)
        self.setLogging(logfname=self.log_filename)

    def baseName(self, name=None):
        # file name without directory or extension
        return os.path.splitext(os.path.basename(name))[0]

    def setLogging(self, logfname="picard_wrapper.log"):
        """setup a logger
        """
        logging.basicConfig(level=logging.INFO,
                            filename=logfname,
                            filemode='a')

    def readLarge(self, fname=None):
        """ read a potentially huge file.
        """
        try:
            # get stderr, allowing for case where it's very large
            tmp = open(fname, 'rb')
            s = ''
            buffsize = 1048576
            try:
                while True:
                    more = tmp.read(buffsize)
                    if len(more) > 0:
                        s += more
                    else:
                        break
            except OverflowError:
                pass
            tmp.close()
        except Exception, e:
            stop_err('Read Large Exception : %s' % str(e))
        return s

    def runCL(self, cl=None, output_dir=None):
        """ construct and run a command line
        we have galaxy's temp path as opt.temp_dir so don't really need isolation
        sometimes stdout is needed as the output - ugly hacks to deal with potentially vast artifacts
        """
        assert cl <> None, 'PicardBase runCL needs a command line as cl'
        if output_dir == None:
            output_dir = self.opts.outdir
        if type(cl) == type([]):
            cl = ' '.join(cl)
        # capture stdout/stderr to temp files so huge outputs never block pipes
        fd, templog = tempfile.mkstemp(dir=output_dir, suffix='rgtempRun.txt')
        tlf = open(templog, 'wb')
        fd, temperr = tempfile.mkstemp(dir=output_dir, suffix='rgtempErr.txt')
        tef = open(temperr, 'wb')
        process = subprocess.Popen(cl, shell=True, stderr=tef, stdout=tlf, cwd=output_dir)
        rval = process.wait()
        tlf.close()
        tef.close()
        stderrs = self.readLarge(temperr)
        stdouts = self.readLarge(templog)
        if rval > 0:
            s = '## executing %s returned status %d and stderr: \n%s\n' % (cl, rval, stderrs)
            stdouts = '%s\n%s' % (stdouts, stderrs)
        else:
            s = '## executing %s returned status %d and nothing on stderr\n' % (cl, rval)
        logging.info(s)
        os.unlink(templog) # always
        os.unlink(temperr) # always
        return s, stdouts, rval # sometimes s is an output

    def runPic(self, jar, cl):
        """
        cl should be everything after the jar file name in the command
        """
        # assemble: java -Xmx<heap> -Djava.io.tmpdir=<tmp> -jar <jar> <cl...>
        runme = ['java -Xmx%s' % self.opts.maxjheap]
        runme.append(" -Djava.io.tmpdir='%s' " % self.opts.tmpdir)
        runme.append('-jar %s' % jar)
        runme += cl
        s, stdouts, rval = self.runCL(cl=runme, output_dir=self.opts.outdir)
        return stdouts, rval

    def samToBam(self, infile=None, outdir=None):
        """
        use samtools view to convert sam to bam
        """
        fd, tempbam = tempfile.mkstemp(dir=outdir, suffix='rgutilsTemp.bam')
        cl = ['samtools view -h -b -S -o ', tempbam, infile]
        tlog, stdouts, rval = self.runCL(cl, outdir)
        return tlog, tempbam, rval

    def sortSam(self, infile=None, outfile=None, outdir=None):
        """Coordinate-sort a bam file with samtools sort.
        """
        print '## sortSam got infile=%s,outfile=%s,outdir=%s' % (infile, outfile, outdir)
        cl = ['samtools sort', infile, outfile]
        tlog, stdouts, rval = self.runCL(cl, outdir)
        return tlog

    def cleanup(self):
        # best-effort removal of all temp files recorded in self.delme
        for fname in self.delme:
            try:
                os.unlink(fname)
            except:
                pass

    def prettyPicout(self, transpose, maxrows):
        """organize picard outputs into a report html page
        """
        res = []
        try:
            r = open(self.metricsOut, 'r').readlines()
        except:
            r = []
        if len(r) > 0:
            res.append('<b>Picard on line resources</b><ul>\n')
            res.append('<li><a href="http://picard.sourceforge.net/index.shtml">Click here for Picard Documentation</a></li>\n')
            res.append('<li><a href="http://picard.sourceforge.net/picard-metric-definitions.shtml">Click here for Picard Metrics definitions</a></li></ul><hr/>\n')
            if transpose:
                res.append('<b>Picard output (transposed to make it easier to see)</b><hr/>\n')
            else:
                res.append('<b>Picard output</b><hr/>\n')
            res.append('<table cellpadding="3" >\n')
            dat = []
            heads = []
            lastr = len(r) - 1
            # special case for estimate library complexity hist
            thist = False
            for i, row in enumerate(r):
                if row.strip() > '':
                    srow = row.split('\t')
                    if row.startswith('#'):
                        heads.append(row.strip()) # want strings
                    else:
                        dat.append(srow) # want lists
                if row.startswith('## HISTOGRAM'):
                    thist = True
                # flush any accumulated header rows as full-width table rows
                if len(heads) > 0:
                    hres = ['<tr class="d%d"><td colspan="2">%s</td></tr>' % (i % 2, x) for i, x in enumerate(heads)]
                    res += hres
                    heads = []
                # flush accumulated data rows, transposing unless a histogram
                if len(dat) > 0:
                    if transpose and not thist:
                        tdat = map(None, *dat) # transpose an arbitrary list of lists
                        tdat = ['<tr class="d%d"><td>%s</td><td>%s </td></tr>\n' % ((i+len(heads)) % 2, x[0], x[1]) for i, x in enumerate(tdat)]
                    else:
                        tdat = ['\t'.join(x).strip() for x in dat] # back to strings :(
                        tdat = ['<tr class="d%d"><td colspan="2">%s</td></tr>\n' % ((i+len(heads)) % 2, x) for i, x in enumerate(tdat)]
                    res += tdat
                    dat = []
            res.append('</table>\n')
        return res

    def fixPicardOutputs(self, transpose, maxloglines):
        """
        picard produces long hard to read tab header files
        make them available but present them transposed for readability
        """
        logging.shutdown()
        self.cleanup() # remove temp files stored in delme
        rstyle = """<style type="text/css">
        tr.d0 td {background-color: oldlace; color: black;}
        tr.d1 td {background-color: aliceblue; color: black;}
        </style>"""
        res = [rstyle,]
        res.append(galhtmlprefix % self.progname)
        res.append(galhtmlattr % (self.picname, timenow()))
        flist = [x for x in os.listdir(self.opts.outdir) if not x.startswith('.')]
        pdflist = [x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf']
        if len(pdflist) > 0: # assumes all pdfs come with thumbnail .jpgs
            for p in pdflist:
                pbase = os.path.splitext(p)[0] # removes .pdf
                imghref = '%s.jpg' % pbase
                mimghref = '%s-0.jpg' % pbase # multiple pages pdf -> multiple thumbnails without asking!
                if mimghref in flist:
                    imghref = mimghref # only one for thumbnail...it's a multi page pdf
                res.append('<table cellpadding="10"><tr><td>\n')
                res.append('<a href="%s"><img src="%s" title="Click image preview for a print quality PDF version" hspace="10" align="middle"></a>\n' % (p, imghref))
                res.append('</tr></td></table>\n')
        if len(flist) > 0:
            res.append('<b>The following output files were created (click the filename to view/download a copy):</b><hr/>')
            res.append('<table>\n')
            for i, f in enumerate(flist):
                fn = os.path.split(f)[-1]
                res.append('<tr><td><a href="%s">%s</a></td></tr>\n' % (fn, fn))
            res.append('</table><p/>\n')
        pres = self.prettyPicout(transpose, maxloglines)
        if len(pres) > 0:
            res += pres
        l = open(self.log_filename, 'r').readlines()
        llen = len(l)
        if llen > 0:
            res.append('<b>Picard Tool Run Log</b><hr/>\n')
            rlog = ['<pre>',]
            # very long logs are truncated to the first and last ~50 lines
            if llen > maxloglines:
                n = min(50, int(maxloglines/2))
                rlog += l[:n]
                rlog.append('------------ ## %d rows deleted ## --------------\n' % (llen-maxloglines))
                rlog += l[-n:]
            else:
                rlog += l
            rlog.append('</pre>')
            if llen > maxloglines:
                rlog.append('\n<b>## WARNING - %d log lines truncated - <a href="%s">%s</a> contains entire output</b>' % (llen - maxloglines, self.log_filename, self.log_filename))
            res += rlog
        else:
            res.append("### Odd, Picard left no log file %s - must have really barfed badly?\n" % self.log_filename)
        res.append('<hr/>The freely available <a href="http://picard.sourceforge.net/command-line-overview.shtml">Picard software</a> \n')
        res.append('generated all outputs reported here running as a <a href="http://getgalaxy.org">Galaxy</a> tool')
        res.append(galhtmlpostfix)
        outf = open(self.opts.htmlout, 'w')
        outf.write(''.join(res))
        outf.write('\n')
        outf.close()

    def makePicInterval(self, inbed=None, outf=None):
        """
        picard wants bait and target files to have the same header length as the incoming bam/sam
        a meaningful (ie accurate) representation will fail because of this - so this hack
        it would be far better to be able to supply the original bed untouched
        Additional checking added Ross Lazarus Dec 2011 to deal with two 'bug' reports on the list
        """
        assert inbed <> None
        bed = open(inbed, 'r').readlines()
        sbed = [x.split('\t') for x in bed] # lengths MUST be 5
        lens = [len(x) for x in sbed]
        strands = [x[3] for x in sbed if not x[3] in ['+', '-']]
        maxl = max(lens)
        minl = min(lens)
        e = []
        # validate the interval file before handing it to picard
        if maxl <> minl:
            e.append("## Input error: Inconsistent field count in %s - please read the documentation on bait/target format requirements, fix and try again" % inbed)
        if maxl <> 5:
            e.append("## Input error: %d fields found in %s, 5 required - please read the warning and documentation on bait/target format requirements, fix and try again" % (maxl, inbed))
        if len(strands) > 0:
            e.append("## Input error: Fourth column in %s is not the required strand (+ or -) - please read the warning and documentation on bait/target format requirements, fix and try again" % (inbed))
        if len(e) > 0: # write to stderr and quit
            print >> sys.stderr, '\n'.join(e)
            sys.exit(1)
        # prepend the sam/bam header to the bed so picard accepts it
        thead = os.path.join(self.opts.outdir, 'tempSamHead.txt')
        if self.opts.datatype == 'sam':
            cl = ['samtools view -H -S', self.opts.input, '>', thead]
        else:
            cl = ['samtools view -H', self.opts.input, '>', thead]
        self.runCL(cl=cl, output_dir=self.opts.outdir)
        head = open(thead, 'r').readlines()
        s = '## got %d rows of header\n' % (len(head))
        logging.info(s)
        o = open(outf, 'w')
        o.write(''.join(head))
        o.write(''.join(bed))
        o.close()
        return outf

    def cleanSam(self, insam=None, newsam=None, picardErrors=[], outformat=None):
        """
        interesting problem - if paired, must remove mate pair of errors too or we have a new set of errors after cleaning - missing mate pairs!
        Do the work of removing all the error sequences
        pysam is cool
        infile = pysam.Samfile( "-", "r" )
        outfile = pysam.Samfile( "-", "w", template = infile )
        for s in infile: outfile.write(s)

        errors from ValidateSameFile.jar look like
        WARNING: Record 32, Read name SRR006041.1202260, NM tag (nucleotide differences) is missing
        ERROR: Record 33, Read name SRR006041.1042721, Empty sequence dictionary.
        ERROR: Record 33, Read name SRR006041.1042721, RG ID on SAMRecord not found in header: SRR006041
        """
        # NOTE(review): pysam is used below but is not imported at module
        # level in this file - calling this would raise NameError; confirm
        # against the complete source.
        # NOTE(review): mutable default picardErrors=[] is shared across
        # calls - harmless while callers always pass their own list.
        assert os.path.isfile(insam), 'rgPicardValidate cleansam needs an input sam file - cannot find %s' % insam
        assert newsam <> None, 'rgPicardValidate cleansam needs an output new sam file path'
        # pull the read name out of each parseable error message
        removeNames = [x.split(',')[1].replace(' Read name ', '') for x in picardErrors if len(x.split(',')) > 2]
        remDict = dict(zip(removeNames, range(len(removeNames))))
        infile = pysam.Samfile(insam, 'rb')
        info = 'found %d error sequences in picardErrors, %d unique' % (len(removeNames), len(remDict))
        if len(removeNames) > 0:
            outfile = pysam.Samfile(newsam, 'wb', template=infile) # template must be an open file
            i = 0
            j = 0
            for row in infile:
                dropme = remDict.get(row.qname, None) # keep if None
                if not dropme:
                    outfile.write(row)
                    j += 1
                else: # discard
                    i += 1
            info = '%s\n%s' % (info, 'Discarded %d lines writing %d to %s from %s' % (i, j, newsam, insam))
            outfile.close()
            infile.close()
        else: # we really want a nullop or a simple pointer copy
            infile.close()
            if newsam:
                shutil.copy(insam, newsam)
        logging.info(info)
def __main__():
    """
    Command-line driver. Parses options with optparse, builds a Picard
    command line for the selected tool (dispatched on pic.picname), runs it
    through a PicardBase instance, then post-processes outputs (moving
    temporary bam/sam files into place, generating an html report) and
    reports a non-zero Picard exit code on stderr.
    """
    doFix = False # tools returning htmlfile don't need this
    doTranspose = True # default
    maxloglines = 100 # default
    #Parse Command Line
    op = optparse.OptionParser()
    # All tools
    op.add_option('-i', '--input', dest='input', help='Input SAM or BAM file' )
    op.add_option('-e', '--inputext', default=None)
    op.add_option('-o', '--output', default=None)
    op.add_option('-n', '--title', default="Pick a Picard Tool")
    op.add_option('-t', '--htmlout', default=None)
    op.add_option('-d', '--outdir', default=None)
    op.add_option('-x', '--maxjheap', default='4g')
    op.add_option('-b', '--bisulphite', default='false')
    op.add_option('-s', '--sortorder', default='query')
    op.add_option('','--tmpdir', default='/tmp')
    op.add_option('-j','--jar',default='')
    op.add_option('','--picard-cmd',default=None)
    # Many tools
    op.add_option( '', '--output-format', dest='output_format', help='Output format' )
    op.add_option( '', '--bai-file', dest='bai_file', help='The path to the index file for the input bam file' )
    op.add_option( '', '--ref', dest='ref', help='Built-in reference with fasta and dict file', default=None )
    # CreateSequenceDictionary
    op.add_option( '', '--ref-file', dest='ref_file', help='Fasta to use as reference', default=None )
    op.add_option( '', '--species-name', dest='species_name', help='Species name to use in creating dict file from fasta file' )
    op.add_option( '', '--build-name', dest='build_name', help='Name of genome assembly to use in creating dict file from fasta file' )
    op.add_option( '', '--trunc-names', dest='trunc_names', help='Truncate sequence names at first whitespace from fasta file' )
    # MarkDuplicates
    op.add_option( '', '--remdups', default='true', help='Remove duplicates from output file' )
    op.add_option( '', '--optdupdist', default="100", help='Maximum pixels between two identical sequences in order to consider them optical duplicates.' )
    # CollectInsertSizeMetrics
    op.add_option('', '--taillimit', default="0")
    op.add_option('', '--histwidth', default="0")
    op.add_option('', '--minpct', default="0.01")
    op.add_option('', '--malevel', default='')
    op.add_option('', '--deviations', default="0.0")
    # CollectAlignmentSummaryMetrics
    op.add_option('', '--maxinsert', default="20")
    op.add_option('', '--adaptors', default='')
    # FixMateInformation and validate
    # CollectGcBiasMetrics
    op.add_option('', '--windowsize', default='100')
    op.add_option('', '--mingenomefrac', default='0.00001')
    # AddOrReplaceReadGroups
    op.add_option( '', '--rg-opts', dest='rg_opts', help='Specify extra (optional) arguments with full, otherwise preSet' )
    op.add_option( '', '--rg-lb', dest='rg_library', help='Read Group Library' )
    op.add_option( '', '--rg-pl', dest='rg_platform', help='Read Group platform (e.g. illumina, solid)' )
    op.add_option( '', '--rg-pu', dest='rg_plat_unit', help='Read Group platform unit (eg. run barcode) ' )
    op.add_option( '', '--rg-sm', dest='rg_sample', help='Read Group sample name' )
    op.add_option( '', '--rg-id', dest='rg_id', help='Read Group ID' )
    op.add_option( '', '--rg-cn', dest='rg_seq_center', help='Read Group sequencing center name' )
    op.add_option( '', '--rg-ds', dest='rg_desc', help='Read Group description' )
    # ReorderSam
    op.add_option( '', '--allow-inc-dict-concord', dest='allow_inc_dict_concord', help='Allow incomplete dict concordance' )
    op.add_option( '', '--allow-contig-len-discord', dest='allow_contig_len_discord', help='Allow contig length discordance' )
    # ReplaceSamHeader
    op.add_option( '', '--header-file', dest='header_file', help='sam or bam file from which header will be read' )
    op.add_option('','--assumesorted', default='true')
    op.add_option('','--readregex', default="[a-zA-Z0-9]+:[0-9]:([0-9]+):([0-9]+):([0-9]+).*")
    #estimatelibrarycomplexity
    op.add_option('','--minid', default="5")
    op.add_option('','--maxdiff', default="0.03")
    op.add_option('','--minmeanq', default="20")
    #hsmetrics
    op.add_option('','--baitbed', default=None)
    op.add_option('','--targetbed', default=None)
    #validate
    op.add_option('','--ignoreflags', action='append', type="string")
    op.add_option('','--maxerrors', default=None)
    op.add_option('','--datatype', default=None)
    op.add_option('','--bamout', default=None)
    op.add_option('','--samout', default=None)
    opts, args = op.parse_args()
    opts.sortme = opts.assumesorted == 'false'
    assert opts.input <> None
    # need to add
    # instance that does all the work
    pic = PicardBase(opts,sys.argv[0])
    tmp_dir = opts.outdir
    haveTempout = False # we use this where sam output is an option
    rval = 0
    stdouts = 'Not run yet'
    # set ref and dict files to use (create if necessary)
    ref_file_name = opts.ref
    if opts.ref_file <> None:
        # A raw fasta was supplied: build a sequence dictionary for it first
        # with CreateSequenceDictionary, working on a temp symlink so the
        # original fasta is never touched.
        csd = 'CreateSequenceDictionary'
        realjarpath = os.path.split(opts.jar)[0]
        jarpath = os.path.join(realjarpath,'%s.jar' % csd) # for refseq
        tmp_ref_fd, tmp_ref_name = tempfile.mkstemp( dir=opts.tmpdir , prefix = pic.picname)
        ref_file_name = '%s.fasta' % tmp_ref_name
        # build dict
        dict_file_name = '%s.dict' % tmp_ref_name
        os.symlink( opts.ref_file, ref_file_name )
        cl = ['REFERENCE=%s' % ref_file_name]
        cl.append('OUTPUT=%s' % dict_file_name)
        cl.append('URI=%s' % os.path.basename( opts.ref_file ))
        cl.append('TRUNCATE_NAMES_AT_WHITESPACE=%s' % opts.trunc_names)
        if opts.species_name:
            cl.append('SPECIES=%s' % opts.species_name)
        if opts.build_name:
            cl.append('GENOME_ASSEMBLY=%s' % opts.build_name)
        pic.delme.append(dict_file_name)
        pic.delme.append(ref_file_name)
        pic.delme.append(tmp_ref_name)
        stdouts,rval = pic.runPic(jarpath, cl)
    # run relevant command(s)
    # define temporary output
    # if output is sam, it must have that extension, otherwise bam will be produced
    # specify sam or bam file with extension
    if opts.output_format == 'sam':
        suff = '.sam'
    else:
        suff = ''
    tmp_fd, tempout = tempfile.mkstemp( dir=opts.tmpdir, suffix=suff )
    cl = ['VALIDATION_STRINGENCY=LENIENT',]
    if pic.picname == 'AddOrReplaceReadGroups':
        # sort order to match Galaxy's default
        cl.append('SORT_ORDER=coordinate')
        # input
        cl.append('INPUT=%s' % opts.input)
        # outputs
        cl.append('OUTPUT=%s' % tempout)
        # required read groups
        cl.append('RGLB="%s"' % opts.rg_library)
        cl.append('RGPL="%s"' % opts.rg_platform)
        cl.append('RGPU="%s"' % opts.rg_plat_unit)
        cl.append('RGSM="%s"' % opts.rg_sample)
        if opts.rg_id:
            cl.append('RGID="%s"' % opts.rg_id)
        # optional read groups
        if opts.rg_seq_center:
            cl.append('RGCN="%s"' % opts.rg_seq_center)
        if opts.rg_desc:
            cl.append('RGDS="%s"' % opts.rg_desc)
        stdouts,rval = pic.runPic(opts.jar, cl)
        haveTempout = True
    elif pic.picname == 'BamIndexStats':
        # BamIndexStats wants <name>.bam + <name>.bam.bai side by side;
        # symlink both into a temp pair and append the report from stdout
        # to the metrics file.
        tmp_fd, tmp_name = tempfile.mkstemp( dir=tmp_dir )
        tmp_bam_name = '%s.bam' % tmp_name
        tmp_bai_name = '%s.bai' % tmp_bam_name
        os.symlink( opts.input, tmp_bam_name )
        os.symlink( opts.bai_file, tmp_bai_name )
        cl.append('INPUT=%s' % ( tmp_bam_name ))
        pic.delme.append(tmp_bam_name)
        pic.delme.append(tmp_bai_name)
        pic.delme.append(tmp_name)
        stdouts,rval = pic.runPic( opts.jar, cl )
        f = open(pic.metricsOut,'a')
        f.write(stdouts) # got this on stdout from runCl
        f.write('\n')
        f.close()
        doTranspose = False # but not transposed
    elif pic.picname == 'EstimateLibraryComplexity':
        cl.append('I=%s' % opts.input)
        cl.append('O=%s' % pic.metricsOut)
        if float(opts.minid) > 0:
            cl.append('MIN_IDENTICAL_BASES=%s' % opts.minid)
        if float(opts.maxdiff) > 0.0:
            cl.append('MAX_DIFF_RATE=%s' % opts.maxdiff)
        if float(opts.minmeanq) > 0:
            cl.append('MIN_MEAN_QUALITY=%s' % opts.minmeanq)
        if opts.readregex > '':
            cl.append('READ_NAME_REGEX="%s"' % opts.readregex)
        if float(opts.optdupdist) > 0:
            cl.append('OPTICAL_DUPLICATE_PIXEL_DISTANCE=%s' % opts.optdupdist)
        stdouts,rval = pic.runPic(opts.jar, cl)
    elif pic.picname == 'CollectAlignmentSummaryMetrics':
        # Why do we do this fakefasta thing?
        # Because we need NO fai to be available or picard barfs unless it matches the input data.
        # why? Dunno Seems to work without complaining if the .bai file is AWOL....
        fakefasta = os.path.join(opts.outdir,'%s_fake.fasta' % os.path.basename(ref_file_name))
        try:
            os.symlink(ref_file_name,fakefasta)
        except:
            s = '## unable to symlink %s to %s - different devices? Will shutil.copy'
            info = s
            shutil.copy(ref_file_name,fakefasta)
        pic.delme.append(fakefasta)
        cl.append('ASSUME_SORTED=true')
        adaptlist = opts.adaptors.split(',')
        adaptorseqs = ['ADAPTER_SEQUENCE=%s' % x for x in adaptlist]
        cl += adaptorseqs
        cl.append('IS_BISULFITE_SEQUENCED=%s' % opts.bisulphite)
        cl.append('MAX_INSERT_SIZE=%s' % opts.maxinsert)
        cl.append('OUTPUT=%s' % pic.metricsOut)
        cl.append('R=%s' % fakefasta)
        cl.append('TMP_DIR=%s' % opts.tmpdir)
        if not opts.assumesorted.lower() == 'true': # we need to sort input
            sortedfile = '%s.sorted' % os.path.basename(opts.input)
            if opts.datatype == 'sam': # need to work with a bam
                tlog,tempbam,trval = pic.samToBam(opts.input,opts.outdir)
                pic.delme.append(tempbam)
                try:
                    tlog = pic.sortSam(tempbam,sortedfile,opts.outdir)
                except:
                    print '## exception on sorting sam file %s' % opts.input
            else: # is already bam
                try:
                    tlog = pic.sortSam(opts.input,sortedfile,opts.outdir)
                except : # bug - [bam_sort_core] not being ignored - TODO fixme
                    print '## exception %s on sorting bam file %s' % (sys.exc_info()[0],opts.input)
            cl.append('INPUT=%s.bam' % os.path.abspath(os.path.join(opts.outdir,sortedfile)))
            pic.delme.append(os.path.join(opts.outdir,sortedfile))
        else:
            cl.append('INPUT=%s' % os.path.abspath(opts.input))
        stdouts,rval = pic.runPic(opts.jar, cl)
    elif pic.picname == 'CollectGcBiasMetrics':
        assert os.path.isfile(ref_file_name),'PicardGC needs a reference sequence - cannot read %s' % ref_file_name
        # sigh. Why do we do this fakefasta thing? Because we need NO fai to be available or picard barfs unless it has the same length as the input data.
        # why? Dunno
        fakefasta = os.path.join(opts.outdir,'%s_fake.fasta' % os.path.basename(ref_file_name))
        try:
            os.symlink(ref_file_name,fakefasta)
        except:
            s = '## unable to symlink %s to %s - different devices? May need to replace with shutil.copy'
            info = s
            shutil.copy(ref_file_name,fakefasta)
        pic.delme.append(fakefasta)
        x = 'rgPicardGCBiasMetrics'
        pdfname = '%s.pdf' % x
        jpgname = '%s.jpg' % x
        tempout = os.path.join(opts.outdir,'rgPicardGCBiasMetrics.out')
        temppdf = os.path.join(opts.outdir,pdfname)
        cl.append('R=%s' % fakefasta)
        cl.append('WINDOW_SIZE=%s' % opts.windowsize)
        cl.append('MINIMUM_GENOME_FRACTION=%s' % opts.mingenomefrac)
        cl.append('INPUT=%s' % opts.input)
        cl.append('OUTPUT=%s' % tempout)
        cl.append('TMP_DIR=%s' % opts.tmpdir)
        cl.append('CHART_OUTPUT=%s' % temppdf)
        cl.append('SUMMARY_OUTPUT=%s' % pic.metricsOut)
        stdouts,rval = pic.runPic(opts.jar, cl)
        if os.path.isfile(temppdf):
            cl2 = ['convert','-resize x400',temppdf,os.path.join(opts.outdir,jpgname)] # make the jpg for fixPicardOutputs to find
            s,stdouts,rval = pic.runCL(cl=cl2,output_dir=opts.outdir)
        else:
            s='### runGC: Unable to find pdf %s - please check the log for the causal problem\n' % temppdf
        # log the conversion result (or the failure notice) either way
        lf = open(pic.log_filename,'a')
        lf.write(s)
        lf.write('\n')
        lf.close()
    elif pic.picname == 'CollectInsertSizeMetrics':
        """ <command interpreter="python">
        picard_wrapper.py -i "$input_file" -n "$out_prefix" --tmpdir "${__new_file_path__}" --deviations "$deviations"
        --histwidth "$histWidth" --minpct "$minPct" --malevel "$malevel"
        -j "${GALAXY_DATA_INDEX_DIR}/shared/jars/picard/CollectInsertSizeMetrics.jar" -d "$html_file.files_path" -t "$html_file"
        </command>
        """
        isPDF = 'InsertSizeHist.pdf'
        pdfpath = os.path.join(opts.outdir,isPDF)
        histpdf = 'InsertSizeHist.pdf'
        cl.append('I=%s' % opts.input)
        cl.append('O=%s' % pic.metricsOut)
        cl.append('HISTOGRAM_FILE=%s' % histpdf)
        #if opts.taillimit <> '0': # this was deprecated although still mentioned in the docs at 1.56
        #    cl.append('TAIL_LIMIT=%s' % opts.taillimit)
        if opts.histwidth <> '0':
            cl.append('HISTOGRAM_WIDTH=%s' % opts.histwidth)
        if float( opts.minpct) > 0.0:
            cl.append('MINIMUM_PCT=%s' % opts.minpct)
        if float(opts.deviations) > 0.0:
            cl.append('DEVIATIONS=%s' % opts.deviations)
        if opts.malevel:
            malists = opts.malevel.split(',')
            malist = ['METRIC_ACCUMULATION_LEVEL=%s' % x for x in malists]
            cl += malist
        stdouts,rval = pic.runPic(opts.jar, cl)
        if os.path.exists(pdfpath): # automake thumbnail - will be added to html
            cl2 = ['mogrify', '-format jpg -resize x400 %s' % pdfpath]
            pic.runCL(cl=cl2,output_dir=opts.outdir)
        else:
            s = 'Unable to find expected pdf file %s<br/>\n' % pdfpath
            s += 'This <b>always happens if single ended data was provided</b> to this tool,\n'
            s += 'so please double check that your input data really is paired-end NGS data.<br/>\n'
            s += 'If your input was paired data this may be a bug worth reporting to the galaxy-bugs list\n<br/>'
            logging.info(s)
        if len(stdouts) > 0:
            logging.info(stdouts)
    elif pic.picname == 'MarkDuplicates':
        # assume sorted even if header says otherwise
        cl.append('ASSUME_SORTED=%s' % (opts.assumesorted))
        # input
        cl.append('INPUT=%s' % opts.input)
        # outputs
        cl.append('OUTPUT=%s' % opts.output)
        cl.append('METRICS_FILE=%s' % pic.metricsOut )
        # remove or mark duplicates
        cl.append('REMOVE_DUPLICATES=%s' % opts.remdups)
        # the regular expression to be used to parse reads in incoming SAM file
        cl.append('READ_NAME_REGEX="%s"' % opts.readregex)
        # maximum offset between two duplicate clusters
        cl.append('OPTICAL_DUPLICATE_PIXEL_DISTANCE=%s' % opts.optdupdist)
        stdouts,rval = pic.runPic(opts.jar, cl)
    elif pic.picname == 'FixMateInformation':
        cl.append('I=%s' % opts.input)
        cl.append('O=%s' % tempout)
        cl.append('SORT_ORDER=%s' % opts.sortorder)
        stdouts,rval = pic.runPic(opts.jar,cl)
        haveTempout = True
    elif pic.picname == 'ReorderSam':
        # input
        cl.append('INPUT=%s' % opts.input)
        # output
        cl.append('OUTPUT=%s' % tempout)
        # reference
        cl.append('REFERENCE=%s' % ref_file_name)
        # incomplete dict concordance
        if opts.allow_inc_dict_concord == 'true':
            cl.append('ALLOW_INCOMPLETE_DICT_CONCORDANCE=true')
        # contig length discordance
        if opts.allow_contig_len_discord == 'true':
            cl.append('ALLOW_CONTIG_LENGTH_DISCORDANCE=true')
        stdouts,rval = pic.runPic(opts.jar, cl)
        haveTempout = True
    elif pic.picname == 'ReplaceSamHeader':
        cl.append('INPUT=%s' % opts.input)
        cl.append('OUTPUT=%s' % tempout)
        cl.append('HEADER=%s' % opts.header_file)
        stdouts,rval = pic.runPic(opts.jar, cl)
        haveTempout = True
    elif pic.picname == 'CalculateHsMetrics':
        maxloglines = 100
        baitfname = os.path.join(opts.outdir,'rgPicardHsMetrics.bait')
        targetfname = os.path.join(opts.outdir,'rgPicardHsMetrics.target')
        baitf = pic.makePicInterval(opts.baitbed,baitfname)
        if opts.targetbed == opts.baitbed: # same file sometimes
            targetf = baitf
        else:
            targetf = pic.makePicInterval(opts.targetbed,targetfname)
        cl.append('BAIT_INTERVALS=%s' % baitf)
        cl.append('TARGET_INTERVALS=%s' % targetf)
        cl.append('INPUT=%s' % os.path.abspath(opts.input))
        cl.append('OUTPUT=%s' % pic.metricsOut)
        cl.append('TMP_DIR=%s' % opts.tmpdir)
        stdouts,rval = pic.runPic(opts.jar,cl)
    elif pic.picname == 'ValidateSamFile':
        import pysam
        doTranspose = False
        sortedfile = os.path.join(opts.outdir,'rgValidate.sorted')
        stf = open(pic.log_filename,'w')
        tlog = None
        # input is sorted first so that cleanSam can stream it afterwards
        if opts.datatype == 'sam': # need to work with a bam
            tlog,tempbam,rval = pic.samToBam(opts.input,opts.outdir)
            try:
                tlog = pic.sortSam(tempbam,sortedfile,opts.outdir)
            except:
                print '## exception on sorting sam file %s' % opts.input
        else: # is already bam
            try:
                tlog = pic.sortSam(opts.input,sortedfile,opts.outdir)
            except: # bug - [bam_sort_core] not being ignored - TODO fixme
                print '## exception on sorting bam file %s' % opts.input
        if tlog:
            print '##tlog=',tlog
            stf.write(tlog)
            stf.write('\n')
        sortedfile = '%s.bam' % sortedfile # samtools does that
        cl.append('O=%s' % pic.metricsOut)
        cl.append('TMP_DIR=%s' % opts.tmpdir)
        cl.append('I=%s' % sortedfile)
        # force an effectively unlimited error report so cleanSam sees every bad read
        opts.maxerrors = '99999999'
        cl.append('MAX_OUTPUT=%s' % opts.maxerrors)
        # NOTE(review): opts.ignoreflags defaults to None, so this indexing
        # assumes the tool XML always supplies at least one --ignoreflags.
        if opts.ignoreflags[0] <> 'None': # picard error values to ignore
            igs = ['IGNORE=%s' % x for x in opts.ignoreflags if x <> 'None']
            cl.append(' '.join(igs))
        if opts.bisulphite.lower() <> 'false':
            cl.append('IS_BISULFITE_SEQUENCED=true')
        if opts.ref <> None or opts.ref_file <> None:
            cl.append('R=%s' % ref_file_name)
        stdouts,rval = pic.runPic(opts.jar,cl)
        if opts.datatype == 'sam':
            pic.delme.append(tempbam)
        newsam = opts.output
        outformat = 'bam'
        pe = open(pic.metricsOut,'r').readlines()
        pic.cleanSam(insam=sortedfile, newsam=newsam, picardErrors=pe,outformat=outformat)
        pic.delme.append(sortedfile) # not wanted
        stf.close()
        pic.cleanup()
    else:
        print >> sys.stderr,'picard.py got an unknown tool name - %s' % pic.picname
        sys.exit(1)
    if haveTempout:
        # Some Picard tools produced a potentially intermediate bam file.
        # Either just move to final location or create sam
        if os.path.exists(tempout):
            shutil.move(tempout, os.path.abspath(opts.output))
    if opts.htmlout <> None or doFix: # return a pretty html page
        pic.fixPicardOutputs(transpose=doTranspose,maxloglines=maxloglines)
    if rval <> 0:
        print >> sys.stderr, '## exit code=%d; stdout=%s' % (rval,stdouts)
        # signal failure
if __name__=="__main__": __main__()
| jhl667/galaxy_tools | tools/picard/picard_wrapper.orig.py | Python | apache-2.0 | 35,974 | [
"Galaxy",
"pysam"
] | ab64af29cbc2c3a311a32412f0985ed1c84ee745ad42234c539af0da6512df81 |
from __future__ import (absolute_import, division, print_function)
import io
import numpy as np
import AbinsModules
from mantid.kernel import Atom, logger
class LoadCRYSTAL(AbinsModules.GeneralDFTProgram):
"""
Class for loading CRYSTAL DFT phonon data. Special thanks to Leonardo Bernasconi for contributing to this module.
"""
def __init__(self, input_dft_filename=None):
    """
    :param input_dft_filename: name of a file with phonon data (foo.out)
    """
    super(LoadCRYSTAL, self).__init__(input_dft_filename=input_dft_filename)
    self._dft_program = "CRYSTAL"
    self._parser = AbinsModules.GeneralDFTParser()
    # Populated by _read_modes() once the output file has been parsed.
    self._num_k = None
    self._num_modes = None
    self._num_atoms = None
    # Inverse of the transformation (expansion) matrix E relating the
    # primitive cell to the super cell; stays the identity unless a
    # dispersion calculation is detected.
    # More info in 'Creating a super cell' at
    # http://www.theochem.unito.it/crystal_tuto/mssc2008_cd/tutorials/geometry/geom_tut.html
    self._inv_expansion_matrix = np.eye(3, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
def read_phonon_file(self):
    """
    Reads phonon data from CRYSTAL output files. Saves frequencies, weights of k-point vectors, k-point vectors,
    amplitudes of atomic displacements, hash of the phonon file (hash) to <>.hdf5
    :return object of type AbinsData.
    """
    # determine system (molecule or crystal?)
    system = self._determine_system()
    # check if one or more k-points to parse
    phonon_dispersion = self._determine_dispersion()
    # read data from output CRYSTAL file; the helpers below consume the file
    # sequentially, so their call order matters.
    filename = self._clerk.get_input_filename()
    with io.open(filename, "rb") as crystal_file:
        logger.notice("Reading from " + filename)
        # NOTE(review): _determine_system() returns True for molecules, so
        # this identity test presumably relies on AbinsConstants.CRYSTAL
        # being False -- confirm against AbinsConstants.
        if system is AbinsModules.AbinsConstants.CRYSTAL:
            lattice_vectors = self._read_lattice_vectors(file_obj=crystal_file)
        else:
            # molecular system: no meaningful unit cell, use zero vectors
            lattice_vectors = [[0, 0, 0]] * 3
        coord_lines = self._read_atomic_coordinates(file_obj=crystal_file)
        freq, coordinates, weights, k_coordinates = self._read_modes(file_obj=crystal_file,
                                                                     phonon_dispersion=phonon_dispersion)
    # put data into Abins data structure
    data = {}
    self._create_atoms_data(data=data, coord_lines=coord_lines[:self._num_atoms])
    self._create_kpoints_data(data=data, freq=freq, atomic_displacements=coordinates,
                              atomic_coordinates=coord_lines[:self._num_atoms], weights=weights,
                              k_coordinates=k_coordinates, unit_cell=lattice_vectors)
    # save data to hdf file
    self.save_dft_data(data=data)
    # return AbinsData object
    return self._rearrange_data(data=data)
def _determine_system(self):
    """
    Establishes whether the CRYSTAL output describes a molecule or a 3D crystal
    by scanning the whole file for the characteristic banner strings.
    :returns: True for a molecular calculation, False for a 3D crystal
    :raises ValueError: when neither banner is present
    """
    with io.open(self._clerk.get_input_filename(), "rb") as crystal_file:
        content = crystal_file.read()
    molecule_markers = (b"MOLECULAR CALCULATION", b"0D - MOLECULE")
    crystal_markers = (b"CRYSTAL CALCULATION", b"3D - CRYSTAL")
    if any(marker in content for marker in molecule_markers):
        molecular = True
    elif any(marker in content for marker in crystal_markers):
        molecular = False
    else:
        raise ValueError("Only molecular or 3D CRYSTAL systems can be processed")
    if molecular:
        logger.notice("This run is for a MOLECULAR system")
    else:
        logger.notice("This run is for a 3D CRYSTAL system")
    return molecular
def _determine_dispersion(self):
    """
    Checks whether phonon data is present for more than one k-point. For such
    dispersion calculations a super cell is constructed by CRYSTAL, so the
    inverse of the primitive-cell expansion matrix is also read and cached
    for later recovery of the primitive unit cell.
    :returns: True if many k-points included in calculations otherwise False
    """
    with io.open(self._clerk.get_input_filename(), "rb") as crystal_file:
        content = crystal_file.read()
    phonon_dispersion = content.count(b"DISPERSION K ") > 1
    if phonon_dispersion:
        # Re-open the file and pull out the 3x3 expansion matrix E.
        with io.open(self._clerk.get_input_filename(), "rb") as crystal_file:
            self._parser.find_first(file_obj=crystal_file, msg="EXPANSION MATRIX OF PRIMITIVE CELL")
            rows = [[float(entry) for entry in crystal_file.readline().split()[1:]]
                    for _ in range(3)]
        expansion = np.asarray(rows).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE, casting="safe")
        self._inv_expansion_matrix = np.linalg.inv(expansion)
    return phonon_dispersion
def _read_lattice_vectors(self, file_obj=None):
    """
    Reads the three direct lattice vectors from a .out CRYSTAL file.
    :param file_obj: file object positioned before the lattice block
    :returns: list of three [x, y, z] vectors
    """
    self._parser.find_first(file_obj=file_obj, msg="DIRECT LATTICE VECTORS CARTESIAN COMPONENTS (ANGSTROM)")
    file_obj.readline()  # skip the column header line: X Y Z
    return [[float(entry) for entry in file_obj.readline().split()]
            for _ in range(3)]
def _read_atomic_coordinates(self, file_obj=None):
    """
    Reads atomic coordinate lines from a .out CRYSTAL file.
    :param file_obj: file object from which we read
    :returns: list of raw coordinate lines (bytes), one per atom
    """
    self._parser.find_first(file_obj=file_obj,
                            msg="ATOM X(ANGSTROM) Y(ANGSTROM) Z(ANGSTROM)")
    file_obj.readline()  # Line: *******************************************************************************
    coord_lines = []
    while not self._parser.file_end(file_obj=file_obj):
        # NOTE(review): this removes every "T" byte on the line, presumably a
        # flag column; kept as-is since it would also affect symbols
        # containing T -- confirm against real CRYSTAL output.
        line = file_obj.readline().replace(b"T", b"")
        # At the end of this section there is always an empty line.
        if not line.strip():
            break
        coord_lines.append(line.strip(b"\n"))
    for entry in coord_lines:
        # str() handles the unicode/bytes difference under Python 2
        logger.debug(str(entry.strip(b"\n")))
    return coord_lines
def _read_modes(self, file_obj=None, phonon_dispersion=None):
    """
    Reads vibrational modes (frequencies and atomic displacements).
    :param phonon_dispersion: True if more then one k-point to parse, otherwise False.
    :param file_obj: file object from which we read
    :returns: Tuple with frequencies and corresponding atomic displacements, weights of k-points and coordinates of
              k-points
    """
    # case of more than one k-point
    if phonon_dispersion:
        num_k = self._get_num_kpoints(file_obj=file_obj)
        weights = []
        k_coordinates = []
        freq = []
        all_coord = []
        # parse all k-points
        for k in range(num_k):
            line = self._parser.find_first(file_obj=file_obj, msg="DISPERSION K POINT NUMBER")
            partial_freq = []
            xdisp = []
            ydisp = []
            zdisp = []
            # header looks like: DISPERSION K POINT NUMBER n (R/C) ( kx ky kz ) WEIGHT: w
            local_line = line.replace(b"(", b" ").replace(b")", b" ").split()
            k_coordinates.append([float(local_line[7]), float(local_line[8]), float(local_line[9])])
            weights.append(float(local_line[11]))
            k_point_type = local_line[6]
            # parse k-points for which atomic displacements are real
            if k_point_type == b"R":
                while not self._parser.file_end(file_obj=file_obj):
                    self._read_freq_block(file_obj=file_obj, freq=partial_freq)
                    self._read_coord_block(file_obj=file_obj, xdisp=xdisp, ydisp=ydisp, zdisp=zdisp)
                    if self._parser.block_end(file_obj=file_obj, msg=["DISPERSION K POINT NUMBER"]):
                        break
                    if not self._inside_k_block(file_obj=file_obj):
                        break
            # parse k-points for which atomic displacements are complex
            elif k_point_type == b"C":
                real_partial_xdisp = []
                real_partial_ydisp = []
                real_partial_zdisp = []
                complex_partial_xdisp = []
                complex_partial_ydisp = []
                complex_partial_zdisp = []
                # first pass: real parts, terminated by the IMAGINARY banner
                while not self._parser.file_end(file_obj=file_obj):
                    self._read_freq_block(file_obj=file_obj, freq=partial_freq)
                    self._read_coord_block(file_obj=file_obj, xdisp=real_partial_xdisp,
                                           ydisp=real_partial_ydisp, zdisp=real_partial_zdisp, part="real")
                    if self._parser.block_end(file_obj=file_obj, msg=["IMAGINARY"]):
                        break
                # second pass: imaginary parts; frequencies are repeated here,
                # so the freq block is consumed but not appended again
                while not self._parser.file_end(file_obj=file_obj):
                    self._read_freq_block(file_obj=file_obj, freq=partial_freq, append=False)
                    self._read_coord_block(file_obj=file_obj, xdisp=complex_partial_xdisp,
                                           ydisp=complex_partial_ydisp, zdisp=complex_partial_zdisp,
                                           part="imaginary")
                    if self._parser.block_end(file_obj=file_obj, msg=["DISPERSION K POINT NUMBER"]):
                        break
                    if not self._inside_k_block(file_obj=file_obj):
                        break
                # reconstruct complex atomic displacements
                for el in range(len(real_partial_xdisp)):
                    xdisp.append(real_partial_xdisp[el] + complex_partial_xdisp[el])
                    ydisp.append(real_partial_ydisp[el] + complex_partial_ydisp[el])
                    zdisp.append(real_partial_zdisp[el] + complex_partial_zdisp[el])
            else:
                raise ValueError("Invalid format of input file ", self._clerk.get_input_filename())
            freq.append(partial_freq)
            all_coord.append([xdisp, ydisp, zdisp])
    # only one k-point
    else:
        end_msgs = ["******", "ACLIMAX"]
        inside_block = True
        freq = []
        xdisp = []
        ydisp = []
        zdisp = []
        # parse block with frequencies and atomic displacements
        while not self._parser.file_end(file_obj=file_obj) and inside_block:
            self._read_freq_block(file_obj=file_obj, freq=freq)
            self._read_coord_block(file_obj=file_obj, xdisp=xdisp, ydisp=ydisp, zdisp=zdisp)
            if self._parser.block_end(file_obj=file_obj, msg=end_msgs):
                break
        # wrap in one-element lists so the single-k case matches the
        # multi-k data layout
        freq = [freq]
        weights = [1.0]
        k_coordinates = [[0.0, 0.0, 0.0]]
        all_coord = [[xdisp, ydisp, zdisp]]
    self._num_k = len(freq)
    self._num_modes = len(freq[0])
    # 3 Cartesian displacement components per atom
    if self._num_modes % 3 == 0:
        self._num_atoms = int(self._num_modes / 3)
    else:
        raise ValueError("Invalid number of modes.")
    return freq, all_coord, weights, k_coordinates
def _read_freq_block(self, file_obj=None, freq=None, append=True):
    """
    Advances file_obj past the next frequency line and optionally collects
    the frequencies listed on it.
    :param append: when False the line is consumed but its values discarded
                   (the imaginary-part block repeats the same frequencies)
    :param file_obj: file object from which we read
    :param freq: list of frequencies updated in place
    """
    line = self._parser.find_first(file_obj=file_obj, msg="FREQ(CM**-1)")
    if not append:
        return
    values = line.replace(b"\n", b" ").replace(b"FREQ(CM**-1)", b" ").split()
    freq.extend(float(entry) for entry in values)
def _read_coord_block(self, file_obj=None, xdisp=None, ydisp=None, zdisp=None, part="real"):
    """
    Parses one block of displacement components, appending the values found
    on X/Y/Z rows (stored as the real or imaginary part of a complex number)
    to the corresponding lists.
    :param file_obj: file object from which we read
    :param xdisp: list with x coordinates which we update
    :param ydisp: list with y coordinates which we update
    :param zdisp: list with z coordinates which we update
    :param part: "real" or "imaginary" -- which complex part the values fill
    """
    self._parser.move_to(file_obj=file_obj, msg="AT.")
    # first matching axis marker on a line wins (mirrors an if/elif chain)
    targets = ((b" X ", xdisp), (b" Y ", ydisp), (b" Z ", zdisp))
    while not self._parser.file_end(file_obj=file_obj):
        pos = file_obj.tell()
        line = file_obj.readline()
        if not line.strip():
            # blank line terminates the block; rewind so the caller sees it
            file_obj.seek(pos)
            break
        for marker, container in targets:
            if marker in line:
                # numeric payload starts after the fixed-width label columns
                for entry in line[14:].strip(b"\n").split():
                    self._parse_item(item=entry, container=container, part=part)
                break
def _parse_item(self, item=None, container=None, part=None):
if part == "real":
container.append(complex(float(item), 0.0))
elif part == "imaginary":
container.append(complex(0.0, float(item)))
else:
raise ValueError("Real or imaginary part of complex number was expected.")
def _inside_k_block(self, file_obj=None):
    """
    Peeks ahead (without consuming input) to decide whether the parser is
    still inside a k-point block.
    :param file_obj: file object from which we read
    :returns: True if still inside the k-block, otherwise False
    """
    keywords = [b" X ", b" Y ", b" Z ", b"-", b"REAL", b"IMAGINARY", b"MODES", b"DISPERSION"]
    # skip blank lines, remembering where the first non-empty one starts
    pos = None
    while not self._parser.file_end(file_obj=file_obj):
        pos = file_obj.tell()
        if file_obj.readline().strip():
            break
    file_obj.seek(pos)
    # inspect the non-empty line, then rewind so nothing is consumed
    pos = file_obj.tell()
    line = file_obj.readline()
    file_obj.seek(pos)
    # still inside the block iff the line carries any k-block keyword
    return any(key in line for key in keywords)
def _get_num_kpoints(self, file_obj=None):
    """
    Counts the k-point entries listed between the "K WEIGHT COORD" header
    and the "WITH SHRINKING FACTORS:" trailer.
    :param file_obj: file object from which we read
    :returns: number of k-points, or None if the trailer is never found
    """
    self._parser.find_first(file_obj=file_obj, msg="K WEIGHT COORD")
    count = 0
    while not self._parser.file_end(file_obj=file_obj):
        if b"WITH SHRINKING FACTORS:" in file_obj.readline():
            return count
        count += 1
    return None
def _create_atoms_data(self, data=None, coord_lines=None):
    """
    Fills *data* with an "atoms" dictionary in the layout expected when
    converting to an AbinsData object.
    :param data: Python dictionary updated in place
    :param coord_lines: raw coordinate lines, one per atom
    """
    atoms = {}
    for index, line in enumerate(coord_lines):
        fields = line.split()
        # field 2 carries the element symbol; normalise e.g. b"SI" -> "Si"
        symbol = str(fields[2].decode("utf-8").capitalize())
        atom = Atom(symbol=symbol)
        coord = np.asarray(fields[3:6]).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
        atoms["atom_{}".format(index)] = {"symbol": symbol, "mass": atom.mass,
                                          "sort": index, "coord": coord}
    data.update({"atoms": atoms})
def _create_kpoints_data(self, data=None, freq=None, atomic_displacements=None, atomic_coordinates=None,
                         weights=None, k_coordinates=None, unit_cell=None):
    """
    Creates Python dictionary with k-points data which can be easily converted to AbinsData object.
    :param data: Python dictionary to which found k-points data should be added
    :param freq: normal modes
    :param atomic_displacements: atomic displacements
    :param atomic_coordinates: equilibrium atomic coordinates
    :param weights: weights of k-points
    :param k_coordinates: coordinates of k-points
    :param unit_cell: list with unit cell vectors
    """
    # a) Put frequencies into dictionary
    data["frequencies"] = np.asarray(freq).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE, casting="safe")
    # b) Extract atomic displacements, normalize them and put them into data dictionary
    # Extract
    all_k_atomic_disp = [self._create_kpoint_data(freq=freq[k],
                                                  atomic_displacements=atomic_displacements[k],
                                                  atomic_coordinates=atomic_coordinates)
                         for k in range(self._num_k)]
    # normalise (mass-weighted normalisation per mode)
    all_k_atomic_disp = np.asarray(all_k_atomic_disp)
    masses = np.asarray([data["atoms"]["atom_%s" % atom]["mass"] for atom in range(self._num_atoms)])
    # |disp|^2 per atom:
    # [num_k ,num_freq, num_atoms, dim] -> [num_k, num_freq, num_atoms, dim, dim] -> [num_k, num_freq, num_atoms]
    temp1 = np.trace(np.einsum("mlki, mlkj->mlkij", all_k_atomic_disp, all_k_atomic_disp.conjugate()),
                     axis1=3, axis2=4)
    # weight each atom's contribution by its mass
    temp2 = np.einsum("mij, j->mij", temp1, masses)
    # [num_k, num_freq, num_atoms] -> [num_k, num_freq]
    norm = np.sum(temp2, axis=2)
    # noinspection PyTypeChecker
    # divide each mode by its norm, then multiply by sqrt(mass) per atom
    all_k_atomic_disp = np.einsum("mijk,mi->mijk", all_k_atomic_disp, 1.0 / np.sqrt(norm))
    all_k_atomic_disp = np.einsum("mijk,j->mijk", all_k_atomic_disp, np.sqrt(masses))
    # [num_k, num_freq, num_atoms, dim] -> [num_k, num_atoms, num_freq, dim]
    data["atomic_displacements"] = np.transpose(a=all_k_atomic_disp, axes=(0, 2, 1, 3))
    # c) Put weights into dictionary
    data["weights"] = np.asarray(weights).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE, casting="safe")
    # d) Put k-vectors into dictionary
    data["k_vectors"] = np.asarray(k_coordinates).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE,
                                                         casting="safe")
    # e) put unit cell into dictionary (recover primitive cell from the
    # super cell via the cached inverse expansion matrix)
    temp = np.asarray(unit_cell).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE, casting="safe")
    data["unit_cell"] = np.dot(self._inv_expansion_matrix, temp)
    def _create_kpoint_data(self, freq=None, atomic_displacements=None, atomic_coordinates=None):
        """
        Extracts atomic displacements for one k-point from the column-blocked
        CRYSTAL output arrays.
        :param freq: normal modes for the given k-point
        :param atomic_displacements: atomic displacements for the given k-point
        :param atomic_coordinates: atomic coordinates (equilibrium positions)
        :return: atomic displacements as a numpy array of shape
                 (num_freq, num_atoms, 3); normalisation is applied later by
                 the caller, not here
        """
        # CRYSTAL prints eigenvectors in blocks of (up to) six modes side by
        # side; walk the modes left to right, wrapping to the next block row
        # every default_row_width columns.
        column_num = -1
        freq_num = -1  # NOTE(review): incremented below but never read
        row_num = 0
        default_row_width = 6 # default width of block with modes
        displacements = []
        num_displacements = len(atomic_displacements[0])
        num_coordinates = len(atomic_coordinates)
        for _ in freq:
            column_num += 1
            freq_num += 1
            if column_num == default_row_width:
                # Exhausted the current block of six columns; move to next row.
                column_num = 0
                row_num += 1
            # Parse blocks with default row width (6)
            if row_num <= num_displacements / (default_row_width * num_coordinates) - 1:
                displacements.extend(self.create_kpoints_data_helper(atomic_displacements=atomic_displacements,
                                                                     atomic_coordinates=atomic_coordinates, row=row_num,
                                                                     column=column_num))
            # At this point we have parsed all the modes that are
            # part of blocks of 6 in the crystal output; now we need to
            # consider the other blocks
            elif num_displacements % default_row_width != 0:
                # Trailing block is narrower than six columns.
                current_row_width = num_displacements % default_row_width
                displacements.extend(self.create_kpoints_data_helper(atomic_displacements=atomic_displacements,
                                                                     atomic_coordinates=atomic_coordinates, row=row_num,
                                                                     column=column_num, row_width=current_row_width))
        # Reshape displacements so that Abins can use it to create its internal data objects
        # num_atoms: number of atoms in the system
        # num_freq: number of modes
        # dim: dimension for each atomic displacement (atoms vibrate in 3D space)
        #
        # The following conversion is necessary:
        # (num_freq * num_atom * dim) -> (num_freq, num_atom, dim)
        num_freq = len(freq)
        dim = 3
        displacements = np.asarray(a=displacements, order="C").reshape(num_freq, self._num_atoms, dim)
        return displacements
def create_kpoints_data_helper(self, atomic_displacements=None, atomic_coordinates=None, row=None, column=None,
row_width=6):
"""
Extracts atomic displacements for the given row and column.
:param atomic_displacements: list with atomic displacements
:param atomic_coordinates: list with atomic coordinates
:param row: number of atomic_displacements row to parse
:param column: number of atomic_displacements column to parse
:param row_width: current width of row to parse
:return normalised atomic displacements
"""
xdisp = atomic_displacements[0]
ydisp = atomic_displacements[1]
zdisp = atomic_displacements[2]
atomic_coordinates_length = len(atomic_coordinates)
atomic_coordinates_iter = range(atomic_coordinates_length)
const = row * atomic_coordinates_length * 6 + column
indices = [const + atom_num * row_width for atom_num in atomic_coordinates_iter]
x = [xdisp[indx] for indx in indices]
y = [ydisp[indx] for indx in indices]
z = [zdisp[indx] for indx in indices]
local_displacements = np.transpose(np.asarray([x, y, z]))
return local_displacements
| dymkowsk/mantid | scripts/AbinsModules/LoadCRYSTAL.py | Python | gpl-3.0 | 22,768 | [
"CRYSTAL"
] | 26766df5e3053d490b9af356c1a21f05fee1d31b3de188b8c8113b84e8ec93f4 |
"""Site URL configuration: routes the polls app and the admin, and exposes
the error pages directly when DEBUG is on so they can be previewed."""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import defaults as default_views
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()
urlpatterns = [
    url(r'^polls/', include('polls.urls', namespace="polls")),
    url(r'^admin/', include(admin.site.urls)),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
        url(r'^500/$', default_views.server_error),
    ]
"VisIt"
] | 873c02c1c90ef6cd3e55a363280a31fb67dcd5a474447f49617ff083328d6eb1 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
The Atomic Universe
#########################
The :class:`~exatomic.container.Universe` object is a subclass of
:class:`~exa.container.Container` that stores data coming from computational
chemistry experiments in a unified and systematic way. Data is organized into
"frames". A frame is an axis that can represent time (e.g. molecular dynamics
simulations), step number (e.g. geometry optimization), or an arbitrary index
(e.g. density functional theory exchange correlation functional).
"""
import six
import numpy as np
import pandas as pd
from exa import DataFrame, Container, TypedMeta
from .frame import Frame, compute_frame_from_atom
from .atom import Atom, UnitAtom, ProjectedAtom, VisualAtom, Frequency
from .two import (AtomTwo, MoleculeTwo, compute_atom_two,
_compute_bond_count, _compute_bonds)
from .molecule import (Molecule, compute_molecule, compute_molecule_com,
compute_molecule_count)
from .field import AtomicField
from .orbital import Orbital, Excitation, MOMatrix, DensityMatrix
from .basis import Overlap, BasisSet, BasisSetOrder
from exatomic.algorithms.orbital import add_molecular_orbitals
from exatomic.algorithms.basis import BasisFunctions, compute_uncontracted_basis_set_order
from .tensor import Tensor
class Meta(TypedMeta):
    """Typed-attribute declarations for :class:`Universe`.
    Each class attribute maps a universe attribute name to the type it must
    hold; TypedMeta uses these to validate assignment and to trigger lazy
    computation via the ``compute_<name>`` getter convention.
    """
    # Atomic and frame-level data
    atom = Atom
    frame = Frame
    atom_two = AtomTwo
    unit_atom = UnitAtom
    projected_atom = ProjectedAtom
    visual_atom = VisualAtom
    frequency = Frequency
    # Molecule-level data
    molecule = Molecule
    molecule_two = MoleculeTwo
    field = AtomicField
    # Orbital data (cart_/sphr_ prefixed variants hold cartesian/spherical forms)
    orbital = Orbital
    momatrix = MOMatrix
    cart_momatrix = MOMatrix
    sphr_momatrix = MOMatrix
    excitation = Excitation
    overlap = Overlap
    density = DensityMatrix
    # Basis set bookkeeping
    basis_set_order = BasisSetOrder
    cart_basis_set_order = BasisSetOrder
    sphr_basis_set_order = BasisSetOrder
    uncontracted_basis_set_order = BasisSetOrder
    basis_set = BasisSet
    basis_dims = dict
    basis_functions = BasisFunctions
    contribution = DataFrame
    multipole = DataFrame
    tensor = Tensor
class Universe(six.with_metaclass(Meta, Container)):
    """
    The atomic container is called a universe because it represents everything
    known about the atomistic simulation (whether quantum or classical). This
    includes data such as atomic coordinates, molecular orbital energies, as
    well as (classical phenomena) such as two body distances, etc.
    Attributes:
        frame (:class:`~exatomic.core.frame.Frame`): State variables:
        atom (:class:`~exatomic.core.atom.Atom`): (Classical) atomic data (e.g. coordinates)
        atom_two (:class:`~exatomic.core.two.AtomTwo`): Interatomic distances
        molecule (:class:`~exatomic.core.molecule.Molecule`): Molecule information
        orbital (:class:`~exatomic.core.orbital.Orbital`): Molecular orbital information
        momatrix (:class:`~exatomic.core.orbital.MOMatrix`): Molecular orbital coefficient matrix
        frequency (:class:`~exatomic.core.atom.Frequency`): Vibrational modes and atom displacements
        excitation (:class:`~exatomic.core.orbital.Excitation`): Electronic excitation information
        basis_set (:class:`~exatomic.core.basis.BasisSet`): Basis set specification
        overlap (:class:`~exatomic.core.basis.Overlap`): The overlap matrix
        basis_functions (:class:`~exatomic.algorithms.basis.BasisFunctions`): Basis function evaluation
        field (:class:`~exatomic.core.field.AtomicField`): Scalar fields (MOs, densities, etc.)
    """
    # Axis along which data tables are organized (one "frame" per state).
    _cardinal = "frame"
    # Missing typed attributes are computed lazily via compute_<attr> methods.
    _getter_prefix = "compute"
    @property
    def current_momatrix(self):
        """MO coefficient matrix matching the current representation
        (spherical vs. cartesian), falling back to plain ``momatrix``."""
        if self.meta['spherical']:
            try: return self.sphr_momatrix
            except AttributeError: return self.momatrix
        try: return self.cart_momatrix
        except AttributeError: return self.momatrix
    @property
    def current_basis_set_order(self):
        """Basis set ordering matching the current representation; an
        'uncontracted' flag in ``meta`` takes precedence."""
        if 'uncontracted' in self.meta:
            return self.uncontracted_basis_set_order
        if self.meta['spherical']:
            try: return self.sphr_basis_set_order
            except AttributeError: return self.basis_set_order
        try: return self.cart_basis_set_order
        except AttributeError: return self.basis_set_order
    @property
    def periodic(self, *args, **kwargs):
        # NOTE(review): properties cannot receive arguments through attribute
        # access, so *args/**kwargs are always empty here.
        return self.frame.is_periodic(*args, **kwargs)
    @property
    def orthorhombic(self):
        # Delegates to the frame's unit-cell shape check.
        return self.frame.orthorhombic()
    @classmethod
    def from_cclib(cls, ccobj):
        """Alternate constructor: build a universe from a cclib parser object."""
        from exatomic.interfaces.cclib import universe_from_cclib
        return cls(**universe_from_cclib(ccobj))
    # Note that compute_* function may be called automatically by typed
    # properties defined in UniverseMeta
    def compute_frame(self):
        """Compute a minimal frame table."""
        self.frame = compute_frame_from_atom(self.atom)
    def compute_unit_atom(self):
        """Compute minimal image for periodic systems."""
        self.unit_atom = UnitAtom.from_universe(self)
    def compute_visual_atom(self):
        """Compute the visual atom table and refresh molecule centers of mass."""
        self.visual_atom = VisualAtom.from_universe(self)
        self.compute_molecule_com()
    def compute_atom_two(self, *args, **kwargs):
        """
        Compute interatomic two body properties (e.g. bonds).
        Args:
            mapper (dict): Custom radii to use when determining bonds
            bond_extra (float): Extra additive factor to use when determining bonds
        """
        self.atom_two = compute_atom_two(self, *args, **kwargs)
    def compute_bonds(self, *args, **kwargs):
        """
        Updates bonds (and molecules).
        See Also:
            :func:`~exatomic.two.AtomTwo.compute_bonds`
        """
        _compute_bonds(self.atom, self.atom_two, *args, **kwargs)
    def compute_bond_count(self):
        """
        Compute bond counts and attach them to the :class:`~exatomic.atom.Atom` table.
        """
        _compute_bond_count(self)
    def compute_molecule(self):
        """Compute the :class:`~exatomic.molecule.Molecule` table."""
        self.molecule = compute_molecule(self)
        self.compute_molecule_count()
    def compute_molecule_com(self):
        """Attach center-of-mass columns (cx, cy, cz) to the molecule table."""
        cx, cy, cz = compute_molecule_com(self)
        self.molecule['cx'] = cx
        self.molecule['cy'] = cy
        self.molecule['cz'] = cz
    def compute_atom_count(self):
        """Compute number of atoms per frame."""
        self.frame['atom_count'] = self.atom.cardinal_groupby().size()
    def compute_molecule_count(self):
        """Compute number of molecules per frame."""
        self.frame['molecule_count'] = compute_molecule_count(self)
    def compute_basis_dims(self):
        """Compute basis dimensions (primitive/contracted counts, cartesian
        and spherical, plus per-shell function counts)."""
        bset = self.basis_set
        mapr = self.atom.set.map
        self.basis_dims = {
            'npc': mapr(bset.primitives(False).groupby('set').sum()).sum(),
            'nps': mapr(bset.primitives(True).groupby('set').sum()).sum(),
            'ncc': mapr(bset.functions(False).groupby('set').sum()).sum(),
            'ncs': mapr(bset.functions(True).groupby('set').sum()).sum(),
            'sets': bset.functions_by_shell()}
    def compute_basis_functions(self, **kwargs):
        """Build the :class:`BasisFunctions` evaluator; NWChem output gets
        ``cartp=False``."""
        # NOTE(review): **kwargs is accepted but never forwarded.
        if self.meta['program'] in ['nwchem']:
            self.basis_functions = BasisFunctions(self, cartp=False)
        else:
            self.basis_functions = BasisFunctions(self)
    def compute_uncontracted_basis_set_order(self):
        """Compute an uncontracted basis set order."""
        self.uncontracted_basis_set_order = compute_uncontracted_basis_set_order(self)
    def enumerate_shells(self, frame=0):
        """Extract minimal information from the universe to be used in
        numba-compiled numerical procedures.
        .. code-block:: python
            pointers, atoms, shells = uni.enumerate_shells()
        Args:
            frame (int): state of the universe (default 0)
        """
        atom = self.atom.groupby('frame').get_group(frame)
        if self.meta['program'] not in ['molcas', 'adf', 'nwchem', 'gaussian']:
            print('Warning: Check spherical shell parameter for {} '
                  'molecular orbital generation'.format(self.meta['program']))
        shls = self.basis_set.shells(self.meta['program'],
                                     self.meta['spherical'],
                                     self.meta['gaussian'])
        grps = shls.groupby('set')
        # Pointers into (xyzs, shls) arrays
        ptrs = np.array([(c, idx) for c, seht in enumerate(atom.set)
                         for idx in grps.get_group(seht).index])
        return ptrs, atom[['x', 'y', 'z']].values, shls[0].values
    def add_field(self, field):
        """Adds a field object to the universe.
        .. code-block:: python
            # Assuming field[n] is of type AtomicField
            uni.add_field(field)
            uni.add_field([field1, field2])
        Args:
            field (iter, :class:`exatomic.core.field.AtomicField`): field(s) to add
        Warning:
            Adding a large number of (high resolution) fields may impact performance.
        """
        self._traits_need_update = True
        if isinstance(field, AtomicField):
            if not hasattr(self, 'field'):
                self.field = field
            else:
                # Append to the existing field table, re-indexing the new
                # entries so they follow the current ones.
                self.field._revert_categories()
                new_field_values = self.field.field_values + field.field_values
                newdx = range(len(self.field), len(self.field) + len(field))
                field.index = newdx
                new_field = pd.concat([self.field, field])
                self.field = AtomicField(new_field, field_values=new_field_values)
        elif isinstance(field, list):
            if not hasattr(self, 'field'):
                fields = pd.concat(field)
                fields.index = range(len(fields))
                fields_values = [j for i in field for j in i.field_values]
                self.field = AtomicField(fields, field_values=fields_values)
            else:
                new_field_values = self.field.field_values + [j for i in field for j in i.field_values]
                newdx = range(len(self.field), len(self.field) + sum([len(i.field_values) for i in field]))
                for i, idx in enumerate(newdx):
                    field[i].index = [idx]
                new_field = pd.concat([self.field] + field)
                self.field = AtomicField(new_field, field_values=new_field_values)
        else:
            raise TypeError('field must be an instance of exatomic.field.AtomicField or a list of them')
    def add_molecular_orbitals(self, field_params=None, mocoefs=None,
                               vector=None, frame=0, replace=False,
                               inplace=True, verbose=True, irrep=None):
        """Add molecular orbitals to universe.
        .. code-block:: python
            uni.add_molecular_orbitals() # Default around (HOMO-5, LUMO+7)
            uni.add_molecular_orbitals(vector=range(5)) # Specifies the first 5 MOs
            uni.add_molecular_orbitals( # Higher resolution fields
                field_params={'rmin': -10, # smallest value in 'x', 'y', 'z'
                              'rmax': 10, # largest value in 'x', 'y', 'z'
                              'nr': 100}) # number of points between rmin and rmax
            uni.field # The field parameters
            uni.field.field_values # The generated scalar fields
        Args:
            field_params (dict, pd.Series): see :func:`exatomic.algorithms.orbital_util.make_fps`
            mocoefs (str): column in :class:`~exatomic.core.orbital.MOMatrix`
            vector (iter): indices of orbitals to evaluate (0-based)
            frame (int): frame of atomic positions for the orbitals
            replace (bool): remove previous fields (default False)
            inplace (bool): add directly to uni or return :class:`~exatomic.core.field.AtomicField` (default True)
            verbose (bool): print timing statistics (default True)
            irrep (int): irreducible representation
        Warning:
            Default behavior just continually adds fields to the universe. This can
            affect performance if adding many fields. `replace` modifies this behavior.
        Warning:
            Specifying very high resolution field parameters, e.g. 'nr' > 100
            may slow things down and/or crash the kernel. Use with caution.
        """
        # Fail early with a clear message rather than deep inside the algorithm.
        if not hasattr(self, 'momatrix'):
            raise AttributeError('uni must have momatrix attribute.')
        if not hasattr(self, 'basis_set'):
            raise AttributeError('uni must have basis_set attribute.')
        return add_molecular_orbitals(self, field_params=field_params,
                                      mocoefs=mocoefs, vector=vector,
                                      frame=frame, replace=replace,
                                      inplace=inplace, verbose=verbose,
                                      irrep=irrep)
    def __len__(self):
        # Length of a universe is its number of frames.
        return len(self.frame)
    def __init__(self, **kwargs):
        """Pass all keyword arguments through to the Container base class."""
        super(Universe, self).__init__(**kwargs)
def concat(name=None, description=None, meta=None, *universes):
    """Concatenate multiple universes into a single universe.
    Warning:
        Not implemented yet; calling this always raises
        :class:`NotImplementedError`.
    """
    raise NotImplementedError()
def basis_function_contributions(universe, mo, mocoefs='coef',
                                 tol=0.01, ao=None, frame=0):
    """
    Provided a universe with momatrix and basis_set_order attributes,
    return the major basis function contributions of a particular
    molecular orbital.
    .. code-block:: python
        # display the 16th orbital coefficients > abs(0.15)
        basis_function_contributions(uni, 15, tol=0.15) # 0-based indexing!
    Args:
        universe (class:`exatomic.core.universe.Universe`): a universe
        mo (int): molecular orbital index
        mocoefs (str): column of interest in universe.momatrix
        tol (float): minimum value of coefficient by which to filter
        ao (bool): per-AO breakdown (not implemented; must be None)
        frame (int): frame of the universe (default is zero)
    Returns:
        joined (pd.DataFrame): a join of momatrix and basis_set_order
    """
    small = universe.momatrix.contributions(mo, tol=tol, mocoefs=mocoefs, frame=frame)
    chis = small['chi'].values
    coefs = small[mocoefs]
    # Re-index the coefficients by their basis-function labels so the concat
    # below aligns them with the matching basis_set_order rows.
    coefs.index = chis
    # .loc replaces the deprecated/removed DataFrame.ix (label-based lookup,
    # which is what the 'chi' labels require here).
    joined = pd.concat([universe.basis_set_order.loc[chis], coefs], axis=1)
    if ao is None:
        return joined
    else:
        raise NotImplementedError("not clever enough for that.")
| alexvmarch/atomic | exatomic/core/universe.py | Python | apache-2.0 | 14,841 | [
"ADF",
"Gaussian",
"MOLCAS",
"NWChem",
"cclib"
] | 50175c7cf46f977ab727a4d9c819f6bf38b37f7a38930a3c48a73356d76a265b |
from collections import namedtuple, OrderedDict
_ProductType = namedtuple(
    'ProductType',
    ['name', 'description', 'amount', 'paperback_addl', 'us_postage',
     'can_postage', 'eur_postage', 'aus_postage', 'else_postage'])
class Product(_ProductType):
    """Immutable product record; its string form is the short product name."""
    __slots__ = ()
    def __str__(self):
        return self.name
# Maps product slug -> Product record. The numeric fields appear to be
# integer US cents (e.g. 19900 -> $199.00) -- TODO confirm against the
# checkout/payment code that consumes these.
PRODUCT_LOOKUP = {
    # Hello Web Books
    'hwb-video': Product('hwb-video', 'Hello Web Books Video Package', 19900, 3000, 322, 1400, 2000, 2900, 2100, ),
    'hwb-pb': Product('hwb-pb', 'Hello Web Books Paperback Package', 7995, 0, 322, 1400, 2000, 2900, 2100, ),
    'hwb-ebooks': Product('hwb-ebooks', 'Hello Web Books eBook Package', 4995, 3000, 322, 1400, 2000, 2900, 2100, ),
    # Hello Web App
    'hwa-video': Product('hwa-video', 'Hello Web App Video Package', 17900, 2000, 322, 1000, 1500, 2000, 1800, ),
    'hwa-pb': Product('hwa-pb', 'Hello Web App Paperback Package', 5995, 0, 322, 1000, 1500, 2000, 1800, ),
    'hwa-ebooks': Product('hwa-ebooks', 'Hello Web App eBook Package', 3495, 2000, 322, 1000, 1500, 2000, 1800, ),
    'hwa-video-supplement': Product('hwa-video-supplement', 'Hello Web App Video Supplement', 14405, 2000, 322, 1000, 1500, 2000, 1800, ),
    # Hello Web Design
    'hwd-video': Product('hwd-video', 'Hello Web Design Video Package', 9900, 1000, 300, 800, 1300, 2000, 1500, ),
    'hwd-pb': Product('hwd-pb', 'Hello Web Design Paperback Package', 3995, 0, 300, 800, 1300, 2000, 1500, ),
    'hwd-ebooks': Product('hwd-ebooks', 'Hello Web Design eBook Package', 2495, 1000, 300, 800, 1300, 2000, 1500, ),
    'hwd-video-supplement': Product('hwd-video-supplement', 'Hello Web Design Video Supplement', 7405, 1000, 300, 800, 1300, 2000, 1500, ),
}
#for key, value in hwa_course.iteritems():
# print key, value['name']
# Nested course content: book title -> section -> lesson key -> lesson
# metadata (display name, optional video embed URL, markdown template, slug).
# Lesson keys are strings whose sort order determines display order.
# Fix: the 'Intermediate Concepts' section previously declared the key '12'
# twice, which silently overwrote the cron-jobs lesson; the later entries
# have been renumbered ('12' db-pitfalls -> '13', resources -> '14',
# thanks -> '15') so every lesson survives.
course_list = OrderedDict({
    'Hello Web App': {
        'Hello Web App': {
            '0': {
                'name': 'Hello Web App Introduction',
                'video': '',
                'template': 'course/hwa/intro.md',
                'link': 'intro',
            },
            '1': {
                'name': "What We’re Building",
                'video': '',
                'template': 'course/hwa/what-building.md',
                'link': 'what-building',
            },
            '2': {
                'name': 'Prerequisites',
                'video': '',
                'template': 'course/hwa/prerequisites.md',
                'link': 'prerequisites',
            },
            '3': {
                'name': 'Getting Started',
                'video': 'https://player.vimeo.com/video/322468325',
                'template': 'course/hwa/getting-started.md',
                'link': 'getting-started',
            },
            '4': {
                'name': 'Setting up your Templates',
                'video': 'https://player.vimeo.com/video/125105042',
                'template': 'course/hwa/setting-templates.md',
                'link': 'setting-templates',
            },
            '5': {
                'name': 'Fun with Template Tags',
                'video': 'https://player.vimeo.com/video/125107452',
                'template': 'course/hwa/template-tags.md',
                'link': 'template-tags',
            },
            '6': {
                'name': 'Adding Dynamic Data',
                'video': 'https://player.vimeo.com/video/125112251',
                'template': 'course/hwa/dynamic-data.md',
                'link': 'dynamic-data',
            },
            '7': {
                'name': 'Displaying Dynamic Information in the Templates',
                'video': 'https://player.vimeo.com/video/125113570',
                'template': 'course/hwa/dynamic-templates.md',
                'link': 'dynamic-templates',
            },
            '8': {
                'name': 'Setting up Individual Object Pages',
                'video': 'https://player.vimeo.com/video/125114336',
                'template': 'course/hwa/indiv-object-pages.md',
                'link': 'indiv-object-pages',
            },
            '9': {
                'name': 'Forms.py Funsies',
                'video': 'https://player.vimeo.com/video/125116321',
                'template': 'course/hwa/forms.md',
                'link': 'forms',
            },
            '10': {
                'name': 'Adding a Registration Page',
                'video': 'https://player.vimeo.com/video/125118325',
                'template': 'course/hwa/reg-page.md',
                'link': 'reg-page',
            },
            '11': {
                'name': 'Associating Users with Objects',
                'video': 'https://player.vimeo.com/video/322475015',
                'template': 'course/hwa/user-objects.md',
                'link': 'user-objects',
            },
            '12': {
                'name': 'Setting up Basic Browse Pages',
                'video': 'https://player.vimeo.com/video/125185864',
                'template': 'course/hwa/browse-page.md',
                'link': 'browse-page',
            },
            '13': {
                'name': 'Quick Hits: 404 Pages, requirements.txt, and Testing',
                'video': 'https://player.vimeo.com/video/125195379',
                'template': 'course/hwa/quick-hits.md',
                'link': 'quick-hits',
            },
            '14': {
                'name': 'Deploying Your Web App',
                'video': 'https://player.vimeo.com/video/125203626',
                'template': 'course/hwa/deploying.md',
                'link': 'deploying',
            },
            '15': {
                'name': 'What To Do If Your App is Broken',
                'video': '',
                'template': 'course/hwa/broken.md',
                'link': 'broken',
            },
            '16': {
                'name': 'Important Things to Know',
                'video': '',
                'template': 'course/hwa/important-know.md',
                'link': 'important-know',
            },
            '17': {
                'name': 'Moving Forward',
                'video': '',
                'template': 'course/hwa/moving-forward.md',
                'link': 'moving-forward',
            },
            '18': {
                'name': 'Special Thanks',
                'video': '',
                'template': 'course/hwa/special-thanks.md',
                'link': 'special-thanks',
            },
        },
        'Video Extras': {
            '0': {
                'name': 'Example: Real Life Code',
                'video': 'https://player.vimeo.com/video/124584929',
                'template': 'course/empty.md',
                'link': 'real-life-code',
            },
            '1': {
                'name': 'Example: Adding A New Feature',
                'video': 'https://player.vimeo.com/video/124593641',
                'template': 'course/empty.md',
                'link': 'extra-new-feature',
            },
            '2': {
                'name': 'Example: Using Git',
                'video': 'https://player.vimeo.com/video/124581543',
                'template': 'course/empty.md',
                'link': 'using-git',
            },
            '3': {
                'name': 'Example: Using the Command Line',
                'video': 'https://player.vimeo.com/video/124364480',
                'template': 'course/empty.md',
                'link': 'using-command-line',
            },
        },
        'Intermediate Concepts': {
            '0': {
                'name': 'Intermediate Concepts Introduction',
                'video': '',
                'template': 'course/hwaic/introduction.md',
                'link': 'introduction',
            },
            '1': {
                'name': 'Creating a Contact Form and Working with Custom Forms',
                'video': 'https://player.vimeo.com/video/147784115',
                'template': 'course/hwaic/contact-form.md',
                'link': 'contact-form',
            },
            '2': {
                'name': 'Adding a New Model',
                'video': 'https://player.vimeo.com/video/147789199',
                'template': 'course/hwaic/new-model.md',
                'link': 'new-model',
            },
            '3': {
                'name': 'Adding Easy Admin Emails, Helpers, Sitemaps, and More',
                'video': 'https://player.vimeo.com/video/147789913',
                'template': 'course/hwaic/misc.md',
                'link': 'misc',
            },
            '4': {
                'name': 'Adding User-Uploaded Images',
                'video': 'https://player.vimeo.com/video/147791632',
                'template': 'course/hwaic/user-uploaded-images.md',
                'link': 'user-uploaded-images',
            },
            '5': {
                'name': 'Editing and Resizing Images',
                'video': 'https://player.vimeo.com/video/147868862',
                'template': 'course/hwaic/resizing-images.md',
                'link': 'resizing-images',
            },
            '6': {
                'name': 'Setting Up Django Messages for Alerts',
                'video': 'https://player.vimeo.com/video/148555627',
                'template': 'course/hwaic/django-messages.md',
                'link': 'django-messages',
            },
            '7': {
                'name': 'Front-End Fun: Adding Gulp, Sass, and Bootstrap',
                'video': '',
                'template': 'course/hwaic/sass-bootstrap.md',
                'link': 'sass-bootstrap',
            },
            '8': {
                'name': 'Reading Source Code And Setting Up a Form to Edit User Email Addresses',
                'video': 'https://player.vimeo.com/video/148569412',
                'template': 'course/hwaic/reading-source.md',
                'link': 'reading-source',
            },
            '9': {
                'name': 'Adding Payments with Stripe',
                'video': 'https://player.vimeo.com/video/148574316',
                'template': 'course/hwaic/stripe.md',
                'link': 'stripe',
            },
            '10': {
                'name': 'Adding an API',
                'video': 'https://player.vimeo.com/video/150557037',
                'template': 'course/hwaic/api.md',
                'link': 'api',
            },
            '11': {
                'name': 'Working with Sessions',
                'video': 'https://player.vimeo.com/video/150557885',
                'template': 'course/hwaic/sessions.md',
                'link': 'sessions',
            },
            '12': {
                'name': 'Creating Your Own Scripts and a Bit About Cron Jobs',
                'video': 'https://player.vimeo.com/video/150558862',
                'template': 'course/hwaic/cronjobs.md',
                'link': 'cronjobs',
            },
            '13': {
                'name': 'Database Pitfalls',
                'video': 'https://player.vimeo.com/video/150559647',
                'template': 'course/hwaic/database-pitfalls.md',
                'link': 'database-pitfalls',
            },
            '14': {
                'name': 'Resources',
                'video': '',
                'template': 'course/hwaic/resources.md',
                'link': 'resources',
            },
            '15': {
                'name': 'Thanks',
                'video': '',
                'template': 'course/hwaic/thanks.md',
                'link': 'thanks',
            },
        },
    },
    'Hello Web Design': {
        'Module 1': {
            '0.00': {
                'name': 'Foreword',
                'video': '',
                'template': 'course/hwd/foreword.md',
                'link': 'foreword',
            },
            '0.0': {
                'name': 'Introduction',
                'video': 'https://player.vimeo.com/video/322480097',
                'template': 'course/hwd/intro.md',
                'link': 'intro',
            },
            '1.0': {
                'name': 'If You Only Read One Chapter, Make It This One',
                'video': 'https://player.vimeo.com/video/322480267',
                'template': 'course/hwd/one-chapter.md',
                'link': 'one-chapter',
            },
        },
        'Module 2': {
            '2.0': {
                'name': 'Theory and Design Principles',
                'video': '',
                'template': 'course/hwd/theory.md',
                'link': 'theory',
            },
            '2.1': {
                'name': 'Grid',
                'video': 'https://player.vimeo.com/video/322480426',
                'template': 'course/hwd/grid.md',
                'link': 'grid',
            },
            '2.2': {
                'name': 'Color',
                'video': 'https://player.vimeo.com/video/322480564',
                'template': 'course/hwd/color.md',
                'link': 'color',
            },
            '2.3': {
                'name': 'Typography',
                'video': 'https://player.vimeo.com/video/322480679',
                'template': 'course/hwd/typography.md',
                'link': 'typography',
            },
            '2.4': {
                'name': 'White Space',
                'video': 'https://player.vimeo.com/video/322480801',
                'template': 'course/hwd/white-space.md',
                'link': 'white-space',
            },
            '2.5': {
                'name': 'Layout and Hierarchy',
                'video': 'https://player.vimeo.com/video/322480937',
                'template': 'course/hwd/layout.md',
                'link': 'layout',
            },
            '2.6': {
                'name': 'Content',
                'video': 'https://player.vimeo.com/video/322481048',
                'template': 'course/hwd/content.md',
                'link': 'content',
            },
            '2.7': {
                'name': 'User Experience',
                'video': 'https://player.vimeo.com/video/322481441',
                'template': 'course/hwd/ux.md',
                'link': 'ux',
            },
            '2.8': {
                'name': 'Images and Imagery',
                'video': 'https://player.vimeo.com/video/322481555',
                'template': 'course/hwd/images.md',
                'link': 'images',
            },
            '2.9': {
                'name': 'Extra Tidbits',
                'video': 'https://player.vimeo.com/video/322481739',
                'template': 'course/hwd/tidbits.md',
                'link': 'tidbits',
            },
        },
        'Module 3': {
            '3.0': {
                'name': 'The Process and Training Your Design Eye',
                'video': '',
                'template': 'course/hwd/process.md',
                'link': 'process',
            },
            '3.1': {
                'name': 'Finding Inspiration',
                'video': 'https://player.vimeo.com/video/322481899',
                'template': 'course/hwd/inspiration.md',
                'link': 'inspiration',
            },
            '3.2': {
                'name': 'Planning',
                'video': 'https://player.vimeo.com/video/322482078',
                'template': 'course/hwd/planning.md',
                'link': 'planning',
            },
            '3.3': {
                'name': 'Prototypes',
                'video': 'https://player.vimeo.com/video/324816425',
                'template': 'course/hwd/prototypes.md',
                'link': 'prototypes',
            },
            '3.4': {
                'name': 'Getting Feedback',
                'video': 'https://player.vimeo.com/video/322482231',
                'template': 'course/hwd/feedback.md',
                'link': 'feedback',
            },
            '3.5': {
                'name': 'Coding Your Design',
                'video': 'https://player.vimeo.com/video/324816579',
                'template': 'course/hwd/coding.md',
                'link': 'coding',
            },
        },
        'Module 4': {
            '4.0': {
                'name': 'Reassurances',
                'video': 'https://player.vimeo.com/video/324816712',
                'template': 'course/hwd/reassurances.md',
                'link': 'reassurances',
            },
            '5.0': {
                'name': 'Additional Resources',
                'video': '',
                'template': 'course/hwd/additional-resources.md',
                'link': 'additional-resources',
            },
            '5.1': {
                'name': 'Final Thoughts',
                'video': '',
                'template': 'course/hwd/final-thoughts.md',
                'link': 'final-thoughts',
            },
            '5.2': {
                'name': 'Special Thanks',
                'video': '',
                'template': 'course/hwd/special-thanks.md',
                'link': 'special-thanks',
            },
            '5.3': {
                'name': 'About Author',
                'video': '',
                'template': 'course/hwd/about-author.md',
                'link': 'about-author',
            },
        },
    },
    'Really Friendly Command Line Intro': {
        'Zine': {
            '0': {
                'name': 'Introduction',
                'video': 'https://player.vimeo.com/video/328691402',
                'template': 'course/cmd/intro.md',
                'link': 'intro',
            },
            '1': {
                'name': 'Let’s start playing!',
                'video': '',
                'template': 'course/cmd/playing.md',
                'link': 'playing',
            },
            '2': {
                'name': 'But I’m tired of typing already!',
                'video': '',
                'template': 'course/cmd/tired.md',
                'link': 'tired',
            },
            '3': {
                'name': 'Intermediate command line utilities',
                'video': '',
                'template': 'course/cmd/intermediate.md',
                'link': 'intermediate',
            },
            '4': {
                'name': 'Wait, something went wrong',
                'video': '',
                'template': 'course/cmd/wrong.md',
                'link': 'wrong',
            },
            '5': {
                'name': 'Conclusion',
                'video': '',
                'template': 'course/cmd/conclusion.md',
                'link': 'conclusion',
            },
        },
    },
    'Really Friendly Git Intro': {
        'Zine': {
            '0': {
                'name': 'Introduction',
                'video': '',
                'template': 'course/git/intro.md',
                'link': 'intro',
            },
            '1': {
                'name': 'What is Git?',
                'video': '',
                'template': 'course/git/whatsgit.md',
                'link': 'whatsgit',
            },
            '2': {
                'name': 'Let’s start playing!',
                'video': '',
                'template': 'course/git/playing.md',
                'link': 'playing',
            },
            '3': {
                'name': 'Intermediate Git: Creating branches!',
                'video': '',
                'template': 'course/git/intermediate.md',
                'link': 'intermediate',
            },
            '4': {
                'name': 'That’s cool! So what’s GitHub then?',
                'video': '',
                'template': 'course/git/github.md',
                'link': 'github',
            },
            '5': {
                'name': 'Conclusion',
                'video': '',
                'template': 'course/git/conclusion.md',
                'link': 'conclusion',
            },
        },
    },
})
| hellowebbooks/hellowebbooks-website | books/options.py | Python | mit | 19,793 | [
"GULP"
] | c14638cf309d16d04d7981145a51301249a9cf886de61669fd3350c509b99f0b |
# derived from
# http://s3.amazonaws.com/alexa-static/top-1m.csv.zip
# fetched 2018-05-22, see also
# https://support.alexa.com/hc/en-us/sections/200063274-Top-Sites
# Provenance metadata for the TLD tally dict that follows: when the Alexa
# top-1M list was fetched and from where.
alexa_top_1m_tlds_about = {
    'date': '2018-05-22',
    'source': 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip'
}
alexa_top_1m_tlds = {
# zcat top-1m.csv.zip \
# | perl -ne 'chomp; s/.+\.//; $h{$_}++;
# END {
# for (sort { $h{$b} <=> $h{$a} } keys %h) {
# print "\047", $_, "\047:", $h{$_}, ", ";
# }
# }' \
# | fold --width=70 -s | perl -lpe 's/^/ /; s/:/: /g; s/\s+$//'
'com': 477059, 'ru': 50063, 'org': 47785, 'net': 42573, 'de': 29457,
'br': 19476, 'uk': 16367, 'pl': 15077, 'ir': 13369, 'in': 12812,
'au': 11239, 'fr': 9492, 'it': 9483, 'info': 9136, 'ua': 8126, 'ca': 8077,
'cz': 7611, 'es': 7448, 'jp': 6807, 'nl': 6636, 'co': 6477, 'mx': 6384,
'tw': 5700, 'hu': 5644, 'se': 5406, 'gr': 5273, 'io': 4975, 'eu': 4784,
'ar': 4780, 'cn': 4340, 'ro': 3984, 'sk': 3980, 'me': 3882, 'ch': 3825,
'tv': 3698, 'dk': 3667, 'za': 3663, 'id': 3548, 'kr': 3408, 'vn': 3285,
'us': 3068, 'no': 3056, 'be': 3037, 'at': 2701, 'cl': 2637, 'edu': 2630,
'tr': 2370, 'biz': 2217, 'xyz': 2094, 'sg': 2057, 'fi': 2052, 'pt': 1875,
'club': 1804, 'hk': 1751, 'il': 1675, 'my': 1656, 'ie': 1605, 'az': 1547,
'cc': 1489, 'pro': 1477, 'nz': 1446, 'online': 1429, 'by': 1399,
'lt': 1262, 'th': 1259, 'kz': 1248, 'su': 1202, 'pk': 1180, 'si': 1144,
'bg': 1142, 'hr': 1142, 'top': 1137, 'xn--p1ai': 1100, 'pe': 1016,
'ng': 1014, 'site': 939, 'rs': 933, 'gov': 927, 'tk': 874, 'sa': 759,
'pw': 728, 'ae': 719, 'ph': 676, 'lv': 657, 'uz': 614, 'to': 574,
'mobi': 557, 'download': 545, 'win': 523, 'ee': 517, 'ws': 482, 'nu': 468,
'bd': 455, 'eg': 448, 'xxx': 437, 've': 421, 'is': 420, 'ml': 409,
'lk': 390, 'fm': 384, 'ga': 376, 'ge': 375, 'cat': 371, 'ma': 367,
'am': 364, 'space': 363, 'stream': 339, 'news': 330, 'live': 323,
'ec': 313, 'qa': 310, 'asia': 305, 'ba': 296, 'cf': 292, 'today': 281,
'website': 280, 'tn': 259, 'life': 254, 'uy': 252, 'do': 246, 'blog': 246,
'guru': 245, 'ly': 243, 'md': 241, 'bid': 237, 'gg': 233, 'name': 231,
'mk': 228, 'tech': 223, 'dz': 219, 'lu': 219, 'shop': 216, 'ke': 208,
'travel': 201, 'link': 199, 'la': 197, 'fun': 189, 'cr': 188, 'one': 176,
'al': 172, 'ai': 169, 'store': 165, 'mn': 163, 'gq': 162, 'world': 157,
'media': 156, 'bz': 149, 'kw': 147, 'im': 145, 'af': 144, 'jobs': 141,
'cy': 141, 'tz': 140, 'network': 140, 'om': 139, 'video': 137, 'py': 135,
'ug': 132, 'aero': 131, 'gt': 130, 'rocks': 126, 'cloud': 124, 'bo': 123,
'mm': 120, 'kg': 119, 'trade': 118, 'np': 117, 'tj': 116, 'pa': 115,
'sy': 113, 'zone': 113, 'click': 112, 'review': 110, 'cu': 108, 'sv': 106,
'coop': 105, 'loan': 101, 'cm': 99, 'men': 98, 'jo': 98, 'vip': 95,
'date': 93, 'li': 91, 'wiki': 89, 'eus': 88, 'porn': 86, 'moe': 85,
'sd': 85, 'work': 85, 'global': 83, 'gdn': 82, 'center': 82, 'ninja': 81,
'press': 80, 'lb': 80, 'st': 79, 'sh': 79, 'bh': 79, 'ag': 79, 'host': 78,
'city': 74, 'ps': 73, 'science': 73, 'mz': 73, 'nyc': 73, 'tokyo': 71,
'ac': 70, 'gh': 70, 'party': 70, 'design': 69, 'sexy': 68, 'plus': 68,
'mu': 67, 'academy': 67, 'so': 66, 're': 65, 'iq': 65, 'int': 64, 'et': 64,
'expert': 63, 'church': 62, 'tips': 61, 'zw': 59, 'pics': 59, 'cd': 56,
'games': 55, 'mt': 54, 'bet': 54, 'ci': 54, 'rw': 54, 'hn': 54, 'ovh': 53,
'ao': 52, 'vc': 51, 'cx': 51, 'red': 51, 'guide': 51, 'mg': 50, 'ni': 49,
'tm': 49, 'mo': 48, 'tools': 47, 'sn': 47, 'cool': 47, 'studio': 46,
'london': 45, 'education': 45, 'agency': 45, 'gratis': 45, 'tt': 44,
'cash': 43, 'sc': 43, 'mil': 43, 'ms': 43, 'community': 43, 'email': 42,
'land': 42, 'bank': 41, 'company': 41, 'bike': 41, 'social': 40,
'audio': 40, 'kh': 39, 'digital': 39, 'ltd': 39, 'tf': 39, 'lol': 38,
'pg': 38, 'help': 38, 'solutions': 37, 'pub': 37, 'market': 36, 'bw': 36,
'team': 36, 'watch': 35, 'art': 35, 'berlin': 35, 'school': 34, 'chat': 34,
'tl': 34, 'sx': 33, 'love': 33, 'gs': 32, 'exchange': 32, 'zm': 31,
'services': 31, 'faith': 30, 'gl': 30, 'movie': 30, 'systems': 30,
'xn--j1amh': 30, 'mw': 29, 'scot': 29, 'run': 29, 'money': 29, 'cafe': 28,
'pf': 28, 'works': 28, 'pm': 28, 'kim': 28, 'pr': 28, 'blue': 28, 'bf': 28,
'group': 28, 'bt': 28, 'as': 27, 'ink': 27, 'coffee': 26, 'xn--p1acf': 26,
'sex': 25, 'events': 25, 'farm': 25, 'tc': 25, 'bn': 25, 'ht': 24,
'paris': 24, 'nc': 24, 'na': 24, 'wtf': 23, 'bio': 22, 'careers': 22,
'app': 22, 'ad': 22, 'rip': 22, 'reviews': 22, 'moscow': 22, 'gal': 22,
'gold': 22, 'yt': 21, 'xn--80asehdb': 21, 'webcam': 21, 'fit': 20,
'dating': 20, 'taipei': 20, 'photos': 20, 'mv': 20, 'gy': 20, 'wang': 20,
'racing': 20, 'capital': 20, 'deals': 19, 'bi': 19, 'care': 19, 'game': 19,
'fo': 19, 'bm': 19, 'training': 19, 'ooo': 19, 'lc': 19, 'amsterdam': 18,
'pink': 18, 'ye': 18, 'jm': 18, 'film': 18, 'university': 18,
'marketing': 18, 'house': 17, 'tube': 17, 'vg': 17, 'swiss': 17,
'report': 17, 'menu': 17, 'support': 17, 'uno': 17, 'international': 16,
'software': 16, 'gallery': 16, 'sale': 16, 'photography': 16,
'istanbul': 16, 'cam': 15, 'camp': 15, 'mr': 15, 'dog': 15, 'codes': 15,
'bzh': 15, 'foundation': 14, 'dj': 14, 'bj': 14, 'photo': 14, 'ne': 14,
'fj': 14, 'technology': 14, 'onl': 13, 'museum': 13, 'fyi': 13,
'tours': 13, 'sl': 13, 'cards': 13, 'ist': 13, 'institute': 13,
'pizza': 12, 'express': 12, 'sz': 12, 'band': 12, 'xn--90ais': 12,
'quebec': 12, 'va': 12, 'directory': 12, 'supply': 12, 'leclerc': 12,
'direct': 12, 'fitness': 12, 'dance': 11, 'energy': 11, 'best': 11,
'cricket': 11, 'dev': 11, 'college': 11, 'show': 11, 'rest': 11,
'desi': 11, 'bar': 11, 'cv': 11, 'ky': 11, 'sm': 11, 'buzz': 10,
'vegas': 10, 'kiwi': 10, 'ls': 10, 'africa': 10, 'brussels': 10,
'hosting': 10, 'town': 10, 'xn--80adxhks': 10, 'how': 10, 'ngo': 9,
'graphics': 9, 'wales': 9, 'earth': 9, 'wien': 9, 'beer': 9, 'business': 9,
'je': 9, 'google': 9, 'wedding': 9, 'xn--d1acj3b': 9, 'partners': 9,
'sr': 9, 'style': 9, 'fund': 9, 'gd': 9, 'wf': 9, 'clothing': 9, 'fish': 9,
'coach': 9, 'vet': 8, 'vision': 8, 'mc': 8, 'pictures': 8, 'hamburg': 8,
'adult': 8, 'green': 8, 'xn--80aswg': 8, 'ventures': 8, 'black': 8,
'audi': 8, 'place': 8, 'garden': 8, 'ax': 8, 'sydney': 8, 'kitchen': 7,
'fashion': 7, 'box': 7, 'lat': 7, 'tel': 7, 'nrw': 7, 'vin': 7, 'build': 7,
'clinic': 7, 'schule': 7, 'krd': 7, 'golf': 7, 'shoes': 7, 'delivery': 7,
'xin': 7, 'koeln': 7, 'restaurant': 7, 'tg': 7, 'accountant': 7,
'parts': 7, 'football': 7, 'computer': 6, 'vote': 6, 'ski': 6,
'management': 6, 'camera': 6, 'vu': 6, 'law': 6, 'pet': 6, 'xn--c1avg': 6,
'finance': 6, 'cg': 6, 'enterprises': 6, 'gi': 6, 'eco': 6, 'gift': 6,
'auction': 6, 'bnpparibas': 6, 'poker': 5, 'boutique': 5, 'sb': 5,
'casa': 5, 'haus': 5, 'yokohama': 5, 'army': 5, 'engineering': 5,
'bayern': 5, 'kaufen': 5, 'cheap': 5, 'wine': 5, 'gm': 5, 'bible': 5,
'domains': 5, 'exposed': 5, 'legal': 5, 'promo': 5, 'toys': 4,
'equipment': 4, 'horse': 4, 'yoga': 4, 'barclays': 4, 'apartments': 4,
'archi': 4, 'soccer': 4, 'diet': 4, 'zip': 4, 'bot': 4, 'dental': 4,
'gmbh': 4, 'barcelona': 4, 'abbott': 4, 'gent': 4, 'car': 4, 'solar': 4,
'builders': 4, 'frl': 4, 'recipes': 4, 'hockey': 4, 'tattoo': 4,
'canon': 4, 'td': 4, 'saarland': 4, 'mp': 4, 'ki': 4, 'melbourne': 4,
'creditcard': 4, 'mortgage': 4, 'health': 4, 'bradesco': 4,
'vlaanderen': 4, 'okinawa': 4, 'xn--3e0b707e': 4, 'post': 4,
'property': 4, 'realtor': 4, 'tienda': 4, 'rio': 4, 'basketball': 3,
'man': 3, 'futbol': 3, 'doctor': 3, 'moda': 3, 'bb': 3, 'casino': 3,
'organic': 3, 'sap': 3, 'estate': 3, 'gives': 3, 'holiday': 3, 'aq': 3,
'repair': 3, 'vi': 3, 'gn': 3, 'monash': 3, 'kp': 3, 'auto': 3, 'soy': 3,
'xn--90ae': 3, 'sandvik': 3, 'goog': 3, 'pn': 3, 'gp': 3, 'gf': 3,
'corsica': 3, 'barclaycard': 3, 'contractors': 3, 'fail': 3, 'tax': 3,
'discount': 3, 'nagoya': 3, 'rentals': 3, 'sbi': 3, 'ruhr': 3, 'taxi': 3,
'cern': 3, 'bs': 3, 'mba': 3, 'diamonds': 3, 'miami': 3, 'investments': 3,
'tirol': 3, 'family': 3, 'singles': 3, 'ren': 3, 'cab': 3, 'dm': 3,
'pictet': 2, 'associates': 2, 'nf': 2, 'office': 2, 'sky': 2, 'cba': 2,
'xn--mgbab2bd': 2, 'shopping': 2, 'vanguard': 2, 'lgbt': 2, 'sony': 2,
'glass': 2, 'cymru': 2, 'flowers': 2, 'holdings': 2, 'seat': 2, 'med': 2,
'consulting': 2, 'jcb': 2, 'fans': 2, 'ads': 2, 'irish': 2, 'sncf': 2,
'study': 2, 'luxury': 2, 'yandex': 2, 'xn--fiqs8s': 2, 'pharmacy': 2,
'immo': 2, 'rent': 2, 'orange': 2, 'lr': 2, 'radio': 2, 'cw': 2,
'properties': 2, 'observer': 2, 'ismaili': 2, 'kyoto': 2,
'productions': 2, 'theater': 2, 'markets': 2, 'lighting': 2,
'industries': 2, 'coupons': 2, 'tatar': 2, 'forsale': 2, 'hm': 2, 'inc': 2,
'telefonica': 2, 'reisen': 2, 'dhl': 2, 'weber': 2, 'aw': 2, 'juegos': 2,
'mobile': 2, 'hot': 2, 'courses': 2, 'nico': 2, 'nr': 2, 'christmas': 1,
'attorney': 1, 'yahoo': 1, 'srl': 1, 'maison': 1, 'sarl': 1, 'actor': 1,
'docs': 1, 'salon': 1, 'viajes': 1, 'natura': 1, 'schmidt': 1,
'chintai': 1, 'kred': 1, 'supplies': 1, 'ong': 1, 'lawyer': 1,
'immobilien': 1, 'komatsu': 1, 'xn--wgbl6a': 1, 'fox': 1, 'fk': 1,
'madrid': 1, 'hiv': 1, 'plumbing': 1, 'xn--e1a4c': 1, 'bingo': 1, 'gw': 1,
'security': 1, 'aws': 1, 'loans': 1, 'weather': 1, 'trading': 1,
'cruises': 1, 'hitachi': 1, 'alsace': 1, 'durban': 1, 'financial': 1,
'george': 1, 'shiksha': 1, 'globo': 1, 'xn--54b7fta0cc': 1, 'physio': 1,
'teva': 1, 'country': 1, 'voto': 1, 'surgery': 1, 'baidu': 1, 'gop': 1,
'mattel': 1, 'mom': 1, 'jll': 1, 'chase': 1, 'cars': 1, 'cooking': 1,
'er': 1, 'prime': 1, 'gifts': 1, 'now': 1, 'xn--h2brj9c': 1, 'jewelry': 1,
'axa': 1, 'uol': 1, 'java': 1, 'drive': 1, 'surf': 1, 'lease': 1,
'gmail': 1, 'florist': 1, 'hiphop': 1, 'accountants': 1, 'credit': 1,
'weir': 1, 'bom': 1, 'mopar': 1, 'meet': 1, 'career': 1, 'foo': 1,
'degree': 1, 'youtube': 1, 'tab': 1, 'anz': 1, 'insure': 1, 'neustar': 1,
'ck': 1, 'comcast': 1, 'crs': 1, 'claims': 1, 'guitars': 1, 'limited': 1,
'flights': 1, 'reise': 1, 'saxo': 1, 'osaka': 1, 'brother': 1, 'toyota': 1,
'here': 1, 'healthcare': 1, 'honda': 1, 'vacations': 1, 'forex': 1,
'play': 1, 'new': 1,
}
| sebastian-nagel/cc-crawl-statistics | stats/tld_alexa_top_1m.py | Python | apache-2.0 | 10,408 | [
"CASINO",
"MOE"
] | d0624ac02b5d31a94136b15b8e0b236685b9e1f1019e546c8f523f4819af5536 |
"""
pymc3.distributions
A collection of common probability distributions for stochastic
nodes in PyMC.
"""
from __future__ import division
from .dist_math import *
from .distribution import draw_values, generate_samples
import numpy as np
import numpy.random as nr
import scipy.stats as st
from . import transforms
__all__ = ['Uniform', 'Flat', 'Normal', 'Beta', 'Exponential', 'Laplace',
'T', 'StudentT', 'Cauchy', 'HalfCauchy', 'Gamma', 'Weibull','Bound',
'Tpos', 'Lognormal', 'ChiSquared', 'HalfNormal', 'Wald',
'Pareto', 'InverseGamma', 'ExGaussian']
class PositiveContinuous(Continuous):
    """Base class for continuous distributions with support on (0, inf).

    Applies a log transform by default so samplers operate on an
    unconstrained real-valued space.
    """
    def __init__(self, transform=transforms.log, *args, **kwargs):
        # Forward the (possibly overridden) transform to Continuous.
        super(PositiveContinuous, self).__init__(transform=transform, *args, **kwargs)
class UnitContinuous(Continuous):
    """Base class for continuous distributions with support on [0, 1].

    Applies a log-odds (logit) transform by default so samplers operate
    on an unconstrained real-valued space.
    """
    def __init__(self, transform=transforms.logodds, *args, **kwargs):
        # Forward the (possibly overridden) transform to Continuous.
        super(UnitContinuous, self).__init__(transform=transform, *args, **kwargs)
def get_tau_sd(tau=None, sd=None):
    r"""Resolve the precision/standard-deviation pair from either one.

    .. math::
        \tau = \frac{1}{\sigma^2}

    Parameters
    ----------
    tau : array-like, optional
        Precision.
    sd : array-like, optional
        Standard deviation.

    Returns
    -------
    tuple
        ``(tau, sd)``, both cast to float (works for scalars and
        numpy arrays alike).

    Raises
    ------
    ValueError
        If both ``tau`` and ``sd`` are given.

    Notes
    -----
    If neither argument is provided, returns ``(1., 1.)``.
    """
    if tau is not None and sd is not None:
        raise ValueError("Can't pass both tau and sd")
    if tau is None and sd is None:
        # Default: standard normal scale.
        return 1., 1.
    if sd is None:
        # Only tau given; derive sd = tau**(-1/2).
        sd = tau ** -.5
    else:
        # Only sd given; derive tau = sd**(-2).
        tau = sd ** -2.
    # Multiplying by 1. coerces ints to float while leaving arrays intact.
    return 1. * tau, 1. * sd
class Uniform(Continuous):
    r"""
    Continuous uniform log-likelihood.

    .. math::
        f(x \mid lower, upper) = \frac{1}{upper-lower}

    Parameters
    ----------
    lower : float
        Lower limit (defaults to 0)
    upper : float
        Upper limit (defaults to 1)
    """
    def __init__(self, lower=0, upper=1, transform='interval', *args, **kwargs):
        super(Uniform, self).__init__(*args, **kwargs)
        self.lower = lower
        self.upper = upper
        self.mean = (upper + lower) / 2.
        self.median = self.mean
        # Bug fix: the original used `transform is 'interval'`, which tests
        # object identity; whether two 'interval' literals are the same
        # object is a CPython interning detail. Use equality instead.
        if transform == 'interval':
            self.transform = transforms.interval(lower, upper)

    def random(self, point=None, size=None, repeat=None):
        lower, upper = draw_values([self.lower, self.upper],
                                   point=point)
        # scipy parameterizes uniform as loc/scale, i.e. [loc, loc + scale].
        return generate_samples(st.uniform.rvs, loc=lower, scale=upper - lower,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        lower = self.lower
        upper = self.upper
        # Constant density inside the support; bound() yields -inf outside.
        return bound(
            -log(upper - lower),
            lower <= value, value <= upper)
class Flat(Continuous):
    """
    Uninformative log-likelihood that returns 0 regardless of
    the passed value.
    """
    def __init__(self, *args, **kwargs):
        super(Flat, self).__init__(*args, **kwargs)
        # An arbitrary but fixed point estimate; used e.g. for test values.
        self.median = 0

    def random(self, point=None, size=None, repeat=None):
        # The density is improper (does not integrate to 1), so sampling
        # from the prior is undefined by construction.
        raise ValueError('Cannot sample from Flat distribution')

    def logp(self, value):
        # log p = 0 everywhere, broadcast to the shape of `value`.
        return zeros_like(value)
class Normal(Continuous):
    r"""
    Normal log-likelihood.

    .. math::
        f(x \mid \mu, \tau) = \sqrt{\frac{\tau}{2\pi}} \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}

    Parameters
    ----------
    mu : float
        Mean of the distribution.
    tau : float
        Precision of the distribution, which corresponds to
        :math:`1/\sigma^2` (tau > 0).
    sd : float
        Standard deviation of the distribution. Alternative parameterization.

    .. note::
      - :math:`E(X) = \mu`
      - :math:`Var(X) = 1/\tau`
    """
    def __init__(self, mu=0.0, tau=None, sd=None, *args, **kwargs):
        super(Normal, self).__init__(*args, **kwargs)
        # For a normal, mean == median == mode.
        self.mean = self.median = self.mode = self.mu = mu
        # Exactly one of tau/sd may be given; the other is derived.
        self.tau, self.sd = get_tau_sd(tau=tau, sd=sd)
        self.variance = 1. / self.tau

    def random(self, point=None, size=None, repeat=None):
        mu, tau, sd = draw_values([self.mu, self.tau, self.sd],
                                  point=point)
        # scipy's norm takes a standard deviation, hence tau**-0.5.
        return generate_samples(st.norm.rvs, loc=mu, scale=tau ** -0.5,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        tau = self.tau
        sd = self.sd
        mu = self.mu
        # Log-density written in precision form; bound() enforces the
        # parameter constraints (-inf when violated).
        return bound(
            (-tau * (value - mu) ** 2 + log(tau / pi / 2.)) / 2.,
            tau > 0,
            sd > 0
        )
class HalfNormal(PositiveContinuous):
    r"""
    Half-normal log-likelihood, a normal distribution with mean 0 limited
    to the domain :math:`x \in [0, \infty)`.

    .. math::
        f(x \mid \tau) = \sqrt{\frac{2\tau}{\pi}}\exp\left\{ {\frac{-x^2 \tau}{2}}\right\}

    :Parameters:
      - `x` : :math:`x \ge 0`
      - `tau` : tau > 0
      - `sd` : sd > 0 (alternative parameterization)
    """
    def __init__(self, tau=None, sd=None, *args, **kwargs):
        super(HalfNormal, self).__init__(*args, **kwargs)
        self.tau, self.sd = get_tau_sd(tau=tau, sd=sd)
        # Closed-form half-normal moments in precision parameterization.
        self.mean = sqrt(2 / (pi * self.tau))
        self.variance = (1. - 2/pi) / self.tau

    def random(self, point=None, size=None, repeat=None):
        tau = draw_values([self.tau], point=point)
        # scipy's halfnorm takes a standard deviation, hence tau**-0.5.
        return generate_samples(st.halfnorm.rvs, loc=0., scale=tau ** -0.5,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        tau = self.tau
        sd = self.sd
        # Twice the normal density folded onto [0, inf); support enforced
        # through the `value >= 0` condition in bound().
        return bound(
            -0.5 * tau * value**2 + 0.5 * log(tau * 2. / pi),
            tau > 0,
            sd > 0,
            value >= 0
        )
class Wald(PositiveContinuous):
    r"""
    Wald random variable with support :math:`x \in (0, \infty)`.

    .. math::
        f(x \mid \mu, \lambda) = \left(\frac{\lambda}{2\pi)}\right)^{1/2}x^{-3/2}
        \exp\left\{ -\frac{\lambda}{2x}\left(\frac{x-\mu}{\mu}\right)^2\right\}

    Parameters
    ----------
    mu : float, optional
        Mean of the distribution (mu > 0).
    lam : float, optional
        Relative precision (lam > 0).
    phi : float, optional
        Shape. Alternative parametrisation where phi = lam / mu (phi > 0).
    alpha : float, optional
        Shift/location (alpha >= 0).

    The Wald can be instantiated by specifying mu only (so lam=1),
    mu and lam, mu and phi, or lam and phi.

    .. note::
      - :math:`E(X) = \mu`
      - :math:`Var(X) = \frac{\mu^3}{\lambda}`

    References
    ----------
    .. [Tweedie1957]
       Tweedie, M. C. K. (1957).
       Statistical Properties of Inverse Gaussian Distributions I.
       The Annals of Mathematical Statistics, Vol. 28, No. 2, pp. 362-377
    .. [Michael1976]
       Michael, J. R., Schucany, W. R. and Hass, R. W. (1976).
       Generating Random Variates Using Transformations with Multiple Roots.
       The American Statistician, Vol. 30, No. 2, pp. 88-90
    """
    def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs):
        super(Wald, self).__init__(*args, **kwargs)
        # Normalize the three alternative parameterizations to (mu, lam, phi).
        self.mu, self.lam, self.phi = self.get_mu_lam_phi(mu, lam, phi)
        self.alpha = alpha
        # Moments of the shifted inverse-Gaussian; alpha only shifts location.
        self.mean = self.mu + alpha
        self.mode = self.mu * ( sqrt(1. + (1.5 * self.mu / self.lam) ** 2) - 1.5 * self.mu / self.lam ) + alpha
        self.variance = (self.mu ** 3) / self.lam

    def get_mu_lam_phi(self, mu, lam, phi):
        # Accept exactly one of the documented combinations; any other
        # combination falls through to the ValueError below.
        if mu is None:
            if lam is not None and phi is not None:
                return lam / phi, lam, phi
        else:
            if lam is None:
                if phi is None:
                    return mu, 1., 1. / mu
                else:
                    return mu, mu * phi, phi
            else:
                if phi is None:
                    return mu, lam, lam / mu
        raise ValueError('Wald distribution must specify either mu only, mu and lam, mu and phi, or lam and phi.')

    def _random(self, mu, lam, alpha, size=None):
        # Michael, Schucany & Hass (1976) transformation-with-multiple-roots
        # sampler: square a standard normal, solve the quadratic for a root...
        v = st.norm.rvs(loc=0., scale=1., size=size) ** 2
        value = mu + (mu ** 2) * v / (2. * lam) - mu/(2. * lam) * \
            np.sqrt(4. * mu * lam * v + (mu * v) ** 2)
        z = st.uniform.rvs(loc=0., scale=1, size=size)
        # ...then pick between the two roots with probability mu/(mu+value).
        # i is -1 (keep `value`) or +1 (use the conjugate root mu**2/value).
        i = np.floor(z - mu / (mu + value)) * 2 + 1
        value = (value ** -i) * (mu ** (i + 1))
        return value + alpha

    def random(self, point=None, size=None, repeat=None):
        mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha],
                                     point=point)
        return generate_samples(self._random,
                                mu, lam, alpha,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        mu = self.mu
        lam = self.lam
        alpha = self.alpha
        # value *must* be iid. Otherwise this is wrong.
        return bound(logpow(lam / (2. * pi), 0.5) - logpow(value - alpha, 1.5)
                     - 0.5 * lam / (value - alpha) * ((value - alpha - mu) / (mu)) ** 2,
                     mu > 0.,
                     lam > 0.,
                     value > 0.,
                     alpha >=0.,
                     value - alpha > 0)
class Beta(UnitContinuous):
    r"""
    Beta log-likelihood. The conjugate prior for the parameter
    :math:`p` of the binomial distribution.

    .. math::
        f(x \mid \alpha, \beta) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} x^{\alpha - 1} (1 - x)^{\beta - 1}

    Parameters
    ----------
    alpha : float
        alpha > 0
    beta : float
        beta > 0

    Alternative parameterization:
    mu : float
        1 > mu > 0
    sd : float
        sd > 0

    .. math::
        alpha = mu * sd
        beta = (1 - mu) * sd

    .. note::
      - :math:`E(X)=\frac{\alpha}{\alpha+\beta}`
      - :math:`Var(X)=\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
    """
    def __init__(self, alpha=None, beta=None, mu=None, sd=None, *args, **kwargs):
        super(Beta, self).__init__(*args, **kwargs)
        alpha, beta = self.get_alpha_beta(alpha, beta, mu, sd)
        self.alpha = alpha
        self.beta = beta
        self.mean = alpha / (alpha + beta)
        self.variance = alpha * beta / (
            (alpha + beta) ** 2 * (alpha + beta + 1))

    def get_alpha_beta(self, alpha=None, beta=None, mu=None, sd=None):
        # NOTE(review): per the conversion below, `sd` acts as a total
        # concentration (alpha + beta), not a standard deviation — the name
        # is misleading; confirm against callers before renaming.
        if (alpha is not None) and (beta is not None):
            pass
        elif (mu is not None) and (sd is not None):
            alpha = mu * sd
            beta = (1 - mu) * sd
        else:
            raise ValueError('Incompatible parameterization. Either use alpha and beta, or mu and sd to specify distribution. ')
        return alpha, beta

    def random(self, point=None, size=None, repeat=None):
        alpha, beta = draw_values([self.alpha, self.beta],
                                  point=point)
        return generate_samples(st.beta.rvs, alpha, beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        # Full log-density including the log-beta normalizing constant.
        return bound(
            gammaln(alpha + beta) - gammaln(alpha) - gammaln(beta) +
            logpow(
                value, alpha - 1) + logpow(1 - value, beta - 1),
            0 <= value, value <= 1,
            alpha > 0,
            beta > 0)
class Exponential(PositiveContinuous):
    """
    Exponential distribution

    Parameters
    ----------
    lam : float
        lam > 0
        rate or inverse scale
    """
    def __init__(self, lam, *args, **kwargs):
        super(Exponential, self).__init__(*args, **kwargs)
        self.lam = lam
        self.mean = 1. / lam
        # median = ln(2) / lam, written via the mean.
        self.median = self.mean * log(2)
        self.mode = 0
        self.variance = lam ** -2

    def random(self, point=None, size=None, repeat=None):
        lam = draw_values([self.lam], point=point)
        # numpy's exponential takes a *scale* (1/rate) argument.
        return generate_samples(nr.exponential, scale=1./lam,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        lam = self.lam
        # log f(x) = log(lam) - lam*x on the support x > 0.
        return bound(log(lam) - lam * value,
                     value > 0,
                     lam > 0)
class Laplace(Continuous):
    """
    Laplace distribution

    Parameters
    ----------
    mu : float
        mean
    b : float
        scale
    """
    def __init__(self, mu, b, *args, **kwargs):
        super(Laplace, self).__init__(*args, **kwargs)
        self.b = b
        # Symmetric distribution: mean == median == mode == mu.
        self.mean = self.median = self.mode = self.mu = mu
        self.variance = 2 * b ** 2

    def random(self, point=None, size=None, repeat=None):
        mu, b = draw_values([self.mu, self.b], point=point)
        return generate_samples(nr.laplace, mu, b,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        mu = self.mu
        b = self.b
        # log f(x) = -log(2b) - |x - mu| / b; note no bound() here, so the
        # b > 0 constraint is not enforced symbolically.
        return -log(2 * b) - abs(value - mu) / b
class Lognormal(PositiveContinuous):
    r"""
    Log-normal log-likelihood.

    Distribution of any random variable whose logarithm is normally
    distributed. A variable might be modeled as log-normal if it can
    be thought of as the multiplicative product of many small
    independent factors.

    .. math::
        f(x \mid \mu, \tau) = \sqrt{\frac{\tau}{2\pi}}\frac{
        \exp\left\{ -\frac{\tau}{2} (\ln(x)-\mu)^2 \right\}}{x}

    :Parameters:
      - `x` : x > 0
      - `mu` : Location parameter.
      - `tau` : Scale parameter (tau > 0).

    .. note::
       :math:`E(X)=e^{\mu+\frac{1}{2\tau}}`
       :math:`Var(X)=(e^{1/\tau}-1)e^{2\mu+\frac{1}{\tau}}`
    """
    def __init__(self, mu=0, tau=1, *args, **kwargs):
        super(Lognormal, self).__init__(*args, **kwargs)
        self.mu = mu
        self.tau = tau
        # Standard log-normal moments in precision (tau) parameterization.
        self.mean = exp(mu + 1./(2*tau))
        self.median = exp(mu)
        self.mode = exp(mu - 1./tau)
        self.variance = (exp(1./tau) - 1) * exp(2*mu + 1./tau)

    def _random(self, mu, tau, size=None):
        # Exponentiate a normal draw: exp(mu + sigma * Z), sigma = tau**-0.5.
        samples = st.norm.rvs(loc=0., scale=1., size=size)
        return np.exp(mu + (tau ** -0.5) * samples)

    def random(self, point=None, size=None, repeat=None):
        mu, tau = draw_values([self.mu, self.tau], point=point)
        return generate_samples(self._random, mu, tau,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        mu = self.mu
        tau = self.tau
        # Normal log-density of log(value) plus the Jacobian term -log(value).
        return bound(
            -0.5*tau*(log(value) - mu)**2 + 0.5*log(tau/(2.*pi)) - log(value),
            tau > 0)
class T(Continuous):
    r"""
    Non-central Student's T log-likelihood.

    Describes a normal variable whose precision is gamma distributed. If
    only nu parameter is passed, this specifies a standard (central)
    Student's T.

    .. math::
        f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu +
        1}{2})}{\Gamma(\frac{\nu}{2})}
        \left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
        \left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}

    Parameters
    ----------
    nu : int
        Degrees of freedom
    mu : float
        Location parameter (defaults to 0)
    lam : float
        Scale parameter (defaults to 1)
    """
    def __init__(self, nu, mu=0, lam=None, sd=None, *args, **kwargs):
        super(T, self).__init__(*args, **kwargs)
        # Keep nu symbolic so the variance switch below can be evaluated
        # lazily in the Theano graph.
        self.nu = nu = as_tensor_variable(nu)
        # `lam` plays the role of a precision; sd is the alternative form.
        self.lam, self.sd = get_tau_sd(tau=lam, sd=sd)
        self.mean = self.median = self.mode = self.mu = mu
        # Variance is finite only for nu > 2; `* 1` coerces the boolean
        # condition to an int for switch().
        self.variance = switch((nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)) , inf)

    def random(self, point=None, size=None, repeat=None):
        nu, mu, lam = draw_values([self.nu, self.mu, self.lam],
                                  point=point)
        # scipy's t takes a standard-deviation-like scale, hence lam**-0.5.
        return generate_samples(st.t.rvs, nu, loc=mu, scale=lam ** -0.5,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        nu = self.nu
        mu = self.mu
        lam = self.lam
        sd = self.sd
        return bound(
            gammaln((nu + 1.0) / 2.0) + .5 * log(lam / (nu * pi)) - gammaln(nu / 2.0) - (nu + 1.0) / 2.0 * log(1.0 + lam * (value - mu) ** 2 / nu),
            lam > 0,
            nu > 0,
            sd > 0)

# Backwards-compatible alias (also exported in __all__).
StudentT = T
class Pareto(PositiveContinuous):
    r"""
    Pareto log-likelihood. The Pareto is a continuous, positive
    probability distribution with two parameters. It is often used
    to characterize wealth distribution, or other examples of the
    80/20 rule.

    .. math::
        f(x \mid \alpha, m) = \frac{\alpha m^{\alpha}}{x^{\alpha+1}}

    Parameters
    ----------
    alpha : float
        Shape parameter (alpha>0)
    m : float
        Scale parameter (m>0)

    .. note::
       - :math:`E(x)=\frac{\alpha m}{\alpha-1} if \alpha > 1`
       - :math:`Var(x)=\frac{m^2 \alpha}{(\alpha-1)^2(\alpha-2)} if \alpha > 2`
    """
    def __init__(self, alpha, m, *args, **kwargs):
        super(Pareto, self).__init__(*args, **kwargs)
        self.alpha = alpha
        self.m = m
        # Mean/variance are finite only for alpha > 1 / alpha > 2.
        self.mean = switch(gt(alpha,1), alpha * m / (alpha - 1.), inf)
        self.median = m * 2.**(1./alpha)
        self.variance = switch(gt(alpha,2), (alpha * m**2) / ((alpha - 2.) * (alpha - 1.)**2), inf)

    def _random(self, alpha, m, size=None):
        # Inverse-CDF sampling: X = m * (1 - U)**(-1/alpha).
        u = nr.uniform(size=size)
        return m * (1. - u) ** (-1. / alpha)

    def random(self, point=None, size=None, repeat=None):
        alpha, m = draw_values([self.alpha, self.m],
                               point=point)
        return generate_samples(self._random, alpha, m,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        m = self.m
        return bound(
            log(alpha) + logpow(m, alpha) - logpow(value, alpha+1),
            alpha > 0,
            m > 0,
            value >= m)
class Cauchy(Continuous):
    r"""
    Cauchy log-likelihood. The Cauchy distribution is also known as the
    Lorentz or the Breit-Wigner distribution.

    .. math::
        f(x \mid \alpha, \beta) = \frac{1}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}

    Parameters
    ----------
    alpha : float
        Location parameter
    beta : float
        Scale parameter > 0

    .. note::
       Mode and median are at alpha.  No mean/variance are set because the
       Cauchy distribution has no finite moments.
    """
    def __init__(self, alpha, beta, *args, **kwargs):
        super(Cauchy, self).__init__(*args, **kwargs)
        self.median = self.mode = self.alpha = alpha
        self.beta = beta

    def _random(self, alpha, beta, size=None):
        # Inverse-CDF sampling: X = alpha + beta * tan(pi * (U - 1/2)).
        u = nr.uniform(size=size)
        return alpha + beta * np.tan(np.pi*(u - 0.5))

    def random(self, point=None, size=None, repeat=None):
        alpha, beta = draw_values([self.alpha, self.beta],
                                  point=point)
        return generate_samples(self._random, alpha, beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        return bound(
            -log(pi) - log(beta) - log(1 + ((value - alpha) / beta) ** 2),
            beta > 0)
class HalfCauchy(PositiveContinuous):
    r"""
    Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.

    .. math::
        f(x \mid \beta) = \frac{2}{\pi \beta [1 + (\frac{x}{\beta})^2]}

    :Parameters:
      - `beta` : Scale parameter (beta > 0).

    .. note::
      - x must be non-negative.
    """
    def __init__(self, beta, *args, **kwargs):
        super(HalfCauchy, self).__init__(*args, **kwargs)
        self.mode = 0
        # The half-Cauchy median equals the scale parameter.
        self.median = beta
        self.beta = beta

    def _random(self, beta, size=None):
        # |Cauchy(0, beta)| via inverse-CDF sampling of the full Cauchy.
        u = nr.uniform(size=size)
        return beta * np.abs(np.tan(np.pi*(u - 0.5)))

    def random(self, point=None, size=None, repeat=None):
        beta = draw_values([self.beta], point=point)
        return generate_samples(self._random, beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        beta = self.beta
        # Twice the Cauchy density (log(2) term) folded onto [0, inf).
        return bound(
            log(2) - log(pi) - log(beta) - log(1 + (value / beta) ** 2),
            beta > 0,
            value >= 0)
class Gamma(PositiveContinuous):
    r"""
    Gamma log-likelihood.

    Represents the sum of alpha exponentially distributed random variables, each
    of which has mean beta.

    .. math::
        f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}

    Parameters
    ----------
    x : float
        math:`x \ge 0`
    alpha : float
        Shape parameter (alpha > 0).
    beta : float
        Rate parameter (beta > 0).

    Alternative parameterization:
    mu : float
        mu > 0
    sd : float
        sd > 0

    .. math::
        alpha = \frac{mu^2}{sd^2}
        beta = \frac{mu}{sd^2}

    .. note::
      - :math:`E(X) = \frac{\alpha}{\beta}`
      - :math:`Var(X) = \frac{\alpha}{\beta^2}`
    """
    def __init__(self, alpha=None, beta=None, mu=None, sd=None, *args, **kwargs):
        super(Gamma, self).__init__(*args, **kwargs)
        alpha, beta = self.get_alpha_beta(alpha, beta, mu, sd)
        self.alpha = alpha
        self.beta = beta
        self.mean = alpha / beta
        # For alpha < 1 the density has no interior mode; clamp at 0.
        self.mode = maximum((alpha - 1) / beta, 0)
        self.variance = alpha / beta ** 2

    def get_alpha_beta(self, alpha=None, beta=None, mu=None, sd=None):
        # Method-of-moments conversion from (mu, sd) to (alpha, beta).
        if (alpha is not None) and (beta is not None):
            pass
        elif (mu is not None) and (sd is not None):
            alpha = mu ** 2 / sd ** 2
            beta = mu / sd ** 2
        else:
            raise ValueError('Incompatible parameterization. Either use alpha and beta, or mu and sd to specify distribution. ')
        return alpha, beta

    def random(self, point=None, size=None, repeat=None):
        alpha, beta = draw_values([self.alpha, self.beta],
                                  point=point)
        # scipy's gamma uses a scale (1/rate) parameterization.
        return generate_samples(st.gamma.rvs, alpha, scale=1. / beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        return bound(
            -gammaln(alpha) + logpow(
                beta, alpha) - beta * value + logpow(value, alpha - 1),
            value >= 0,
            alpha > 0,
            beta > 0)
class InverseGamma(PositiveContinuous):
    r"""
    Inverse gamma log-likelihood, the reciprocal of the gamma distribution.

    .. math::
        f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{-\alpha - 1} \exp\left(\frac{-\beta}{x}\right)

    Parameters
    ----------
    alpha : float
        Shape parameter (alpha > 0).
    beta : float
        Scale parameter (beta > 0).

    .. note::
       :math:`E(X)=\frac{\beta}{\alpha-1}` for :math:`\alpha > 1`
       :math:`Var(X)=\frac{\beta^2}{(\alpha-1)^2(\alpha-2)}` for :math:`\alpha > 2`
    """
    def __init__(self, alpha, beta=1, *args, **kwargs):
        super(InverseGamma, self).__init__(*args, **kwargs)
        self.alpha = alpha
        self.beta = beta
        # Bug fix: the original computed the mean with Python short-circuit
        # logic (`(alpha > 1) * beta / (alpha - 1.) or inf`), which does not
        # work elementwise for symbolic/array parameters; use switch() like
        # the other distributions in this module.
        self.mean = switch(gt(alpha, 1), beta / (alpha - 1.), inf)
        self.mode = beta / (alpha + 1.)
        # Bug fix: Var(X) = beta**2 / ((alpha - 1)**2 * (alpha - 2)) for
        # alpha > 2; the original divided by alpha instead of (alpha - 2).
        self.variance = switch(gt(alpha, 2), (beta ** 2) / ((alpha - 2.) * (alpha - 1.)**2), inf)

    def random(self, point=None, size=None, repeat=None):
        alpha, beta = draw_values([self.alpha, self.beta],
                                  point=point)
        return generate_samples(st.invgamma.rvs, a=alpha, scale=beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        return bound(
            logpow(beta, alpha) - gammaln(alpha) - beta / value + logpow(value, -alpha-1),
            value > 0,
            alpha > 0,
            beta > 0)
class ChiSquared(Gamma):
    r"""
    Chi-squared :math:`\chi^2` log-likelihood.

    .. math::
        f(x \mid \nu) = \frac{x^{(\nu-2)/2}e^{-x/2}}{2^{\nu/2}\Gamma(\nu/2)}

    :Parameters:
      - `x` : > 0
      - `nu` : [int] Degrees of freedom ( nu > 0 )

    .. note::
      - :math:`E(X)=\nu`
      - :math:`Var(X)=2\nu`
    """
    def __init__(self, nu, *args, **kwargs):
        self.nu = nu
        # Chi-squared(nu) is exactly Gamma(alpha=nu/2, beta=1/2).
        super(ChiSquared, self).__init__(alpha=nu/2., beta=0.5, *args, **kwargs)
class Weibull(PositiveContinuous):
    r"""
    Weibull log-likelihood

    .. math::
        f(x \mid \alpha, \beta) = \frac{\alpha x^{\alpha - 1}
        \exp(-(\frac{x}{\beta})^{\alpha})}{\beta^\alpha}

    :Parameters:
      - `x` : :math:`x \ge 0`
      - `alpha` : alpha > 0
      - `beta` : beta > 0

    .. note::
      - :math:`E(x)=\beta \Gamma(1+\frac{1}{\alpha})`
      - :math:`median(x)=\beta (\ln 2)^{1/\alpha}`
      - :math:`Var(x)=\beta^2 \Gamma(1+\frac{2}{\alpha}) - E(x)^2`
    """
    def __init__(self, alpha, beta, *args, **kwargs):
        super(Weibull, self).__init__(*args, **kwargs)
        self.alpha = alpha
        self.beta = beta
        # exp(gammaln(z)) == Gamma(z): E(X) = beta * Gamma(1 + 1/alpha).
        self.mean = beta * exp(gammaln(1 + 1./alpha))
        # Bug fix: median(X) = beta * (ln 2)**(1/alpha). The original wrapped
        # log(2) in exp(gammaln(.)), computing Gamma(ln 2) instead of ln 2.
        self.median = beta * log(2)**(1./alpha)
        # Bug fix: Var(X) = beta**2 * Gamma(1 + 2/alpha) - E(X)**2. The
        # original had the "- mean**2" term inside the gammaln argument.
        self.variance = (beta**2) * exp(gammaln(1 + 2./alpha)) - self.mean**2

    def random(self, point=None, size=None, repeat=None):
        alpha, beta = draw_values([self.alpha, self.beta],
                                  point=point)
        # Inverse-CDF sampling: X = beta * (-ln U)**(1/alpha).
        # Bug fix: the original raised to the power `a` instead of `1/a`,
        # producing draws from the wrong distribution for alpha != 1.
        return generate_samples(lambda a, b, size=None: b * (-np.log(nr.uniform(size=size))) ** (1. / a),
                                alpha, beta,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        return bound(
            (log(alpha) - log(beta) + (alpha - 1)*log(value/beta)
            - (value/beta)**alpha),
            value >= 0,
            alpha > 0,
            beta > 0)
class Bounded(Continuous):
    """A bounded distribution.

    Wraps another distribution and truncates it to (lower, upper] by
    rejection sampling; the log-density is the wrapped density restricted
    to the interval (not renormalized).
    """
    def __init__(self, distribution, lower, upper, *args, **kwargs):
        self.dist = distribution.dist(*args, **kwargs)
        # Copy the wrapped distribution's attributes onto this instance,
        # then record lower/upper (and the other locals) on top.
        self.__dict__.update(self.dist.__dict__)
        self.__dict__.update(locals())
        if hasattr(self.dist, 'mode'):
            self.mode = self.dist.mode

    def _random(self, lower, upper, point=None, size=None):
        # Rejection sampling: keep drawing from the wrapped distribution,
        # retaining only values inside the interval, until the output
        # buffer is filled.  `n` tracks how many samples are still needed.
        # NOTE(review): acceptance uses (lower, upper] here while logp uses
        # the closed interval [lower, upper] — confirm this asymmetry is
        # intended.
        samples = np.zeros(size).flatten()
        i, n = 0, len(samples)
        while i < len(samples):
            sample = self.dist.random(point=point, size=n)
            select = sample[np.logical_and(sample > lower, sample <= upper)]
            samples[i:(i+len(select))] = select[:]
            i += len(select)
            n -= len(select)
        if size is not None:
            return np.reshape(samples, size)
        else:
            return samples

    def random(self, point=None, size=None, repeat=None):
        lower, upper = draw_values([self.lower, self.upper], point=point)
        return generate_samples(self._random, lower, upper, point,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        # Wrapped log-density with -inf outside [lower, upper].
        return bound(
            self.dist.logp(value),
            self.lower <= value, value <= self.upper)
class Bound(object):
    """Factory that creates a bounded version of another distribution.

    Example: ``Tpos = Bound(T, 0)`` yields a Student-T truncated to
    positive values.
    """
    def __init__(self, distribution, lower=-inf, upper=inf):
        self.distribution = distribution
        self.lower = lower
        self.upper = upper

    def __call__(self, *args, **kwargs):
        # The first positional argument is the variable name consumed by
        # the Distribution machinery; forward the rest to Bounded.
        first, args = args[0], args[1:]
        return Bounded(first, self.distribution, self.lower, self.upper, *args, **kwargs)

    def dist(self, *args, **kwargs):
        # Bug fix: the original signature was `def dist(*args, **kwargs)`,
        # omitting `self`, so the body's references to self.distribution /
        # self.lower / self.upper raised NameError on every call.
        return Bounded.dist(self.distribution, self.lower, self.upper, *args, **kwargs)

# Convenience: Student-T restricted to positive values.
Tpos = Bound(T, 0)
class ExGaussian(Continuous):
    r"""
    Exponentially modified Gaussian random variable with
    support :math:`x \in [-\infty, \infty]`.This results from
    the convolution of a normal distribution with an exponential
    distribution.

    .. math::
       f(x \mid \mu, \sigma, \tau) = \frac{1}{\nu}\;
       \exp\left\{\frac{\mu-x}{\nu}+\frac{\sigma^2}{2\nu^2}\right\}
       \Phi\left(\frac{x-\mu}{\sigma}-\frac{\sigma}{\nu}\right)

    where :math:`\Phi` is the cumulative distribution function of the
    standard normal distribution.

    Parameters
    ----------
    mu : float
        Mean of the normal distribution (-inf < mu < inf).
    sigma : float
        Standard deviation of the normal distribution (sigma > 0).
    nu : float
        Mean of the exponential distribution (nu > 0).

    .. note::
      - :math:`E(X) = \mu + \nu`
      - :math:`Var(X) = \sigma^2 + \nu^2`

    References
    ----------
    .. [Rigby2005]
       Rigby R.A. and Stasinopoulos D.M. (2005).
       "Generalized additive models for location, scale and shape"
       Applied Statististics., 54, part 3, pp 507-554.
    .. [Lacouture2008]
       Lacouture, Y. and Couseanou, D. (2008).
       "How to use MATLAB to fit the ex-Gaussian and other probability functions to a distribution of response times".
       Tutorials in Quantitative Methods for Psychology, Vol. 4, No. 1, pp 35-45.
    """
    def __init__(self, mu, sigma, nu, *args, **kwargs):
        super(ExGaussian, self).__init__(*args, **kwargs)
        self.mu = mu
        self.sigma = sigma
        self.nu = nu
        # Moments of the convolution: sums of the two components' moments.
        self.mean = mu + nu
        self.variance = (sigma ** 2) + (nu ** 2)

    def random(self, point=None, size=None, repeat=None):
        mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu],
                                    point=point)
        # Sample as normal + independent exponential, matching the
        # convolution definition.
        return generate_samples(lambda mu, sigma, nu, size=None: nr.normal(mu, sigma, size=size) +
                                nr.exponential(scale=nu, size=size),
                                mu, sigma, nu,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        mu = self.mu
        sigma = self.sigma
        nu = self.nu
        # For very small nu the exact expression is numerically unstable, so
        # fall back to a plain normal log-density in that regime.
        lp = switch(gt(nu, 0.05 * sigma),  # This condition suggested by exGAUS.R from gamlss
                    -log(nu) + (mu - value) / nu + 0.5 * (sigma / nu) ** 2 + \
                    logpow(std_cdf((value - mu) / sigma - sigma / nu), 1.),
                    -log(sigma * sqrt(2. * pi)) - 0.5 * ((value - mu) / sigma) ** 2)
        return bound(lp,
                     sigma > 0.,
                     nu > 0.)
| dhiapet/PyMC3 | pymc3/distributions/continuous.py | Python | apache-2.0 | 31,105 | [
"Gaussian"
] | 496f5cb1ecc41442e243f5f01c4216747f10ea81e99a446af6b2623d4dceb3f4 |
# -*- coding: utf-8 -*-
import pytz
import random
import hashlib
import string
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template import Context
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from import_export import resources, fields
from datetime import datetime
from atados import settings
from unidecode import unidecode
from django_resized import ResizedImageField
from pygeocoder import Geocoder
from legacy_address import Address, City, State
from address import GoogleAddress
from atados_core.notifications import notificate
from atados_core.emails import VolunteerMail, NonprofitMail, UserMail
# Whether the notification email for a Project has been sent (labels in Portuguese).
PROJECT_EMAIL_STATUS = (
    (0, _(u'Não enviado')),
    (1, _('Email enviado')),
)
# Weekday choices for volunteer availability ((value, label) Django convention).
WEEKDAYS = (
    (1, _('Monday')),
    (2, _('Tuesday')),
    (3, _('Wednesday')),
    (4, _('Thursday')),
    (5, _('Friday')),
    (6, _('Saturday')),
    (0, _('Sunday')),
)
# Period-of-day choices for volunteer availability.
PERIODS = (
    (0, _('Morning')),
    (1, _('Afternoon')),
    (2, _('Evening')),
)
# Vetting-pipeline status for a nonprofit visit (labels kept in Portuguese).
VISIT_STATUS = (
    (0, _(u'Email enviado')),
    (1, _(u'CRM criado')),
    (2, _(u'Agendado')),
    (3, _(u'Visitado')),
    (4, _(u'Não responde')),
    (5, _(u'Não se enquadra pra estar no Atados')),
    (6, _(u'Outros estados')),
)
class Availability(models.Model):
    """A (weekday, period) time slot in which a volunteer can work."""
    weekday = models.PositiveSmallIntegerField(_('weekday'), choices=WEEKDAYS)
    period = models.PositiveSmallIntegerField(_('period'), choices=PERIODS)

    def __unicode__(self):
        # e.g. "Monday at Morning" (localized).
        return _('%(weekday)s at %(period)s') % {'weekday': self.get_weekday_display(), 'period': self.get_period_display()}

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Availability')
        verbose_name_plural = _('Availabilities')
class Cause(models.Model):
    """A social cause that volunteers, nonprofits and projects can be tagged with."""
    name = models.CharField(_('name'), max_length=100)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('cause')
        verbose_name_plural = _('causes')
class Skill(models.Model):
    """A skill that volunteers can offer and projects can require."""
    name = models.CharField(_('name'), max_length=100)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('skill')
class Company(models.Model):
    """A partner company, optionally linked to a (legacy) address."""
    name = models.CharField(_('name'), max_length=300)
    address = models.OneToOneField(Address, blank=True, null=True)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = 'atados_core'
def volunteer_image_name(self, filename):
    """Build the S3 upload path for a volunteer's profile image.

    The original extension is kept and the file is nested under the
    volunteer's user slug: ``volunteer/<slug>/<slug>.<ext>``.
    """
    _stem, extension = filename.rsplit('.', 1)
    slug = self.user.slug
    return 'volunteer/{0}/{0}.{1}'.format(slug, extension)
class Volunteer(models.Model):
    """Volunteer profile attached one-to-one to a ``User`` account."""

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Volunteer')
        verbose_name_plural = _('Volunteers')

    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    causes = models.ManyToManyField(Cause, blank=True, null=True)
    skills = models.ManyToManyField(Skill, blank=True, null=True)
    # Facebook OAuth linkage.
    facebook_uid = models.CharField(blank=True, max_length=255)
    facebook_access_token = models.CharField(blank=True, max_length=255)
    facebook_access_token_expires = models.PositiveIntegerField(blank=True, null=True)
    birthDate = models.DateField(_('Birth Date'), null=True, blank=True, default=None)
    created_date = models.DateTimeField(_('Created date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
    image = models.ImageField(_('Image'), upload_to=volunteer_image_name, blank=True,
                              null=True, default=None)
    gdd = models.BooleanField(_("Registered at DBA"), default=False, blank=False)

    def mailing(self):
        # Mail helper bound to this volunteer.
        return VolunteerMail(self)

    def image_name(self, filename):
        return volunteer_image_name(self, filename)

    @classmethod
    def create(cls, user):
        # Alternate constructor (does not save).
        return cls(user=user)

    def get_type(self):
        return "VOLUNTEER"

    def get_full_name(self):
        return self.user.name

    def get_email(self):
        return self.user.email if self.user.email else None

    def get_phone(self):
        return self.user.phone if self.user.phone else None

    def get_apply(self):
        return Apply.objects.filter(volunteer=self)

    def get_image_url(self):
        # Falls back to a static default avatar when no image was uploaded.
        return self.image.url if self.image else 'https://s3.amazonaws.com/atados-us/volunteer/default_profile.jpg'

    def get_projects(self):
        # Projects this volunteer applied to (ignoring canceled applies).
        return Project.objects.filter(id__in=Apply.objects.filter(volunteer_id=self.id, canceled=False).values_list('project', flat=True))

    def get_nonprofits(self):
        return Nonprofit.objects.filter(id__in=self.get_projects().values_list('nonprofit', flat=True))

    def save(self, *args, **kwargs):
        # NOTE(review): utcnow().replace(tzinfo=pytz.timezone(...)) mislabels a
        # UTC wall-clock time as São Paulo time (and pytz zones should be
        # attached via localize(), not replace()). The field also has
        # auto_now=True, which overwrites this value anyway — confirm intent.
        self.modified_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
        return super(Volunteer, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.user.name
def question_image_name(self, filename):
    """Return an S3 path for a question image with a random 32-letter name.

    The original base name is discarded; only the extension survives.
    """
    _stem, extension = filename.rsplit('.', 1)
    random_name = ''.join(random.choice(string.ascii_lowercase) for _i in range(32))
    return 'question/{}.{}'.format(random_name, extension)
def nonprofit_image_name(self, filename):
    """S3 path for a nonprofit's logo: ``nonprofit/<user slug>.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'nonprofit/{}.{}'.format(self.user.slug, extension)
def nonprofit_image_name_small(self, filename):
    """S3 path for the small logo variant: ``nonprofit/<user slug>_small.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'nonprofit/{}_small.{}'.format(self.user.slug, extension)
def nonprofit_image_name_medium(self, filename):
    """S3 path for the medium logo variant: ``nonprofit/<user slug>_medium.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'nonprofit/{}_medium.{}'.format(self.user.slug, extension)
def nonprofit_image_name_large(self, filename):
    """S3 path for the large logo variant: ``nonprofit/<user slug>_large.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'nonprofit/{}_large.{}'.format(self.user.slug, extension)
def nonprofit_cover_name(self, filename):
    """S3 path for a nonprofit's cover image: ``nonprofit-cover/<user slug>.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'nonprofit-cover/{}.{}'.format(self.user.slug, extension)
class Nonprofit(models.Model):
    """A nonprofit organization profile attached one-to-one to a ``User``.

    Fix: the original class defined ``__unicode__`` twice with identical
    bodies; the dead first definition was removed.
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    causes = models.ManyToManyField(Cause, blank=True, null=True)
    volunteers = models.ManyToManyField(Volunteer, blank=True, null=True)
    name = models.CharField(_('Name'), max_length=150)
    person_name = models.CharField(_('Person Name'), max_length=150)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name='nonprofits', limit_choices_to={'is_staff': True}, verbose_name=_('Owner'))
    not_nonprofit = models.BooleanField(_(u'Nao e uma ONG'), default=False, blank=False)

    def ascii_name(self):
        # ASCII transliteration of the name (search/sort friendly).
        return unidecode(self.name)

    details = models.TextField(_('Details'), max_length=3000, blank=True,
                               null=True, default=None)
    description = models.TextField(_('Short description'), max_length=160,
                                   blank=True, null=True)
    website = models.URLField(blank=True, null=True, default=None)
    facebook_page = models.URLField(blank=True, null=True, default=None)
    google_page = models.URLField(blank=True, null=True, default=None)
    twitter_handle = models.URLField(blank=True, null=True, default=None)
    companies = models.ManyToManyField(Company, blank=True, null=True)

    # ----- admin thumbnails for the directly-stored images -----
    def image_tag(self):
        return u'<img src="%s" />' % self.image.url
    image_tag.short_description = 'Logo 200x200'
    image_tag.allow_tags = True

    def cover_tag(self):
        return u'<img src="%s" />' % self.cover.url
    cover_tag.short_description = 'Cover 1450x340'
    cover_tag.allow_tags = True

    # ----- admin thumbnails for the UploadedImage-backed images -----
    def uploaded_image_tag(self):
        return u'<img style="max-width: 100%" src="{}" />'.format(self.get_image_url())
    uploaded_image_tag.short_description = 'Logo 200x200'
    uploaded_image_tag.allow_tags = True

    def uploaded_cover_tag(self):
        return u'<img style="max-width: 100%" src="{}" />'.format(self.get_cover_url())
    uploaded_cover_tag.short_description = 'Cover 1450x340'
    uploaded_cover_tag.allow_tags = True
    # ----- ----- -----

    def image_name(self, filename):
        return nonprofit_image_name(self, filename)

    image = models.ImageField(_("Logo 200x200"), upload_to=nonprofit_image_name, blank=True, null=True, default=None)
    cover = models.ImageField(_("Cover 1450x340"), upload_to=nonprofit_cover_name, blank=True, null=True, default=None)
    uploaded_image = models.ForeignKey('UploadedImage', related_name='uploaded_image', blank=True, null=True)
    uploaded_cover = models.ForeignKey('UploadedImage', related_name='uploaded_cover', blank=True, null=True)
    image_small = ResizedImageField(size=[250, 250], upload_to=nonprofit_image_name_small, blank=True, null=True, default=None)
    image_medium = ResizedImageField(size=[450, 450], upload_to=nonprofit_image_name_medium, blank=True, null=True, default=None)
    image_large = ResizedImageField(size=[900, 900], upload_to=nonprofit_image_name_large, blank=True, null=True, default=None)
    visit_status = models.PositiveSmallIntegerField(_('Visit status'), choices=VISIT_STATUS, default=None, blank=True, null=True)
    highlighted = models.BooleanField(_("Highlighted"), default=False, blank=False)
    published = models.BooleanField(_("Published"), default=False)
    published_date = models.DateTimeField(_("Published date"), blank=True, null=True)
    deleted = models.BooleanField(_("Deleted"), default=False)
    deleted_date = models.DateTimeField(_("Deleted date"), blank=True, null=True)
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    # Denormalized counter, refreshed by Apply.save().
    volunteer_count = models.IntegerField(null=False, blank=False, default=0)

    def delete(self, *args, **kwargs):
        # Soft delete only; the row is kept with deleted=True.
        self.deleted = True
        self.save()

    def get_type(self):
        return "NONPROFIT"

    def user_hidden_address(self):
        return self.user.hidden_address

    def get_description(self):
        # Fall back to the first 100 chars of `details` when no summary exists.
        return self.description if self.description else Truncator(
            self.details).chars(100)

    def get_image_url(self):
        if self.not_nonprofit:
            return self.uploaded_image.get_image_url() if self.uploaded_image else "https://s3.amazonaws.com/atados-us/nonprofit/padrao-perfil.png"
        else:
            if self.uploaded_image:
                return self.uploaded_image.get_image_url()
            else:
                if self.image:
                    return self.image.url
                else:
                    return "https://s3.amazonaws.com/atados-us/nonprofit/padrao-perfil.png"

    def get_medium_image_url(self):
        if self.not_nonprofit:
            return self.uploaded_image.get_image_url() if self.uploaded_image else "https://s3.amazonaws.com/atados-us/nonprofit/padrao-perfil.png"
        else:
            if self.uploaded_image:
                return self.uploaded_image.get_image_url()
            else:
                if self.image:
                    return self.image.url
                else:
                    return "https://s3.amazonaws.com/atados-us/nonprofit/padrao-perfil.png"

    def get_cover_url(self):
        if self.not_nonprofit:
            return self.uploaded_cover.get_image_url() if self.uploaded_cover else "https://s3.amazonaws.com/atados-us/nonprofit/padrao-cover.png"
        else:
            if self.uploaded_cover:
                return self.uploaded_cover.get_image_url()
            else:
                if self.cover:
                    return self.cover.url
                else:
                    return "https://s3.amazonaws.com/atados-us/nonprofit/padrao-cover.png"

    def get_volunteers(self):
        # Explicitly attached volunteers plus anyone who applied to a project.
        return Volunteer.objects.filter(
            Q(id__in=self.volunteers.all().values_list('id', flat=True)) |
            Q(apply__project__nonprofit__id=self.id)).distinct()

    def get_volunteers_numbers(self):
        # Returns the *uncalled* queryset ``count`` method; callers invoke
        # it as ``get_volunteers_numbers()()`` (see Apply.save).
        return Volunteer.objects.filter(
            Q(id__in=self.volunteers.all().values_list('id', flat=True)) |
            Q(apply__project__nonprofit__id=self.id)).distinct().count

    def mailing(self):
        return NonprofitMail(self)

    def get_projects(self):
        return Project.objects.filter(nonprofit=self, deleted=False)

    def get_address(self):
        return self.user.googleaddress

    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        # If _committed == False, it means the image is not uploaded to s3 yet
        # (will be uploaded on super.save()). This means the image is being updated
        # So we update other images accordingly
        if not self.image._committed:
            self.image_small = self.image._file  # ._file because we need the InMemoryUploadedFile instance
            self.image_medium = self.image._file
            self.image_large = self.image._file
        if self.pk is not None:
            orig = Nonprofit.objects.get(pk=self.pk)
            # First transition to published: stamp the date and email the org.
            if not orig.published and self.published:
                self.published_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
                mailer = NonprofitMail(self)
                mailer.sendApproved()
            if not orig.deleted and self.deleted:
                self.deleted_date = datetime.now()
        return super(Nonprofit, self).save(*args, **kwargs)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Nonprofit')
        verbose_name_plural = _('Nonprofits')
# A role (position) inside a one-off or recurring act.
class Role(models.Model):
    """A named position within a project, with prerequisites and vacancy count."""
    name = models.CharField(_('Role name'), max_length=50,
                            blank=True, null=True, default=None)
    prerequisites = models.TextField(_('Prerequisites'), max_length=1024,
                                     blank=True, null=True, default=None)
    details = models.TextField(_('Details'), max_length=1024, blank=True, null=True, default=None)
    vacancies = models.PositiveSmallIntegerField(_('Vacancies'),
                                                 blank=True, null=True, default=None)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('role')
        verbose_name_plural = _('roles')

    def __unicode__(self):
        return u'%s - %s - %s (%s vagas)' % (self.name, self.details, self.prerequisites, self.vacancies)
def project_image_name(self, filename):
    """S3 path for a project image: ``project/<nonprofit slug>/<project slug>.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'project/{}/{}.{}'.format(self.nonprofit.user.slug, self.slug, extension)
def project_image_name_small(self, filename):
    """S3 path for the small project image: ``project/<nonprofit slug>/<project slug>_small.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'project/{}/{}_small.{}'.format(self.nonprofit.user.slug, self.slug, extension)
def project_image_name_medium(self, filename):
    """S3 path for the medium project image: ``project/<nonprofit slug>/<project slug>_medium.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'project/{}/{}_medium.{}'.format(self.nonprofit.user.slug, self.slug, extension)
def project_image_name_large(self, filename):
    """S3 path for the large project image: ``project/<nonprofit slug>/<project slug>_large.<ext>``."""
    _stem, extension = filename.rsplit('.', 1)
    return 'project/{}/{}_large.{}'.format(self.nonprofit.user.slug, self.slug, extension)
class Project(models.Model):
    """A volunteering opportunity ("vaga") offered by a ``Nonprofit``.

    Fix: ``description`` was declared twice; the first declaration
    (``TextField(_('Short description'), max_length=160, ...)``) was dead code
    because the later bare ``TextField`` overrode it in the class body. The
    dead first declaration was removed; effective behavior is unchanged.
    """
    nonprofit = models.ForeignKey(Nonprofit)
    name = models.CharField(_('Project name'), max_length=100)
    email_status = models.PositiveSmallIntegerField(_('Email Status'), choices=PROJECT_EMAIL_STATUS, default=None, blank=True, null=True)

    def ascii_name(self):
        # ASCII transliteration of the name (search/sort friendly).
        return unidecode(self.name)

    slug = models.SlugField(max_length=100, unique=True)
    details = models.TextField(_('Details'), max_length=3000)
    facebook_event = models.URLField(blank=True, null=True, default=None)
    responsible = models.CharField(_('Responsible name'), max_length=50,
                                   blank=True, null=True)
    phone = models.CharField(_('Phone'), max_length=31, blank=True, null=True)
    email = models.EmailField(_('E-mail'), blank=True, null=True)
    published = models.BooleanField(_("Published"), default=False)
    post_fb = models.BooleanField(_("Post FB"), default=False)
    news = models.BooleanField(_("News"), default=False)
    published_date = models.DateTimeField(_("Published date"), blank=True, null=True)
    closed = models.BooleanField(_("Closed"), default=False)
    closed_date = models.DateTimeField(_("Closed date"), blank=True, null=True)
    deleted = models.BooleanField(_("Deleted"), default=False)
    deleted_date = models.DateTimeField(_("Deleted date"), blank=True, null=True)
    created_date = models.DateTimeField(_("Created date"), auto_now_add=True)
    modified_date = models.DateTimeField(_("Modified date"), auto_now=True)
    address = models.OneToOneField(Address, blank=True, null=True)
    googleaddress = models.OneToOneField(GoogleAddress, blank=True, null=True)
    highlighted = models.BooleanField(_("Highlighted"), default=False, blank=False)
    gdd_highlighted = models.BooleanField(_("DBA Highlighted"), default=False, blank=False)
    roles = models.ManyToManyField(Role, blank=True, null=True)
    skills = models.ManyToManyField(Skill)
    causes = models.ManyToManyField(Cause)
    legacy_nid = models.PositiveIntegerField(blank=True, null=True)
    companies = models.ManyToManyField(Company, blank=True, null=True)
    # "Dia das Boas Ações" campaign fields.
    gdd = models.BooleanField(_("Dia das boas acoes"), default=False)
    coral = models.BooleanField(_("Projeto Coral"), default=False)
    gdd_refer = models.TextField(max_length=100, blank=True, null=True)
    gdd_json = models.TextField(blank=True, null=True)
    gdd_image = models.TextField(blank=True, null=True)
    gdd_org_name = models.TextField(blank=True, null=True)
    gdd_org_image = models.TextField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    # Denormalized counter, refreshed by Apply.save().
    applied_count = models.IntegerField(blank=False, null=False, default=0)
    uploaded_image = models.ForeignKey('UploadedImage', blank=True, null=True)

    def image_tag(self):
        # Admin thumbnail.
        return u'<img src="%s" />' % self.image.url
    image_tag.short_description = 'Image 350x260'
    image_tag.allow_tags = True

    image = models.ImageField(_('Image 350x260'), upload_to=project_image_name, blank=True,
                              null=True, default=None)
    image_small = ResizedImageField(size=[350, 260], upload_to=project_image_name_small, blank=True, null=True, default=None)
    image_medium = ResizedImageField(size=[420, 312], upload_to=project_image_name_medium, blank=True, null=True, default=None)
    image_large = ResizedImageField(size=[1260, 936], upload_to=project_image_name_large, blank=True, null=True, default=None)

    def get_volunteers(self):
        # Volunteers with a non-canceled apply on this project.
        apply = Apply.objects.filter(project=self, canceled=False)
        return Volunteer.objects.filter(pk__in=[a.volunteer.pk for a in apply])

    def get_volunteers_numbers(self):
        # Returns the *uncalled* queryset ``count`` method; callers invoke
        # it as ``get_volunteers_numbers()()`` (see Apply.save).
        return Apply.objects.filter(project=self, canceled=False).count

    def delete(self, *args, **kwargs):
        # Soft delete only; the row is kept with deleted=True.
        self.deleted = True
        self.save()

    def save(self, *args, **kwargs):
        orig = None
        if self.pk is not None:
            orig = Project.objects.get(pk=self.pk)
            # First transition to published: stamp the date and notify the org.
            if not orig.published and self.published:
                self.published_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
                if self.email:
                    mailer = NonprofitMail(self.nonprofit)
                    mailer.sendProjectApproved(self)
            if not orig.closed and self.closed:
                self.closed_date = datetime.now()
            if not orig.deleted and self.deleted:
                self.deleted_date = datetime.now()
        # Slack notifications when the facebook/newsletter flags flip.
        if self.post_fb and (orig == None or orig.post_fb == False):
            notificate(channel="#posts_fb", text=u"Alguém solicitou a publicação da vaga <https://atados.com.br/vaga/{}|{}> no facebook.".format(self.slug, self.name))
        if not self.post_fb and orig != None and orig.post_fb == True:
            notificate(channel="#posts_fb", text=u"Alguém removeu a solicitação de publicação da vaga <https://atados.com.br/vaga/{}|{}> no facebook.".format(self.slug, self.name))
        if self.news and (orig == None or orig.news == False):
            notificate(channel="#news", text=u"Alguém solicitou a publicação da vaga <https://atados.com.br/vaga/{}|{}> na newsletter.".format(self.slug, self.name))
        if not self.news and orig != None and orig.news == True:
            notificate(channel="#news", text=u"Alguém removeu a solicitação de publicação da vaga <https://atados.com.br/vaga/{}|{}> na newsletter.".format(self.slug, self.name))
        # If _committed == False, it means the image is not uploaded to s3 yet
        # (will be uploaded on super.save()). This means the image is being updated
        # So we update other images accordingly
        if (not self.image._committed) or (orig is not None and (self.image != orig.image)):
            self.image_small = self.image._file  # ._file because we need the InMemoryUploadedFile instance
            self.image_medium = self.image._file
            self.image_large = self.image._file
        self.modified_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
        # If there is no description, take 100 chars from the details
        if not self.description and len(self.details) > 100:
            self.description = self.details[0:100]
        return super(Project, self).save(*args, **kwargs)

    def get_image_url(self):
        return self.uploaded_image.get_image_url() if self.uploaded_image else 'https://s3.amazonaws.com/atados-us/project/default_project.jpg'

    def get_small_image_url(self):
        return self.uploaded_image.get_image_small_url() if self.uploaded_image else 'https://s3.amazonaws.com/atados-us/project/default_project.jpg'

    def __unicode__(self):
        return u'%s - %s' % (self.name, self.nonprofit.name)

    def has_job(self):
        # True when a one-off Job is attached (OneToOne access raises otherwise).
        try:
            return True if self.job else False
        except:
            return False

    def has_work(self):
        # True when a recurring Work is attached.
        try:
            return True if self.work else False
        except:
            return False

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Project')
class AddressProject(Project):
    """Proxy model over ``Project`` (e.g. for a separate admin view of addresses)."""
    class Meta:
        app_label = 'atados_core'
        proxy = True
# Recurring act ("Ato Recorrente").
class Work(models.Model):
    """Recurring engagement details for a ``Project`` (weekly slots and hours)."""
    project = models.OneToOneField(Project, blank=True, null=True, verbose_name=_('Project'))
    availabilities = models.ManyToManyField(Availability, verbose_name=_('Availability'))
    weekly_hours = models.PositiveSmallIntegerField(_('Weekly hours'), blank=True, null=True)
    description = models.CharField(_('Description'), blank=True, null=True, max_length=4000)
    can_be_done_remotely = models.BooleanField(_('This work can be done remotely.'), default=False)

    def __unicode__(self):
        name = self.project and self.project.name or _('Unbound Work')
        return u"{} : {} horas por semana".format(name, self.weekly_hours or 0)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('work')
        verbose_name_plural = _('works')
class JobDate(models.Model):
    """A single dated occurrence of a one-off ``Job``."""
    # NOTE(review): the verbose name of ``name`` is _("Start date"), which looks
    # like a copy-paste slip — confirm before relying on admin labels.
    name = models.CharField(_("Start date"), blank=True, null=True, max_length=20)
    start_date = models.DateTimeField(_("Start date"), blank=True, null=True)
    end_date = models.DateTimeField(_("End date"), blank=True, null=True)
    job = models.ForeignKey('Job', on_delete=models.CASCADE, blank=True, null=True)#++, related_name='dates')

    def __unicode__(self):
        start_date = self.start_date and self.start_date.strftime("%d/%m/%Y %T") or ''
        end_date = self.end_date and self.end_date.strftime("%d/%m/%Y %T") or ''
        return u"{} : {} ~ {}".format(self.name, start_date, end_date)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Job Date')
        verbose_name_plural = _('Jobs Dates')
# One-off act ("Ato Pontual").
class Job(models.Model):
    """One-off engagement details for a ``Project``, spanning one or more dates."""
    project = models.OneToOneField(Project, blank=True, null=True)
    start_date = models.DateTimeField(_("Start date"), blank=True, null=True)
    end_date = models.DateTimeField(_("End date"), blank=True, null=True)
    can_be_done_remotely = models.BooleanField(_("This job can be done remotely"), default=False)
    dates = models.ManyToManyField('JobDate', blank=True, null=True, related_name='the_job')

    def __unicode__(self):
        name = self.project and self.project.name or _('Unbound Job')
        start_date = self.start_date and self.start_date.strftime("%d/%m/%Y %T") or ''
        end_date = self.end_date and self.end_date.strftime("%d/%m/%Y %T") or ''
        return u"{} : {} ~ {}".format(name, start_date, end_date)

    def update_dates(self):
        # Recompute the overall start/end span from the attached JobDates.
        start = self.dates.all().order_by('start_date').first().start_date
        end = self.dates.all().order_by('-end_date').first().end_date
        self.start_date = start
        self.end_date = end
        self.save()

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('Job')
        verbose_name_plural = _('Jobs')
class ApplyStatus(models.Model):
    """Named status of a volunteer's application (lookup table)."""
    name = models.CharField(_('name'), max_length=30)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('apply status')
class Apply(models.Model):
    """A volunteer's application to a project.

    Saving keeps the denormalized counters on the related ``Project`` and
    ``Nonprofit`` in sync with the number of non-canceled applies.
    """
    volunteer = models.ForeignKey(Volunteer, verbose_name=_('Volunteer'))
    project = models.ForeignKey(Project, verbose_name=_('Project'))
    status = models.ForeignKey(ApplyStatus, verbose_name=_('ApplyStatus'))
    date = models.DateTimeField(_('Date'), auto_now_add=True, blank=True)  # created date
    canceled = models.BooleanField(_("Canceled"), default=False)
    canceled_date = models.DateTimeField(_("Canceled date"), blank=True, null=True)
    email = models.CharField(_('Email'), max_length=200, blank=True, null=True)

    def save(self, *args, **kwargs):
        # Fix: the original used datetime.now().replace(tzinfo=pytz.timezone(...)),
        # which attaches the zone with a wrong (LMT) offset and mislabels naive
        # local time. django.utils.timezone.now() (imported at module level)
        # gives a correct, USE_TZ-aware timestamp.
        if self.canceled:
            self.canceled_date = timezone.now()
        else:
            self.canceled_date = None
        return_data = super(Apply, self).save(*args, **kwargs)
        # Updating project applied_count
        # get_volunteers_numbers return a function, so ()()
        self.project.applied_count = self.project.get_volunteers_numbers()()
        self.project.save()
        nonprofit = self.project.nonprofit
        nonprofit.volunteer_count = nonprofit.get_volunteers_numbers()()
        nonprofit.save()
        return return_data

    def __unicode__(self):
        return u"[%s] %s - %s" % (self.canceled, self.volunteer.user.name, self.project.name)

    def ask_interaction_confirmation_to_user(self):
        # Emails the volunteer asking them to confirm the interaction happened.
        return VolunteerMail(self.volunteer).askActInteractionConfirmation(self.project, self.volunteer)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('apply')
        verbose_name_plural = _('applies')
class Recommendation(models.Model):
    """A project recommended for a given state/city, with an ordering index."""
    project = models.ForeignKey(Project)
    sort = models.PositiveSmallIntegerField(_('Sort'),
                                            blank=True, null=True, default=None)
    state = models.ForeignKey(State, blank=True, null=True, default=None)
    city = models.ForeignKey(City, blank=True, null=True, default=None)

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('recommendation')
def random_token(extra=None, hash_func=hashlib.sha256):
    """Generate a hex token by hashing 512 random bits plus optional extras.

    ``extra`` items (strings) are mixed into the hash input. With the default
    SHA-256 ``hash_func`` the result is a 64-character hex string.
    """
    seed_parts = [] if extra is None else extra
    entropy = str(random.SystemRandom().getrandbits(512))
    payload = "".join(seed_parts + [entropy]).encode('utf-8')
    return hash_func(payload).hexdigest()
class UserManager(BaseUserManager):
    """Manager for ``User`` that wires up signup emails and API tokens."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and persist a regular user.

        If a user with this email already has a token it is reused (so a
        re-signup keeps the same confirmation token); otherwise a fresh
        random token is generated. Sends the signup-confirmation email
        before saving. Raises ValueError when ``email`` is empty.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email address must be set')
        email = UserManager.normalize_email(email)
        # Fix: the original called ``User.get(email=...)`` (missing
        # ``.objects``), which always raised AttributeError and was silently
        # swallowed by a bare ``except:`` — so an existing token was never
        # reused. Query through the manager and catch only DoesNotExist.
        try:
            user = User.objects.get(email=email)
            if user.token is not None:
                token = user.token
            else:
                token = random_token([email])
        except User.DoesNotExist:
            token = random_token([email])
        user = self.model(email=email, token=token,
                          is_staff=False, is_active=True,
                          last_login=now, joined_date=now, **extra_fields)
        site = extra_fields.get('site', 'https://www.atados.com.br')
        mailer = UserMail(user)
        mailer.sendSignupConfirmation(site, token)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create a user with the staff/superuser flags enabled."""
        user = self.create_user(email, password, **extra_fields)
        user.is_staff = True
        user.is_active = True
        user.is_superuser = True
        user.save()
        return user

    class Meta:
        app_label = 'atados_core'
class User(AbstractBaseUser):
    """Custom auth user keyed by email; profile lives in Volunteer/Nonprofit."""
    email = models.EmailField('Email', max_length=254, unique=True)
    name = models.CharField(_('Name'), max_length=200, blank=True)
    slug = models.SlugField(_('Slug'), max_length=100, unique=True)
    is_staff = models.BooleanField(_('Staff'), default=False)
    is_active = models.BooleanField(_('Active'), default=True)
    is_email_verified = models.BooleanField(_('Email verified'), default=False)
    joined_date = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    modified_date = models.DateTimeField(auto_now=True, null=True, blank=True)
    address = models.OneToOneField(Address, blank=True, null=True)
    googleaddress = models.OneToOneField(GoogleAddress, blank=True, null=True)
    hidden_address = models.BooleanField(_("Endereco escondido."), default=False)
    company = models.ForeignKey(Company, blank=True, null=True)
    site = models.URLField(blank=True, null=True, default=None)
    phone = models.CharField(_('Phone'), max_length=20, blank=True, null=True, default=None)
    legacy_uid = models.PositiveIntegerField(blank=True, null=True)
    # API/confirmation token (see UserManager.create_user and random_token()).
    token = models.CharField(verbose_name=_('token'), max_length=64, unique=True, null=True, default=None)
    objects = UserManager()
    USERNAME_FIELD = 'email'

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def email_user(self, subject, message, from_email=None):
        send_mail(subject, message, from_email, [self.email])

    def get_profile_type(self):
        # 'volunteer' / 'nonprofit' / None, depending on the attached profile.
        profile = self.get_profile()
        if profile:
            if isinstance(profile, Volunteer):
                return 'volunteer'
            if isinstance(profile, Nonprofit):
                return 'nonprofit'
        return None

    def get_profile(self):
        # Volunteer takes precedence over Nonprofit when both exist.
        try:
            v = Volunteer.objects.get(user_id=self.pk)
            return v
        except Volunteer.DoesNotExist:
            try:
                n = Nonprofit.objects.get(user_id=self.pk)
                return n
            except Nonprofit.DoesNotExist:
                pass
        return None

    def fix_profile(self):
        # Create a default Volunteer profile if none exists; True when created.
        profile = self.get_profile()
        if not profile:
            v = Volunteer(user=self)
            v.save()
            return True
        return False

    def get_short_name(self):
        return self.name

    def save(self, *args, **kwargs):
        # NOTE(review): utcnow().replace(tzinfo=pytz.timezone(...)) mislabels a
        # UTC wall-clock time as São Paulo time; the field also has
        # auto_now=True, which overwrites this value anyway — confirm intent.
        self.modified_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
        return super(User, self).save(*args, **kwargs)

    def get_address(self):
        # NOTE(review): this returns the address only when hidden_address is
        # True and returns None otherwise — the condition looks inverted for a
        # flag named "hidden"; confirm against callers before changing.
        if self.hidden_address:
            try:
                return self.googleaddress
            except:
                return None

    def get_admin_url(self):
        return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))

    def has_module_perms(self, app_label):
        # Handle whether the user has permissions to view the app `app_label`?"
        return True

    def has_perm(self, perm, obj=None):
        # Handle whether the user has a specific permission?"
        return True
class Comment(models.Model):
    """A user comment on a project (soft-deleted, never removed from the DB)."""
    project = models.ForeignKey(Project, null=False)
    user = models.ForeignKey(User, null=False)
    comment = models.TextField()
    created_date = models.DateTimeField(auto_now_add=True)
    deleted = models.BooleanField(_("Deleted"), default=False)
    deleted_date = models.DateTimeField(_("Deleted date"), blank=True, null=True)

    def delete(self, *args, **kwargs):
        # Soft delete: flag the row and stamp the time.
        # NOTE(review): utcnow().replace(tzinfo=pytz.timezone(...)) mislabels a
        # UTC wall-clock time as São Paulo time — confirm before relying on it.
        self.deleted = True
        self.deleted_date = datetime.utcnow().replace(tzinfo=pytz.timezone("America/Sao_Paulo"))
        self.save()

    def __unicode__(self):
        return u"(%s) %s: %s" % (self.project.name, self.user.email, self.comment)

    class Meta:
        app_label = 'atados_core'
class VolunteerResource(resources.ModelResource):
    """django-import-export resource exporting name/email/phone of volunteers."""
    nome = fields.Field()
    email = fields.Field()
    telefone = fields.Field()

    class Meta:
        app_label = 'atados_core'
        model = Volunteer
        # No model fields directly; only the dehydrated columns below.
        fields = ()

    def dehydrate_nome(self, volunteer):
        return volunteer.user.name

    def dehydrate_email(self, volunteer):
        return volunteer.user.email

    def dehydrate_telefone(self, volunteer):
        return volunteer.user.phone
class Subscription(models.Model):
    """A (possibly recurring) donation/payment record.

    Recurring subscriptions form a parent/child tree: ``parent`` points to the
    original subscription and each monthly charge is a child row whose
    ``month`` field stores the billing month as ``MM/YYYY``.
    """
    name = models.CharField(_('Name'), max_length=200, blank=True, null=True)
    email = models.CharField(_('Email'), max_length=200, blank=True, null=True)
    phone = models.CharField(_('Phone'), max_length=200, blank=True, null=True)
    doc = models.CharField(_('Doc'), max_length=200, blank=True, null=True)
    googleaddress = models.ForeignKey(GoogleAddress, blank=True, null=True)
    street = models.CharField(_('Street'), max_length=200, blank=True, null=True)
    number = models.CharField(_('Number'), max_length=200, blank=True, null=True)
    complement = models.CharField(_('Complement'), max_length=200, blank=True, null=True)
    postal_code = models.CharField(_('Zip code'), max_length=200, blank=True, null=True)
    city = models.CharField(_('City'), max_length=200, blank=True, null=True)
    state = models.CharField(_('State'), max_length=200, blank=True, null=True)
    # Payment-gateway fields (card hash/id and transaction id/status).
    cardhash = models.CharField(_('Card'), max_length=500)
    card_id = models.CharField(_('Card ID'), max_length=500)
    tid = models.CharField(_('Tid'), max_length=500, blank=True, null=True)
    status = models.CharField(_('Status'), max_length=500, blank=True, null=True)
    status_reason = models.CharField(_('Status'), max_length=500, blank=True, null=True)
    value = models.FloatField(blank=False, null=False, default=0.0)
    active = models.BooleanField(default=False, null=False)
    recurrent = models.BooleanField(default=False, null=False)
    # Original subscription for monthly child charges.
    parent = models.ForeignKey('self', default='', null=True)
    created_date = models.DateTimeField(auto_now_add=True)
    month = models.CharField(_('Month payment'), max_length=500, blank=True, null=True)
    deleted = models.BooleanField(_("Deleted"), default=False)
    deleted_date = models.DateTimeField(_("Deleted date"), blank=True, null=True)

    class Meta:
        app_label = 'atados_core'

    def get_month(self):
        """ Returns a string like 10/2015 """
        return self.created_date.strftime('%m/%Y')

    def generate_payment_dict(self):
        """ Returns a dict where the keys correspond to the payment month, and the value is a empty dict(except for current month, that come prefilled as {id: status})
        This dict should be filled by fill_payment_dict()
        Eg. {'10/2015': {52: 'paid'}, '11/2015': {57: 'paid'}, '12/2015': None, '01/2016': None}
        The last key is, as expected, the current month if the payment day has passed
        or last month if it's not payment day yet
        """
        if self.recurrent:
            payment_dict = {}
            start_day = self.created_date.day
            start_month = self.created_date.month
            start_year = self.created_date.year
            now = datetime.now()
            end_day = now.day
            end_month = now.month
            end_year = now.year
            # Enumerate every billing month between creation and now, skipping
            # the current month until its payment day has passed.
            for year in range(start_year, end_year+1):
                for month in range(1, 12+1):
                    if ( (year == start_year and year != end_year and month >= start_month) or
                         (year == end_year and year != start_year and month < end_month) or
                         (year == start_year and start_year == end_year and month >= start_month and month < end_month) or
                         (year == end_year and month == end_month and start_day <= end_day) or
                         (year not in [start_year, end_year]) ):
                        payment_dict['{:02d}/{:04d}'.format(month, year)] = {}
            payment_dict[self.get_month()] = {self.id: self.status}
            return payment_dict
        else:
            raise ValueError('This instance is not recurrent.')

    def fill_payment_dict(self, payment_dict, children):
        """ This method takes an payment_dict and fill the respective months
        with the Subscription id
        """
        for child in children:
            if child.month:
                if child.month not in payment_dict:  # Avoid errors if there's a payment for a future month(manual payment)
                    payment_dict[child.month] = {child.id: child.status}
                else:
                    payment_dict[child.month][child.id] = child.status
        return payment_dict
class SubscriptionIntent(models.Model):
    """A subscription the user started but did not complete.

    Only contact data and the intended value are captured; no card or
    payment-gateway fields exist on this model.
    """

    name = models.CharField(_('Name'), max_length=200, blank=True, null=True)
    email = models.CharField(_('Email'), max_length=200, blank=True, null=True)
    # Intended amount; units/currency not visible here — presumably the
    # site currency (same as Subscription.value).
    value = models.FloatField(blank=False, null=False, default=0.0)
    created_date = models.DateTimeField(auto_now_add=True, null=True)
class Question(models.Model):
    """A user-submitted question/feedback item with an optional screenshot."""

    name = models.CharField(_('Name'), max_length=200, blank=True, null=True)
    description = models.TextField(_('Description'), blank=True, null=True)
    # Ordering weight; how it is consumed is not visible here — confirm
    # against the listing/query code.
    priority = models.IntegerField(blank=False, null=False, default=0)
    screenshot = models.ImageField(_("Screenshot"), upload_to=question_image_name, blank=True, null=True, default=None)
    # NOTE(review): auto_now_add/auto_now already manage these timestamps;
    # the extra default=datetime.now is redundant (and datetime.now is
    # naive) — confirm before cleaning up, as it affects migrations.
    created_date = models.DateTimeField(auto_now_add=True, blank=True, default=datetime.now)
    modified_date = models.DateTimeField(auto_now=True, blank=True, default=datetime.now)
    # Soft-delete flags: rows are marked deleted, not removed.
    deleted = models.BooleanField(_("Deleted"), default=False)
    deleted_date = models.DateTimeField(_("Deleted date"), blank=True, null=True)

    class Meta:
        app_label = 'atados_core'
def banner_image_name(self, filename):
    """Build the storage path for a Banner background image.

    Used as the ``upload_to`` callable of ``Banner.image``: Django calls it
    with the model instance and the original upload file name.  The stored
    file is named after the banner title, keeping only the final extension.

    Args:
        self: the Banner instance being saved.
        filename: original name of the uploaded file.

    Returns:
        str: a path like ``slides/<title>.<ext>`` (or ``slides/<title>``
        when the upload has no extension).
    """
    # rsplit from the right so only the last extension survives for names
    # like "archive.tar.gz".  The original unconditional unpacking raised
    # ValueError for extension-less filenames; guard against that case.
    if '.' in filename:
        extension = filename.rsplit('.', 1)[1]
        return 'slides/%s.%s' % (self.title, extension)
    return 'slides/%s' % self.title
class Banner(models.Model):
    """Homepage banner slide: background image, overlay text and ordering."""

    def save(self, *args, **kwargs):
        """Persist the banner, defaulting ``order`` to the end of the list."""
        # Re-assign the raw uploaded file when it has not been committed to
        # storage yet so the storage backend processes it on this save.
        if not self.image._committed:
            self.image = self.image._file
        # order == 0 means "unset": place this banner after all existing ones.
        if self.order == 0:
            self.order = Banner.objects.count() + 1
        # (Removed leftover debug `print(self.order)` that polluted stdout.)
        super(Banner, self).save(*args, **kwargs)

    def get_image_url(self):
        """Return the image URL, or '' when no image/URL is available."""
        return self.image.url if (self.image and hasattr(self.image, 'url')) else ''

    title = models.CharField(_('Titulo'), max_length=1000, blank=True, null=True)
    text = models.CharField(_('Texto'), max_length=1000, blank=True, null=True)
    image = models.ImageField(_('Background'), upload_to=banner_image_name, blank=False, null=False, default=None)
    link = models.URLField(blank=True, null=True, default=None)
    link_text = models.CharField(_(u'Texto do Botão'), max_length=300, blank=False, null=False, default='Saiba mais')
    order = models.IntegerField(_('Ordem'), blank=False, null=False, default=0)
    active = models.BooleanField(_('Ativo'), default=True)

    class Meta:
        app_label = 'atados_core'
class Newsletter(models.Model):
    """Newsletter signup entry: name, email and an optional address."""

    name = models.CharField(_('Name'), max_length=100)
    email = models.CharField(_('Email'), max_length=100)
    googleaddress = models.OneToOneField(GoogleAddress, blank=True, null=True, verbose_name=_('GoogleAddress'))

    class Meta:
        app_label = 'atados_core'
        verbose_name = _('News')
        verbose_name_plural = _('News')
class Landing(models.Model):
    """Contact captured from a landing page (email, organization, city)."""

    email = models.CharField(_('Email'), max_length=150)
    organization = models.CharField(_('Organizacao'), max_length=150)
    city = models.CharField(_('Cidade'), max_length=100)
    created_date = models.DateTimeField(_('Created date'), auto_now_add=True)

    class Meta:
        app_label = 'atados_core'
class LeadCompany(models.Model):
    """Company lead: same contact fields as Landing plus a headcount."""

    email = models.CharField(_('Email'), max_length=150)
    organization = models.CharField(_('Organizacao'), max_length=150)
    city = models.CharField(_('Cidade'), max_length=100)
    created_date = models.DateTimeField(_('Created date'), auto_now_add=True)
    # Reported number of employees; 0 means "not provided".
    employee_number = models.IntegerField(default=0)

    class Meta:
        app_label = 'atados_core'
"VisIt"
] | 1b0c96fcfeeb44f4b1fc6fea0974af3e837da28bf118256bcb50d25e4eb67ecc |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the cf module.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
from unittest import mock
import iris
import iris.fileformats.cf as cf
class TestCaching(tests.IrisTest):
    """Verify that CF variables cache netCDF attribute access."""

    def test_cached(self):
        # Make sure attribute access to the underlying netCDF4.Variable
        # is cached.
        name = "foo"
        nc_var = mock.MagicMock()
        # Constructing the CF variable reads ncattrs() exactly once.
        cf_var = cf.CFAncillaryDataVariable(name, nc_var)
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        # Accessing a netCDF attribute should result in no further calls
        # to nc_var.ncattrs() and the creation of an attribute on the
        # cf_var.
        # NB. Can't use hasattr() because that triggers the attribute
        # to be created!
        self.assertTrue("coordinates" not in cf_var.__dict__)
        _ = cf_var.coordinates
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue("coordinates" in cf_var.__dict__)
        # Trying again results in no change.
        _ = cf_var.coordinates
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue("coordinates" in cf_var.__dict__)
        # Trying another attribute results in just a new attribute.
        self.assertTrue("standard_name" not in cf_var.__dict__)
        _ = cf_var.standard_name
        self.assertEqual(nc_var.ncattrs.call_count, 1)
        self.assertTrue("standard_name" in cf_var.__dict__)
@tests.skip_data
class TestCFReader(tests.IrisTest):
    """Exercise CFReader classification of variables in a rotated-pole
    precipitation file: coordinates, bounds, grid mappings, data variables
    and global attributes."""

    def setUp(self):
        filename = tests.get_data_path(
            ("NetCDF", "rotated", "xyt", "small_rotPole_precipitation.nc")
        )
        self.cfr = cf.CFReader(filename)

    def test_ancillary_variables_pass_0(self):
        # The test file declares no ancillary variables.
        self.assertEqual(self.cfr.cf_group.ancillary_variables, {})

    def test_auxiliary_coordinates_pass_0(self):
        # 2-D lat/lon fields are classified as auxiliary coordinates.
        self.assertEqual(
            sorted(self.cfr.cf_group.auxiliary_coordinates.keys()),
            ["lat", "lon"],
        )
        lat = self.cfr.cf_group["lat"]
        self.assertEqual(lat.shape, (190, 174))
        self.assertEqual(lat.dimensions, ("rlat", "rlon"))
        self.assertEqual(lat.ndim, 2)
        self.assertEqual(
            lat.cf_attrs(),
            (
                ("long_name", "latitude"),
                ("standard_name", "latitude"),
                ("units", "degrees_north"),
            ),
        )
        lon = self.cfr.cf_group["lon"]
        self.assertEqual(lon.shape, (190, 174))
        self.assertEqual(lon.dimensions, ("rlat", "rlon"))
        self.assertEqual(lon.ndim, 2)
        self.assertEqual(
            lon.cf_attrs(),
            (
                ("long_name", "longitude"),
                ("standard_name", "longitude"),
                ("units", "degrees_east"),
            ),
        )

    def test_bounds_pass_0(self):
        self.assertEqual(
            sorted(self.cfr.cf_group.bounds.keys()), ["time_bnds"]
        )
        time_bnds = self.cfr.cf_group["time_bnds"]
        self.assertEqual(time_bnds.shape, (4, 2))
        self.assertEqual(time_bnds.dimensions, ("time", "time_bnds"))
        self.assertEqual(time_bnds.ndim, 2)
        # Bounds variables carry no CF attributes of their own.
        self.assertEqual(time_bnds.cf_attrs(), ())

    def test_coordinates_pass_0(self):
        # 1-D dimension coordinates: rotated lat/lon and time.
        self.assertEqual(
            sorted(self.cfr.cf_group.coordinates.keys()),
            ["rlat", "rlon", "time"],
        )
        rlat = self.cfr.cf_group["rlat"]
        self.assertEqual(rlat.shape, (190,))
        self.assertEqual(rlat.dimensions, ("rlat",))
        self.assertEqual(rlat.ndim, 1)
        attr = []
        attr.append(("axis", "Y"))
        attr.append(("long_name", "rotated latitude"))
        attr.append(("standard_name", "grid_latitude"))
        attr.append(("units", "degrees"))
        self.assertEqual(rlat.cf_attrs(), tuple(attr))
        rlon = self.cfr.cf_group["rlon"]
        self.assertEqual(rlon.shape, (174,))
        self.assertEqual(rlon.dimensions, ("rlon",))
        self.assertEqual(rlon.ndim, 1)
        attr = []
        attr.append(("axis", "X"))
        attr.append(("long_name", "rotated longitude"))
        attr.append(("standard_name", "grid_longitude"))
        attr.append(("units", "degrees"))
        self.assertEqual(rlon.cf_attrs(), tuple(attr))
        time = self.cfr.cf_group["time"]
        self.assertEqual(time.shape, (4,))
        self.assertEqual(time.dimensions, ("time",))
        self.assertEqual(time.ndim, 1)
        attr = []
        attr.append(("axis", "T"))
        attr.append(("bounds", "time_bnds"))
        attr.append(("calendar", "gregorian"))
        attr.append(("long_name", "Julian Day"))
        attr.append(("units", "days since 1950-01-01 00:00:00.0"))
        self.assertEqual(time.cf_attrs(), tuple(attr))

    def test_data_pass_0(self):
        self.assertEqual(
            sorted(self.cfr.cf_group.data_variables.keys()), ["pr"]
        )
        data = self.cfr.cf_group["pr"]
        self.assertEqual(data.shape, (4, 190, 174))
        self.assertEqual(data.dimensions, ("time", "rlat", "rlon"))
        self.assertEqual(data.ndim, 3)
        attr = []
        attr.append(("_FillValue", 1e30))
        attr.append(("cell_methods", "time: mean"))
        attr.append(("coordinates", "lon lat"))
        attr.append(("grid_mapping", "rotated_pole"))
        attr.append(("long_name", "Precipitation"))
        attr.append(("missing_value", 1e30))
        attr.append(("standard_name", "precipitation_flux"))
        attr.append(("units", "kg m-2 s-1"))
        attr = tuple(attr)
        # Float-valued attributes (_FillValue, missing_value) are compared
        # with a tolerance; the rest are compared exactly.
        self.assertEqual(data.cf_attrs()[0][0], attr[0][0])
        self.assertAlmostEqual(data.cf_attrs()[0][1], attr[0][1], delta=1.6e22)
        self.assertEqual(data.cf_attrs()[1:5], attr[1:5])
        self.assertAlmostEqual(data.cf_attrs()[5][1], attr[5][1], delta=1.6e22)
        self.assertEqual(data.cf_attrs()[6:], attr[6:])

    def test_formula_terms_pass_0(self):
        # No hybrid-coordinate formula terms in this file.
        self.assertEqual(self.cfr.cf_group.formula_terms, {})

    def test_grid_mapping_pass_0(self):
        self.assertEqual(
            sorted(self.cfr.cf_group.grid_mappings.keys()), ["rotated_pole"]
        )
        rotated_pole = self.cfr.cf_group["rotated_pole"]
        # Grid-mapping variables are scalar containers for attributes only.
        self.assertEqual(rotated_pole.shape, ())
        self.assertEqual(rotated_pole.dimensions, ())
        self.assertEqual(rotated_pole.ndim, 0)
        attr = []
        attr.append(("grid_mapping_name", "rotated_latitude_longitude"))
        attr.append(("grid_north_pole_latitude", 18.0))
        attr.append(("grid_north_pole_longitude", -140.75))
        self.assertEqual(rotated_pole.cf_attrs(), tuple(attr))

    def test_cell_measures_pass_0(self):
        self.assertEqual(self.cfr.cf_group.cell_measures, {})

    def test_global_attributes_pass_0(self):
        self.assertEqual(
            sorted(self.cfr.cf_group.global_attributes.keys()),
            [
                "Conventions",
                "NCO",
                "experiment",
                "history",
                "institution",
                "source",
            ],
        )
        self.assertEqual(
            self.cfr.cf_group.global_attributes["Conventions"], "CF-1.0"
        )
        self.assertEqual(
            self.cfr.cf_group.global_attributes["experiment"], "ER3"
        )
        self.assertEqual(
            self.cfr.cf_group.global_attributes["institution"], "DMI"
        )
        self.assertEqual(
            self.cfr.cf_group.global_attributes["source"], "HIRHAM"
        )

    def test_variable_cf_group_pass_0(self):
        # Per-variable cf_group lists the variables each one references.
        self.assertEqual(
            sorted(self.cfr.cf_group["time"].cf_group.keys()), ["time_bnds"]
        )
        self.assertEqual(
            sorted(self.cfr.cf_group["pr"].cf_group.keys()),
            ["lat", "lon", "rlat", "rlon", "rotated_pole", "time"],
        )

    def test_variable_attribute_touch_pass_0(self):
        # CF variables track which attributes have been accessed ("touched").
        lat = self.cfr.cf_group["lat"]
        self.assertEqual(
            lat.cf_attrs(),
            (
                ("long_name", "latitude"),
                ("standard_name", "latitude"),
                ("units", "degrees_north"),
            ),
        )
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(
            lat.cf_attrs_unused(),
            (
                ("long_name", "latitude"),
                ("standard_name", "latitude"),
                ("units", "degrees_north"),
            ),
        )
        # touch some variable attributes.
        lat.long_name
        lat.units
        self.assertEqual(
            lat.cf_attrs_used(),
            (("long_name", "latitude"), ("units", "degrees_north")),
        )
        self.assertEqual(
            lat.cf_attrs_unused(), (("standard_name", "latitude"),)
        )
        # clear the attribute touch history.
        lat.cf_attrs_reset()
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(
            lat.cf_attrs_unused(),
            (
                ("long_name", "latitude"),
                ("standard_name", "latitude"),
                ("units", "degrees_north"),
            ),
        )
@tests.skip_data
class TestLoad(tests.IrisTest):
    """Check attribute and cell-method handling on cubes loaded from NetCDF."""

    def test_attributes_empty(self):
        filename = tests.get_data_path(
            ("NetCDF", "global", "xyt", "SMALL_hires_wind_u_for_ipcc4.nc")
        )
        cube = iris.load_cube(filename)
        # The time coordinate of this file carries no extra attributes.
        self.assertEqual(cube.coord("time").attributes, {})

    def test_attributes_contain_positive(self):
        filename = tests.get_data_path(
            ("NetCDF", "global", "xyt", "SMALL_hires_wind_u_for_ipcc4.nc")
        )
        cube = iris.load_cube(filename)
        self.assertEqual(cube.coord("height").attributes["positive"], "up")

    def test_attributes_populated(self):
        filename = tests.get_data_path(
            ("NetCDF", "label_and_climate", "small_FC_167_mon_19601101.nc")
        )
        cube = iris.load_cube(filename, "air_temperature")
        self.assertEqual(
            sorted(cube.coord("longitude").attributes.items()),
            [
                ("data_type", "float"),
                ("modulo", 360),
                ("topology", "circular"),
                ("valid_max", 359.0),
                ("valid_min", 0.0),
            ],
        )

    def test_cell_methods(self):
        filename = tests.get_data_path(
            ("NetCDF", "global", "xyt", "SMALL_hires_wind_u_for_ipcc4.nc")
        )
        cube = iris.load_cube(filename)
        # The file's "time: mean (interval: 6 minutes)" cell method should be
        # parsed into a CellMethod object.
        self.assertEqual(
            cube.cell_methods,
            (
                iris.coords.CellMethod(
                    method="mean",
                    coords=("time",),
                    intervals=("6 minutes",),
                    comments=(),
                ),
            ),
        )
@tests.skip_data
class TestClimatology(tests.IrisTest):
    """Check detection of climatology bounds variables by the CFReader."""

    def setUp(self):
        filename = tests.get_data_path(
            (
                "NetCDF",
                "label_and_climate",
                "A1B-99999a-river-sep-2070-2099.nc",
            )
        )
        self.cfr = cf.CFReader(filename)

    def test_bounds(self):
        # The time coordinate of the data variable should reference exactly
        # one climatology variable, of shape (1, 2).
        time = self.cfr.cf_group["temp_dmax_tmean_abs"].cf_group.coordinates[
            "time"
        ]
        climatology = time.cf_group.climatology
        self.assertEqual(len(climatology), 1)
        self.assertEqual(list(climatology.keys()), ["climatology_bounds"])
        climatology_var = climatology["climatology_bounds"]
        self.assertEqual(climatology_var.ndim, 2)
        self.assertEqual(climatology_var.shape, (1, 2))
@tests.skip_data
class TestLabels(tests.IrisTest):
    """Check CF label (string-valued auxiliary) variable handling for files
    with the character dimension at the start and at the end."""

    def setUp(self):
        filename = tests.get_data_path(
            (
                "NetCDF",
                "label_and_climate",
                "A1B-99999a-river-sep-2070-2099.nc",
            )
        )
        self.cfr_start = cf.CFReader(filename)
        filename = tests.get_data_path(
            ("NetCDF", "label_and_climate", "small_FC_167_mon_19601101.nc")
        )
        self.cfr_end = cf.CFReader(filename)

    def test_label_dim_start(self):
        # Both data variables of the file share the single "region_name"
        # label, spanning the "georegion" dimension.
        cf_data_var = self.cfr_start.cf_group["temp_dmax_tmean_abs"]
        region_group = self.cfr_start.cf_group.labels["region_name"]
        self.assertEqual(
            sorted(self.cfr_start.cf_group.labels.keys()), ["region_name"]
        )
        self.assertEqual(
            sorted(cf_data_var.cf_group.labels.keys()), ["region_name"]
        )
        self.assertEqual(
            region_group.cf_label_dimensions(cf_data_var), ("georegion",)
        )
        self.assertEqual(region_group.cf_label_data(cf_data_var)[0], "Anglian")
        cf_data_var = self.cfr_start.cf_group["cdf_temp_dmax_tmean_abs"]
        self.assertEqual(
            sorted(self.cfr_start.cf_group.labels.keys()), ["region_name"]
        )
        self.assertEqual(
            sorted(cf_data_var.cf_group.labels.keys()), ["region_name"]
        )
        self.assertEqual(
            region_group.cf_label_dimensions(cf_data_var), ("georegion",)
        )
        self.assertEqual(region_group.cf_label_data(cf_data_var)[0], "Anglian")

    def test_label_dim_end(self):
        # Three labels, all spanning the "ensemble" dimension.
        cf_data_var = self.cfr_end.cf_group["tas"]
        self.assertEqual(
            sorted(self.cfr_end.cf_group.labels.keys()),
            ["experiment_id", "institution", "source"],
        )
        self.assertEqual(
            sorted(cf_data_var.cf_group.labels.keys()),
            ["experiment_id", "institution", "source"],
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["experiment_id"].cf_label_dimensions(
                cf_data_var
            ),
            ("ensemble",),
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["experiment_id"].cf_label_data(
                cf_data_var
            )[0],
            "2005",
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["institution"].cf_label_dimensions(
                cf_data_var
            ),
            ("ensemble",),
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["institution"].cf_label_data(
                cf_data_var
            )[0],
            "ECMWF",
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["source"].cf_label_dimensions(
                cf_data_var
            ),
            ("ensemble",),
        )
        self.assertEqual(
            self.cfr_end.cf_group.labels["source"].cf_label_data(cf_data_var)[
                0
            ],
            "IFS33R1/HOPE-E, Sys 1, Met 1, ENSEMBLES",
        )
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    tests.main()
| SciTools/iris | lib/iris/tests/test_cf.py | Python | lgpl-3.0 | 14,871 | [
"NetCDF"
] | 7096ccda7c1d8d16ab4b39eaf143285a3cdcbac6ef0bd447d326ccaddb750037 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# sendrequestaction - send request for e.g. member or ownership action handler
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Send request e.g. for ownership or membership action back end"""
import shared.returnvalues as returnvalues
from shared.defaults import default_vgrid, any_vgrid, any_protocol, \
email_keyword_list
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables, find_entry
from shared.notification import notify_user_thread
from shared.resource import anon_to_real_res_map
from shared.user import anon_to_real_user_map
from shared.vgrid import vgrid_list, vgrid_is_owner, vgrid_is_member, \
vgrid_is_resource, user_allowed_vgrids
from shared.vgridaccess import get_user_map, get_resource_map, CONF, OWNERS, \
USERID
def signature():
    """Signature of the main function"""
    defaults = {}
    defaults['unique_resource_name'] = ['']
    defaults['vgrid_name'] = ['']
    defaults['cert_id'] = ['']
    defaults['protocol'] = [any_protocol]
    defaults['request_type'] = REJECT_UNSET
    defaults['request_text'] = REJECT_UNSET
    return ['html_form', defaults]
def main(client_id, user_arguments_dict):
    """Main function used by front end

    Validates the request arguments, resolves the recipient list for the
    chosen request_type (plain message, vgrid/resource membership or
    acceptance) and dispatches a notification to each recipient through
    the protocols they accept.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not correct_handler('POST'):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'Only accepting POST requests to prevent unintended updates'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = '%s send request' % configuration.short_title
    output_objects.append({'object_type': 'header', 'text':
                           '%s send request' % configuration.short_title})

    # Unpack validated input; values arrive as lists, last entry wins.
    target_id = client_id
    vgrid_name = accepted['vgrid_name'][-1].strip()
    visible_user_name = accepted['cert_id'][-1].strip()
    visible_res_name = accepted['unique_resource_name'][-1].strip()
    request_type = accepted['request_type'][-1].strip().lower()
    request_text = accepted['request_text'][-1].strip()
    protocols = [proto.strip() for proto in accepted['protocol']]
    use_any = False
    if any_protocol in protocols:
        # "any" expands to every protocol the site can notify through.
        use_any = True
        protocols = configuration.notify_protocols
    protocols = [proto.lower() for proto in protocols]

    valid_request_types = ['resourceowner', 'resourceaccept', 'vgridowner',
                           'vgridmember', 'vgridresource', 'vgridaccept',
                           'plain']
    if not request_type in valid_request_types:
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '%s is not a valid request_type (valid types: %s)!'
             % (request_type.lower(), valid_request_types)})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if not protocols:
        output_objects.append(
            {'object_type': 'error_text', 'text': 'No protocol specified!'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_user_map(configuration)
    reply_to = user_map[client_id][USERID]

    # Resolve target_id / target_name / target_list per request type.
    if request_type == "plain":
        # Direct message to a single (possibly anonymized) user.
        if not visible_user_name:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No user ID specified!'})
            return (output_objects, returnvalues.CLIENT_ERROR)

        user_id = visible_user_name
        # Map an anonymized display name back to the real user ID.
        anon_map = anon_to_real_user_map(configuration.user_home)
        if anon_map.has_key(visible_user_name):
            user_id = anon_map[visible_user_name]
        if not user_map.has_key(user_id):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'No such user: %s' % visible_user_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_name = user_id
        user_dict = user_map[user_id]
        # The recipient only accepts email/IM from members of vgrids they
        # explicitly allowed; intersect with the sender's vgrids.
        allow_vgrids = user_allowed_vgrids(configuration, client_id)
        vgrids_allow_email = user_dict[CONF].get('VGRIDS_ALLOW_EMAIL', [])
        vgrids_allow_im = user_dict[CONF].get('VGRIDS_ALLOW_IM', [])
        if any_vgrid in vgrids_allow_email:
            email_vgrids = allow_vgrids
        else:
            email_vgrids = set(vgrids_allow_email).intersection(allow_vgrids)
        if any_vgrid in vgrids_allow_im:
            im_vgrids = allow_vgrids
        else:
            im_vgrids = set(vgrids_allow_im).intersection(allow_vgrids)
        if use_any:
            # Do not try disabled protocols if ANY was requested
            if not email_vgrids:
                protocols = [proto for proto in protocols
                             if proto not in email_keyword_list]
            if not im_vgrids:
                protocols = [proto for proto in protocols
                             if proto in email_keyword_list]
        if not email_vgrids and [proto for proto in protocols
                                 if proto in email_keyword_list]:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'You are not allowed to send emails to %s!' %
                 visible_user_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not im_vgrids and [proto for proto in protocols
                              if proto not in email_keyword_list]:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'You are not allowed to send instant messages to %s!' %
                 visible_user_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        for proto in protocols:
            if not user_dict[CONF].get(proto.upper(), False):
                if use_any:
                    # Remove missing protocols if ANY protocol was requested
                    protocols = [i for i in protocols if i != proto]
                else:
                    output_objects.append(
                        {'object_type': 'error_text', 'text':
                         'User %s does not accept %s messages!' %
                         (visible_user_name, proto)})
                    return (output_objects, returnvalues.CLIENT_ERROR)
        if not protocols:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'User %s does not accept requested protocol(s) messages!' %
                 visible_user_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_list = [user_id]
    elif request_type == "vgridaccept":
        # Always allow accept messages but only between vgrid members/owners
        user_id = visible_user_name
        if not vgrid_name:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No vgrid_name specified!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        if vgrid_name.upper() == default_vgrid.upper():
            # NOTE(review): double negative in this user-facing string
            # ("No requests ... are not allowed") — fix as a separate change.
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No requests for %s are not allowed!' % default_vgrid})
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not vgrid_is_owner(vgrid_name, client_id, configuration):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'You are not an owner of %s or a parent %s!' %
                 (vgrid_name, configuration.site_vgrid_label)})
            return (output_objects, returnvalues.CLIENT_ERROR)
        allow_vgrids = user_allowed_vgrids(configuration, client_id)
        if not vgrid_name in allow_vgrids:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'Invalid %s message! (%s sv %s)' % (request_type, user_id,
                                                     allow_vgrids)})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_id = '%s %s owners' % (vgrid_name,
                                      configuration.site_vgrid_label)
        target_name = vgrid_name
        target_list = [user_id]
    elif request_type == "resourceaccept":
        # Always allow accept messages between actual resource owners
        user_id = visible_user_name
        if not visible_res_name:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No resource ID specified!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        unique_resource_name = visible_res_name
        target_name = unique_resource_name
        res_map = get_resource_map(configuration)
        if not res_map.has_key(unique_resource_name):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'No such resource: %s' %
                                   unique_resource_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        # Both sender and recipient must already own the resource.
        owners_list = res_map[unique_resource_name][OWNERS]
        if not client_id in owners_list or not user_id in owners_list:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'Invalid resource owner accept message!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_id = '%s resource owners' % unique_resource_name
        target_name = unique_resource_name
        target_list = [user_id]
    elif request_type == "resourceowner":
        if not visible_res_name:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No resource ID specified!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        unique_resource_name = visible_res_name
        # Map an anonymized resource name back to the real resource ID.
        anon_map = anon_to_real_res_map(configuration.resource_home)
        if anon_map.has_key(visible_res_name):
            unique_resource_name = anon_map[visible_res_name]
        target_name = unique_resource_name
        res_map = get_resource_map(configuration)
        if not res_map.has_key(unique_resource_name):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'No such resource: %s' %
                                   visible_res_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_list = res_map[unique_resource_name][OWNERS]
        if client_id in target_list:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'You are already an owner of %s!' % unique_resource_name})
            return (output_objects, returnvalues.CLIENT_ERROR)
    elif request_type in ["vgridmember", "vgridowner", "vgridresource"]:
        unique_resource_name = visible_res_name
        if not vgrid_name:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No vgrid_name specified!'})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # default vgrid is read-only
        if vgrid_name.upper() == default_vgrid.upper():
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'No requests for %s are not allowed!' % default_vgrid})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # stop owner or member request if already an owner
        if request_type != 'vgridresource':
            if vgrid_is_owner(vgrid_name, client_id, configuration):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     'You are already an owner of %s or a parent %s!' %
                     (vgrid_name, configuration.site_vgrid_label)})
                return (output_objects, returnvalues.CLIENT_ERROR)

        # only ownership requests are allowed for existing members
        if request_type == 'vgridmember':
            if vgrid_is_member(vgrid_name, client_id, configuration):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     'You are already a member of %s or a parent %s.' %
                     (vgrid_name, configuration.site_vgrid_label)})
                return (output_objects, returnvalues.CLIENT_ERROR)

        # set target to resource and prevent repeated resource access requests
        if request_type == 'vgridresource':
            target_id = unique_resource_name
            if vgrid_is_resource(vgrid_name, unique_resource_name,
                                 configuration):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     'You already have access to %s or a parent %s.' %
                     (vgrid_name, configuration.site_vgrid_label)})
                return (output_objects, returnvalues.CLIENT_ERROR)

        # Find all VGrid owners
        target_name = vgrid_name
        (status, target_list) = vgrid_list(vgrid_name, 'owners',
                                           configuration)
        if not status:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'Could not load list of current owners for %s %s!'
                 % (vgrid_name, configuration.site_vgrid_label)})
            return (output_objects, returnvalues.CLIENT_ERROR)
    else:
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'Invalid request type: %s' % request_type})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Now send request to all targets in turn
    # TODO: inform requestor if no owners have mail/IM set in their settings
    for target in target_list:
        # USER_CERT entry is destination

        notify = []
        for proto in protocols:
            notify.append('%s: SETTINGS' % proto)
        # Fake job dict: notification machinery expects a job-like mapping.
        job_dict = {'NOTIFY': notify, 'JOB_ID': 'NOJOBID',
                    'USER_CERT': target}

        notifier = notify_user_thread(
            job_dict,
            [target_id, target_name, request_type, request_text, reply_to],
            'SENDREQUEST',
            logger,
            '',
            configuration,
        )

        # Try finishing delivery but do not block forever on one message
        notifier.join(30)
    output_objects.append({'object_type': 'text', 'text':
                           'Sent %s message to %d people' %
                           (request_type, len(target_list))})
    # NOTE(review): "Setings" typo in this user-facing text — fix separately.
    output_objects.append({'object_type': 'text', 'text':
                           """Please make sure you have notifications
configured on your Setings page if you expect a reply to this message"""})
    return (output_objects, returnvalues.OK)
| heromod/migrid | mig/shared/functionality/sendrequestaction.py | Python | gpl-2.0 | 16,333 | [
"Brian"
] | 43964886c6c5e15ddd92164afb42754e636554f4912e6c79a3be05eb8658192f |
from django.views.generic.edit import CreateView, UpdateView, DeleteView
#whenever you want to make a form to create, update, view a new object
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render, redirect
#lets us redirect user on login
from django.contrib.auth import authenticate, login, logout
#takes username and password, verifies they are a user and they exist in the database
#login attaches session id so user doesnt have to login on every new page you visit
from django.views import generic
from django.views.generic import View
from .models import Album
from .forms import UserForm
def logout_user(request):
    """End the current session and display the login form again."""
    logout(request)
    # Bind POST data when present so validation errors are redisplayed.
    login_form = UserForm(request.POST or None)
    return render(request, 'music/login.html', {"form": login_form})
class IndexView(generic.ListView):
    """List every album on the index page."""

    template_name = 'music/index.html'
    # Name the template context variable explicitly instead of the
    # ListView default ("object_list").
    context_object_name = 'all_albums'

    def get_queryset(self):
        # No filtering: show all albums.
        return Album.objects.all()
# displays details/properties on object
class DetailView(generic.DetailView):
    """Show the detail page for a single album (looked up by pk/slug)."""

    model = Album
    template_name = 'music/detail.html'
# inherits from createview
# pass in attributes you want user to be able to fill out in fields
class AlbumCreate(CreateView):
    """Auto-generated form view for creating a new Album."""

    model = Album
    # Only these model fields appear on the generated form.
    fields = ['artist', 'album_title', 'genre', 'album_logo']
# next step is assign url pattern to new view
class AlbumUpdate(UpdateView):
    """Auto-generated form view for editing an existing Album."""

    model = Album
    # Same editable fields as AlbumCreate.
    fields = ['artist', 'album_title', 'genre', 'album_logo']
class AlbumDelete(DeleteView):
    """Confirmation view for deleting an Album."""

    model = Album
    # Redirect to the album list after deletion; reverse_lazy defers URL
    # resolution until the URLconf is loaded.
    success_url = reverse_lazy('music:index')
# inherits from view
class UserFormView(View):
    """Register a new user account and log them in on success."""

    form_class = UserForm
    template_name = 'music/registration_form.html'

    # display blank form for new user
    def get(self, request):
        """Render an unbound registration form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    # process form data
    def post(self, request):
        """Validate the submitted form, create the user and log them in."""
        form = self.form_class(request.POST)
        if form.is_valid():
            # Build the user object without writing it to the database yet.
            user = form.save(commit=False)
            # cleaned (normalized) data - ensures data is always formatted
            # properly.
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # BUGFIX: hash the password before saving.  The original left
            # set_password() commented out, storing the raw form value in
            # the password field, so authenticate() below could never match.
            user.set_password(password)
            user.save()
            # Returns a User object if the credentials check out.
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    # Attach the user to the session so they stay logged in.
                    login(request, user)
                    return redirect('music:index')
        # Invalid form or failed authentication: redisplay the form.
        return render(request, self.template_name, {'form': form})
| castle-c/djangoTutorial | music/views.py | Python | mit | 2,856 | [
"VisIt"
] | 97ec537a2808bdbf2a674b0886bf3532a710996e065cd8accbdfb88ef9458d25 |
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import math
import uuid
from libchebipy._chebi_entity import ChebiEntity, ChebiException
from sbcdb import namespace_utils as ns_utils
from synbiochem.utils import chem_utils
class ChemicalManager(object):
'''Class to implement a manager of Chemical data.'''
def __init__(self, array_delimiter):
'''Constructor.'''
self.__array_delimiter = array_delimiter
self.__nodes = {}
self.__chem_ids = {}
def write_files(self, writer):
'''Write neo4j import files.'''
return writer.write_nodes(self.__nodes.values(), 'Chemical')
def add_chemical(self, properties):
'''Adds a chemical to the collection of nodes, ensuring uniqueness.'''
chem_id, chebi_ent = self.__get_chem_id(properties)
if 'charge:float' in properties:
charge = properties.pop('charge:float')
if not math.isnan(charge):
properties['charge:float'] = int(charge)
if chem_id not in self.__nodes:
properties[':LABEL'] = 'Chemical'
properties['id:ID(Chemical)'] = chem_id
properties['source'] = 'chebi' if 'chebi' in properties else 'mnx'
_normalise_mass(properties)
self.__nodes[chem_id] = properties
else:
self.__nodes[chem_id].update(properties)
return chem_id, chebi_ent
def get_props(self, prop, default=None):
'''Gets all chem_ids to property as a dict.'''
return {key: self.__nodes[chem_id].get(prop, default)
for key, chem_id in self.__chem_ids.iteritems()}
def get_prop(self, chem_id, prop, default=None):
'''Gets a property.'''
return self.__nodes[self.__chem_ids[chem_id]].get(prop, default)
def __get_chem_id(self, properties):
'''Manages chemical id mapping.'''
chebi_id = properties.get('chebi', None)
chebi_ent = None
if chebi_id:
try:
chebi_id, chebi_ent = _get_chebi_data(chebi_id, properties,
self.__array_delimiter)
except ChebiException, err:
properties.pop('chebi')
chebi_id = None
print err
except ValueError, err:
properties.pop('chebi')
chebi_id = None
print err
mnx_id = properties.get('mnx', None)
inchi_id = properties.get('inchi', None)
if chebi_id:
self.__chem_ids[chebi_id] = chebi_id
if inchi_id:
self.__chem_ids[inchi_id] = chebi_id
if mnx_id:
self.__chem_ids[mnx_id] = chebi_id
return chebi_id, chebi_ent
if inchi_id:
chem_id = self.__chem_ids.get(inchi_id, None)
if chem_id:
return chem_id, None
if mnx_id:
chem_id = self.__chem_ids.get(mnx_id, None)
if chem_id:
return chem_id, None
if inchi_id:
self.__chem_ids[inchi_id] = mnx_id
self.__chem_ids[mnx_id] = mnx_id
return mnx_id, None
new_id = str(uuid.uuid4())
self.__chem_ids[inchi_id] = new_id
return new_id, None
def _get_chebi_data(chebi_id, properties, array_delimiter):
'''Gets ChEBI data.'''
chebi_ent = ChebiEntity(str(chebi_id))
if chebi_ent.get_parent_id():
chebi_id = chebi_ent.get_parent_id()
else:
chebi_id = chebi_ent.get_id()
properties['chebi'] = chebi_id
formula = chebi_ent.get_formula()
charge = chebi_ent.get_charge()
inchi = chebi_ent.get_inchi()
smiles = chebi_ent.get_smiles()
if formula:
properties['formula'] = formula
if not math.isnan(charge):
properties['charge:float'] = charge
if inchi:
properties['inchi'] = inchi
if smiles:
properties['smiles'] = smiles
properties['name'] = chebi_ent.get_name()
properties['names:string[]'] = \
array_delimiter.join([name.get_name()
for name in chebi_ent.get_names()] +
[chebi_ent.get_name()])
for db_acc in chebi_ent.get_database_accessions():
namespace = ns_utils.resolve_namespace(
db_acc.get_type(), True)
if namespace is not None:
properties[namespace] = db_acc.get_accession_number()
return chebi_id, chebi_ent
def _normalise_mass(properties):
'''Removes ambiguity in mass values by recalculating according to chemical
formula.'''
properties.pop('mass:float', None)
if 'formula' in properties and properties['formula'] is not None:
mono_mass = chem_utils.get_molecular_mass(properties['formula'])
if not math.isnan(mono_mass):
properties['monoisotopic_mass:float'] = mono_mass
| synbiochem/biochem4j | sbcdb/chemical_utils.py | Python | mit | 5,103 | [
"VisIt"
] | 650061cf11665e1c9a9b25aecffdb58738e38822bf1e5c1f226c19fbc44d0804 |
"""
Generalized Linear Models with Exponential Dispersion Family
"""
# Author: Christian Lorentzen <lorentzen.ch@googlemail.com>
# some parts and tricks stolen from other sklearn files.
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.optimize
from ...base import BaseEstimator, RegressorMixin
from ...utils import check_array, check_X_y
from ...utils.optimize import _check_optimize_result
from ...utils.validation import check_is_fitted, _check_sample_weight
from ..._loss.glm_distribution import (
ExponentialDispersionModel,
TweedieDistribution,
EDM_DISTRIBUTIONS
)
from .link import (
BaseLink,
IdentityLink,
LogLink,
)
def _safe_lin_pred(X, coef):
"""Compute the linear predictor taking care if intercept is present."""
if coef.size == X.shape[1] + 1:
return X @ coef[1:] + coef[0]
else:
return X @ coef
def _y_pred_deviance_derivative(coef, X, y, weights, family, link):
"""Compute y_pred and the derivative of the deviance w.r.t coef."""
lin_pred = _safe_lin_pred(X, coef)
y_pred = link.inverse(lin_pred)
d1 = link.inverse_derivative(lin_pred)
temp = d1 * family.deviance_derivative(y, y_pred, weights)
if coef.size == X.shape[1] + 1:
devp = np.concatenate(([temp.sum()], temp @ X))
else:
devp = temp @ X # same as X.T @ temp
return y_pred, devp
class GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
"""Regression via a penalized Generalized Linear Model (GLM).
GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at
fitting and predicting the mean of the target y as y_pred=h(X*w).
Therefore, the fit minimizes the following objective function with L2
priors as regularizer::
1/(2*sum(s)) * deviance(y, h(X*w); s)
+ 1/2 * alpha * |w|_2
with inverse link function h and s=sample_weight.
The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
.. versionadded:: 0.23
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
family : {'normal', 'poisson', 'gamma', 'inverse-gaussian'} \
or an ExponentialDispersionModel instance, default='normal'
The distributional assumption of the GLM, i.e. which distribution from
the EDM, specifies the loss function to be minimized.
link : {'auto', 'identity', 'log'} or an instance of class BaseLink, \
default='auto'
The link function of the GLM, i.e. mapping from linear predictor
`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
the link depending on the chosen family as follows:
- 'identity' for Normal distribution
- 'log' for Poisson, Gamma and Inverse Gaussian distributions
solver : 'lbfgs', default='lbfgs'
Algorithm to use in the optimization problem:
'lbfgs'
Calls scipy's L-BFGS-B optimizer.
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_``.
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
"""
def __init__(self, *, alpha=1.0,
fit_intercept=True, family='normal', link='auto',
solver='lbfgs', max_iter=100, tol=1e-4, warm_start=False,
verbose=0):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.family = family
self.link = link
self.solver = solver
self.max_iter = max_iter
self.tol = tol
self.warm_start = warm_start
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : returns an instance of self.
"""
if isinstance(self.family, ExponentialDispersionModel):
self._family_instance = self.family
elif self.family in EDM_DISTRIBUTIONS:
self._family_instance = EDM_DISTRIBUTIONS[self.family]()
else:
raise ValueError(
"The family must be an instance of class"
" ExponentialDispersionModel or an element of"
" ['normal', 'poisson', 'gamma', 'inverse-gaussian']"
"; got (family={0})".format(self.family))
# Guarantee that self._link_instance is set to an instance of
# class BaseLink
if isinstance(self.link, BaseLink):
self._link_instance = self.link
else:
if self.link == 'auto':
if isinstance(self._family_instance, TweedieDistribution):
if self._family_instance.power <= 0:
self._link_instance = IdentityLink()
if self._family_instance.power >= 1:
self._link_instance = LogLink()
else:
raise ValueError("No default link known for the "
"specified distribution family. Please "
"set link manually, i.e. not to 'auto'; "
"got (link='auto', family={})"
.format(self.family))
elif self.link == 'identity':
self._link_instance = IdentityLink()
elif self.link == 'log':
self._link_instance = LogLink()
else:
raise ValueError(
"The link must be an instance of class Link or "
"an element of ['auto', 'identity', 'log']; "
"got (link={0})".format(self.link))
if not isinstance(self.alpha, numbers.Number) or self.alpha < 0:
raise ValueError("Penalty term must be a non-negative number;"
" got (alpha={0})".format(self.alpha))
if not isinstance(self.fit_intercept, bool):
raise ValueError("The argument fit_intercept must be bool;"
" got {0}".format(self.fit_intercept))
if self.solver not in ['lbfgs']:
raise ValueError("GeneralizedLinearRegressor supports only solvers"
"'lbfgs'; got {0}".format(self.solver))
solver = self.solver
if (not isinstance(self.max_iter, numbers.Integral)
or self.max_iter <= 0):
raise ValueError("Maximum number of iteration must be a positive "
"integer;"
" got (max_iter={0!r})".format(self.max_iter))
if not isinstance(self.tol, numbers.Number) or self.tol <= 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol={0!r})".format(self.tol))
if not isinstance(self.warm_start, bool):
raise ValueError("The argument warm_start must be bool;"
" got {0}".format(self.warm_start))
family = self._family_instance
link = self._link_instance
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr'],
dtype=[np.float64, np.float32],
y_numeric=True, multi_output=False)
weights = _check_sample_weight(sample_weight, X)
_, n_features = X.shape
if not np.all(family.in_y_range(y)):
raise ValueError("Some value(s) of y are out of the valid "
"range for family {0}"
.format(family.__class__.__name__))
# TODO: if alpha=0 check that X is not rank deficient
# rescaling of sample_weight
#
# IMPORTANT NOTE: Since we want to minimize
# 1/(2*sum(sample_weight)) * deviance + L2,
# deviance = sum(sample_weight * unit_deviance),
# we rescale weights such that sum(weights) = 1 and this becomes
# 1/2*deviance + L2 with deviance=sum(weights * unit_deviance)
weights = weights / weights.sum()
if self.warm_start and hasattr(self, 'coef_'):
if self.fit_intercept:
coef = np.concatenate((np.array([self.intercept_]),
self.coef_))
else:
coef = self.coef_
else:
if self.fit_intercept:
coef = np.zeros(n_features+1)
coef[0] = link(np.average(y, weights=weights))
else:
coef = np.zeros(n_features)
# algorithms for optimization
if solver == 'lbfgs':
def func(coef, X, y, weights, alpha, family, link):
y_pred, devp = _y_pred_deviance_derivative(
coef, X, y, weights, family, link
)
dev = family.deviance(y, y_pred, weights)
# offset if coef[0] is intercept
offset = 1 if self.fit_intercept else 0
coef_scaled = alpha * coef[offset:]
obj = 0.5 * dev + 0.5 * (coef[offset:] @ coef_scaled)
objp = 0.5 * devp
objp[offset:] += coef_scaled
return obj, objp
args = (X, y, weights, self.alpha, family, link)
opt_res = scipy.optimize.minimize(
func, coef, method="L-BFGS-B", jac=True,
options={
"maxiter": self.max_iter,
"iprint": (self.verbose > 0) - 1,
"gtol": self.tol,
"ftol": 1e3*np.finfo(float).eps,
},
args=args)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
coef = opt_res.x
if self.fit_intercept:
self.intercept_ = coef[0]
self.coef_ = coef[1:]
else:
# set intercept to zero as the other linear models do
self.intercept_ = 0.
self.coef_ = coef
return self
def _linear_predictor(self, X):
"""Compute the linear_predictor = `X @ coef_ + intercept_`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values of linear predictor.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float64, np.float32], ensure_2d=True,
allow_nd=False)
return X @ self.coef_ + self.intercept_
def predict(self, X):
"""Predict using GLM with feature matrix X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values.
"""
# check_array is done in _linear_predictor
eta = self._linear_predictor(X)
y_pred = self._link_instance.inverse(eta)
return y_pred
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 deviance. Note that those two are equal
for ``family='normal'``.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
# Note, default score defined in RegressorMixin is R^2 score.
# TODO: make D^2 a score function in module metrics (and thereby get
# input validation and so on)
weights = _check_sample_weight(sample_weight, X)
y_pred = self.predict(X)
dev = self._family_instance.deviance(y, y_pred, weights=weights)
y_mean = np.average(y, weights=weights)
dev_null = self._family_instance.deviance(y, y_mean, weights=weights)
return 1 - dev / dev_null
def _more_tags(self):
# create the _family_instance if fit wasn't called yet.
if hasattr(self, '_family_instance'):
_family_instance = self._family_instance
elif isinstance(self.family, ExponentialDispersionModel):
_family_instance = self.family
elif self.family in EDM_DISTRIBUTIONS:
_family_instance = EDM_DISTRIBUTIONS[self.family]()
else:
raise ValueError
return {"requires_positive_y": not _family_instance.in_y_range(-1.0)}
class PoissonRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Poisson distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
.. versionadded:: 0.23
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
----------
>>> from sklearn import linear_model
>>> clf = linear_model.PoissonRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [12, 17, 22, 21]
>>> clf.fit(X, y)
PoissonRegressor()
>>> clf.score(X, y)
0.990...
>>> clf.coef_
array([0.121..., 0.158...])
>>> clf.intercept_
2.088...
>>> clf.predict([[1, 1], [3, 4]])
array([10.676..., 21.875...])
"""
def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
tol=1e-4, warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family="poisson", link='log', max_iter=max_iter,
tol=tol, warm_start=warm_start, verbose=verbose)
@property
def family(self):
# Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
return "poisson"
@family.setter
def family(self, value):
if value != "poisson":
raise ValueError("PoissonRegressor.family must be 'poisson'!")
class GammaRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Gamma distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
.. versionadded:: 0.23
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X * coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.GammaRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [19, 26, 33, 30]
>>> clf.fit(X, y)
GammaRegressor()
>>> clf.score(X, y)
0.773...
>>> clf.coef_
array([0.072..., 0.066...])
>>> clf.intercept_
2.896...
>>> clf.predict([[1, 0], [2, 8]])
array([19.483..., 35.795...])
"""
def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
tol=1e-4, warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family="gamma", link='log', max_iter=max_iter,
tol=tol, warm_start=warm_start, verbose=verbose)
@property
def family(self):
# Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
return "gamma"
@family.setter
def family(self, value):
if value != "gamma":
raise ValueError("GammaRegressor.family must be 'gamma'!")
class TweedieRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Tweedie distribution.
This estimator can be used to model different GLMs depending on the
``power`` parameter, which determines the underlying distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
.. versionadded:: 0.23
Parameters
----------
power : float, default=0
The power determines the underlying target distribution according
to the following table:
+-------+------------------------+
| Power | Distribution |
+=======+========================+
| 0 | Normal |
+-------+------------------------+
| 1 | Poisson |
+-------+------------------------+
| (1,2) | Compound Poisson Gamma |
+-------+------------------------+
| 2 | Gamma |
+-------+------------------------+
| 3 | Inverse Gaussian |
+-------+------------------------+
For ``0 < power < 1``, no distribution exists.
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
link : {'auto', 'identity', 'log'}, default='auto'
The link function of the GLM, i.e. mapping from linear predictor
`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
the link depending on the chosen family as follows:
- 'identity' for Normal distribution
- 'log' for Poisson, Gamma and Inverse Gaussian distributions
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
----------
>>> from sklearn import linear_model
>>> clf = linear_model.TweedieRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [2, 3.5, 5, 5.5]
>>> clf.fit(X, y)
TweedieRegressor()
>>> clf.score(X, y)
0.839...
>>> clf.coef_
array([0.599..., 0.299...])
>>> clf.intercept_
1.600...
>>> clf.predict([[1, 1], [3, 4]])
array([2.500..., 4.599...])
"""
def __init__(self, *, power=0.0, alpha=1.0, fit_intercept=True,
link='auto', max_iter=100, tol=1e-4,
warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family=TweedieDistribution(power=power), link=link,
max_iter=max_iter, tol=tol,
warm_start=warm_start, verbose=verbose)
@property
def family(self):
# We use a property with a setter to make sure that the family is
# always a Tweedie distribution, and that self.power and
# self.family.power are identical by construction.
dist = TweedieDistribution(power=self.power)
# TODO: make the returned object immutable
return dist
@family.setter
def family(self, value):
if isinstance(value, TweedieDistribution):
self.power = value.power
else:
raise TypeError("TweedieRegressor.family must be of type "
"TweedieDistribution!")
| ndingwall/scikit-learn | sklearn/linear_model/_glm/glm.py | Python | bsd-3-clause | 24,918 | [
"Gaussian"
] | bba635ed7191d15c04ba9eb491ea5219582ff4d0eaaef5dc7ec990338c7a4bb8 |
#!/usr/bin/env python
"""
A very simple HTTP server.
"""
import sys
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8080
address = ('localhost', port)
httpd = HTTPServer(address, SimpleHTTPRequestHandler)
sa = httpd.socket.getsockname()
print 'Now visit http://%s:%d' % sa
print 'Press CTRL-C to stop this server'
httpd.serve_forever()
| ntoll/bookreader | scripts/runserver.py | Python | mit | 444 | [
"VisIt"
] | 115836d55855f21a42ea902639b14ba4e4d35180955731e615e62c53967919a2 |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country and subdivision
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
# from __future__ import annotations # add in Python 3.7
import inspect
import warnings
from functools import lru_cache
from typing import Dict, Iterable, List, Optional, Union
from datetime import date, timedelta
from hijri_converter import convert
import holidays.countries
from holidays.holiday_base import HolidayBase
def country_holidays(
country: str,
subdiv: Optional[str] = None,
years: Union[int, Iterable[int]] = None,
expand: bool = True,
observed: bool = True,
prov: Optional[str] = None,
state: Optional[str] = None,
) -> HolidayBase:
"""
Returns a new dictionary-like :py:class:`HolidayBase` object for the public
holidays of the country matching **country** and other keyword arguments.
:param country:
An ISO 3166-1 Alpha-2 country code.
:param subdiv:
The subdivision (e.g. state or province); not implemented for all
countries (see documentation).
:param years:
The year(s) to pre-calculate public holidays for at instantiation.
:param expand:
Whether the entire year is calculated when one date from that year
is requested.
:param observed:
Whether to include the dates of when public holiday are observed
(e.g. a holiday falling on a Sunday being observed the following
Monday). False may not work for all countries.
:param prov:
*deprecated* use subdiv instead.
:param state:
*deprecated* use subdiv instead.
:return:
A :py:class:`HolidayBase` object matching the **country**.
The key of the :class:`dict`-like :class:`HolidayBase` object is the
`date` of the holiday, and the value is the name of the holiday itself.
Dates where a key is not present are not public holidays (or, if
**observed** is False, days when a public holiday is observed).
When passing the `date` as a key, the `date` can be expressed in one of the
following types:
* :class:`datetime.date`,
* :class:`datetime.datetime`,
* a :class:`str` of any format recognized by :func:`dateutil.parser.parse`,
* or a :class:`float` or :class:`int` representing a POSIX timestamp.
The key is always returned as a :class:`datetime.date` object.
To maximize speed, the list of public holidays is built on the fly as
needed, one calendar year at a time. When the object is instantiated
without a **years** parameter, it is empty, but, unless **expand** is set
to False, as soon as a key is accessed the class will calculate that entire
year's list of holidays and set the keys with them.
If you need to list the holidays as opposed to querying individual dates,
instantiate the class with the **years** parameter.
Example usage:
>>> from holidays import country_holidays
>>> us_holidays = country_holidays('US')
# For a specific subdivision (e.g. state or province):
>>> calif_holidays = country_holidays('US', subdiv='CA')
The below will cause 2015 holidays to be calculated on the fly:
>>> from datetime import date
>>> assert date(2015, 1, 1) in us_holidays
This will be faster because 2015 holidays are already calculated:
>>> assert date(2015, 1, 2) not in us_holidays
The :class:`HolidayBase` class also recognizes strings of many formats
and numbers representing a POSIX timestamp:
>>> assert '2014-01-01' in us_holidays
>>> assert '1/1/2014' in us_holidays
>>> assert 1388597445 in us_holidays
Show the holiday's name:
>>> us_holidays.get('2014-01-01')
"New Year's Day"
Check a range:
>>> us_holidays['2014-01-01': '2014-01-03']
[datetime.date(2014, 1, 1)]
List all 2020 holidays:
>>> us_holidays = country_holidays('US', years=2020)
>>> for day in us_holidays.items():
... print(day)
(datetime.date(2020, 1, 1), "New Year's Day")
(datetime.date(2020, 1, 20), 'Martin Luther King Jr. Day')
(datetime.date(2020, 2, 17), "Washington's Birthday")
(datetime.date(2020, 5, 25), 'Memorial Day')
(datetime.date(2020, 7, 4), 'Independence Day')
(datetime.date(2020, 7, 3), 'Independence Day (Observed)')
(datetime.date(2020, 9, 7), 'Labor Day')
(datetime.date(2020, 10, 12), 'Columbus Day')
(datetime.date(2020, 11, 11), 'Veterans Day')
(datetime.date(2020, 11, 26), 'Thanksgiving')
(datetime.date(2020, 12, 25), 'Christmas Day')
Some holidays are only present in parts of a country:
>>> us_pr_holidays = country_holidays('US', subdiv='PR')
>>> assert '2018-01-06' not in us_holidays
>>> assert '2018-01-06' in us_pr_holidays
Append custom holiday dates by passing one of:
* a :class:`dict` with date/name key/value pairs (e.g.
``{'2010-07-10': 'My birthday!'}``),
* a list of dates (as a :class:`datetime.date`, :class:`datetime.datetime`,
:class:`str`, :class:`int`, or :class:`float`); ``'Holiday'`` will be
used as a description,
* or a single date item (of one of the types above); ``'Holiday'`` will be
used as a description:
>>> custom_holidays = country_holidays('US', years=2015)
>>> custom_holidays.update({'2015-01-01': "New Year's Day"})
>>> custom_holidays.update(['2015-07-01', '07/04/2015'])
>>> custom_holidays.update(date(2015, 12, 25))
>>> assert date(2015, 1, 1) in custom_holidays
>>> assert date(2015, 1, 2) not in custom_holidays
>>> assert '12/25/2015' in custom_holidays
For more complex logic, like 4th Monday of January, you can inherit the
:class:`HolidayBase` class and define your own :meth:`_populate` method.
See documentation for examples.
"""
try:
country_classes = inspect.getmembers(
holidays.countries, inspect.isclass
)
country_class = next(
obj for name, obj in country_classes if name == country
)
country_holiday = country_class(
years=years,
subdiv=subdiv,
expand=expand,
observed=observed,
prov=prov,
state=state,
)
except StopIteration:
raise NotImplementedError(f"Country {country} not available")
return country_holiday
def CountryHoliday(
country: str,
subdiv: Optional[str] = None,
years: Union[int, Iterable[int]] = None,
expand: bool = True,
observed: bool = True,
prov: Optional[str] = None,
state: Optional[str] = None,
) -> HolidayBase:
"""
Deprecated name for :py:func:`country_holidays`.
:meta private:
"""
warnings.warn(
"CountryHoliday is deprecated, use country_holidays instead.",
DeprecationWarning,
)
return country_holidays(
country, subdiv, years, expand, observed, prov, state
)
def list_supported_countries() -> Dict[str, List[str]]:
"""
Get all supported countries and their subdivisions.
:return:
A dictionary where the key is the ISO 3166-1 Alpha-2 country codes and
the value is a list of supported subdivision codes.
"""
return {
obj.country: obj.subdivisions
for name, obj in inspect.getmembers(
holidays.countries, inspect.isclass
)
if obj.__base__ == HolidayBase
}
def _islamic_to_gre(Gyear: int, Hmonth: int, Hday: int) -> List[date]:
"""
Find the Gregorian dates of all instances of Islamic (Lunar Hijrī) calendar
month and day falling within the Gregorian year. There could be up to two
such instances in a single Gregorian year since the Islamic (Lunar Hijrī)
calendar is about 11 days shorter.
Relies on package `hijri_converter
<https://www.pypy.org/package/hijri_converter>`__.
:param Gyear:
The Gregorian year.
:param Hmonth:
The Lunar Hijrī (Islamic) month.
:param Hday:
The Lunar Hijrī (Islamic) day.
:return:
List of Gregorian dates within the Gregorian year specified that
matches the Islamic (Lunar Hijrī) calendar day and month specified.
"""
Hyear = convert.Gregorian(Gyear, 1, 1).to_hijri().datetuple()[0]
gres = [
convert.Hijri(y, Hmonth, Hday).to_gregorian()
for y in range(Hyear - 1, Hyear + 2)
]
gre_dates = [date(*gre.datetuple()) for gre in gres if gre.year == Gyear]
return gre_dates
class _ChineseLuniSolar:
    def __init__(self):
        """
        This class has functions that generate Gregorian dates for holidays
        based on the Chinese lunisolar calendar.

        See `Wikipedia
        <https://en.wikipedia.org/wiki/Chinese_New_Year#Dates_in_Chinese_\
        lunisolar_calendar>`__

        Usage example:

        >>> from holidays.utils import _ChineseLuniSolar
        >>> cnls = _ChineseLuniSolar()
        >>> print(cnls.lunar_n_y_date(2010))
        2010-02-14
        """
        # A binary representation starting from year 1901 of the number of
        # days per year, and the number of days from the 1st to the 13th to
        # store the monthly (including the month of the month). 1 means that
        # the month is 30 days. 0 means the month is 29 days.
        # The 12th to 15th digits indicate the month of the next month.
        # If it is 0x0F, it means that there is no leap month.
        # Layout of each packed entry: bit m (1 <= m <= 13) is 1 when lunar
        # month m has 30 days, 0 when it has 29; bits 16-19 hold the leap
        # month number, or 0x0F when the year has no leap month.
        # NOTE(review): the decade markers below appear shifted by one
        # relative to the list indices (e.g. "# 1911" sits at index 9, which
        # indexes year 1910) — verify the table is complete against upstream.
        self.G_LUNAR_MONTH_DAYS = [
            0xF0EA4,  # 1901
            0xF1D4A,
            0x52C94,
            0xF0C96,
            0xF1536,
            0x42AAC,
            0xF0AD4,
            0xF16B2,
            0x22EA4,
            0xF0EA4,  # 1911
            0x6364A,
            0xF164A,
            0xF1496,
            0x52956,
            0xF055A,
            0xF0AD6,
            0x216D2,
            0xF1B52,
            0x73B24,
            0xF1D24,  # 1921
            0xF1A4A,
            0x5349A,
            0xF14AC,
            0xF056C,
            0x42B6A,
            0xF0DA8,
            0xF1D52,
            0x23D24,
            0xF1D24,
            0x61A4C,  # 1931
            0xF0A56,
            0xF14AE,
            0x5256C,
            0xF16B4,
            0xF0DA8,
            0x31D92,
            0xF0E92,
            0x72D26,
            0xF1526,
            0xF0A56,  # 1941
            0x614B6,
            0xF155A,
            0xF0AD4,
            0x436AA,
            0xF1748,
            0xF1692,
            0x23526,
            0xF152A,
            0x72A5A,
            0xF0A6C,  # 1951
            0xF155A,
            0x52B54,
            0xF0B64,
            0xF1B4A,
            0x33A94,
            0xF1A94,
            0x8152A,
            0xF152E,
            0xF0AAC,
            0x6156A,  # 1961
            0xF15AA,
            0xF0DA4,
            0x41D4A,
            0xF1D4A,
            0xF0C94,
            0x3192E,
            0xF1536,
            0x72AB4,
            0xF0AD4,
            0xF16D2,  # 1971
            0x52EA4,
            0xF16A4,
            0xF164A,
            0x42C96,
            0xF1496,
            0x82956,
            0xF055A,
            0xF0ADA,
            0x616D2,
            0xF1B52,  # 1981
            0xF1B24,
            0x43A4A,
            0xF1A4A,
            0xA349A,
            0xF14AC,
            0xF056C,
            0x60B6A,
            0xF0DAA,
            0xF1D92,
            0x53D24,  # 1991
            0xF1D24,
            0xF1A4C,
            0x314AC,
            0xF14AE,
            0x829AC,
            0xF06B4,
            0xF0DAA,
            0x52D92,
            0xF0E92,
            0xF0D26,  # 2001
            0x42A56,
            0xF0A56,
            0xF14B6,
            0x22AB4,
            0xF0AD4,
            0x736AA,
            0xF1748,
            0xF1692,
            0x53526,
            0xF152A,  # 2011
            0xF0A5A,
            0x4155A,
            0xF156A,
            0x92B54,
            0xF0BA4,
            0xF1B4A,
            0x63A94,
            0xF1A94,
            0xF192A,
            0x42A5C,  # 2021
            0xF0AAC,
            0xF156A,
            0x22B64,
            0xF0DA4,
            0x61D52,
            0xF0E4A,
            0xF0C96,
            0x5192E,
            0xF1956,
            0xF0AB4,  # 2031
            0x315AC,
            0xF16D2,
            0xB2EA4,
            0xF16A4,
            0xF164A,
            0x63496,
            0xF1496,
            0xF0956,
            0x50AB6,
            0xF0B5A,  # 2041
            0xF16D4,
            0x236A4,
            0xF1B24,
            0x73A4A,
            0xF1A4A,
            0xF14AA,
            0x5295A,
            0xF096C,
            0xF0B6A,
            0x31B54,  # 2051
            0xF1D92,
            0x83D24,
            0xF1D24,
            0xF1A4C,
            0x614AC,
            0xF14AE,
            0xF09AC,
            0x40DAA,
            0xF0EAA,
            0xF0E92,  # 2061
            0x31D26,
            0xF0D26,
            0x72A56,
            0xF0A56,
            0xF14B6,
            0x52AB4,
            0xF0AD4,
            0xF16CA,
            0x42E94,
            0xF1694,  # 2071
            0x8352A,
            0xF152A,
            0xF0A5A,
            0x6155A,
            0xF156A,
            0xF0B54,
            0x4174A,
            0xF1B4A,
            0xF1A94,
            0x3392A,  # 2081
            0xF192C,
            0x7329C,
            0xF0AAC,
            0xF156A,
            0x52B64,
            0xF0DA4,
            0xF1D4A,
            0x41C94,
            0xF0C96,
            0x8192E,  # 2091
            0xF0956,
            0xF0AB6,
            0x615AC,
            0xF16D4,
            0xF0EA4,
            0x42E4A,
            0xF164A,
            0xF1516,
            0x22936,  # 2100
        ]
        # Define range of years covered
        self.START_YEAR = 1901
        self.END_YEAR = 2099
        # The 1st day of the 1st lunar month of 1901 falls on Gregorian
        # 1901-02-19; this is the epoch all span computations start from.
        self.LUNAR_START_DATE = ((1901, 1, 1),)
        self.SOLAR_START_DATE = date(1901, 2, 19)
        # NOTE(review): the original comment claimed lunar 2099-12-30 maps to
        # 2100/2/8, but the code uses 2100-02-18 — verify against upstream.
        self.LUNAR_END_DATE = (2099, 12, 30)
        self.SOLAR_END_DATE = date(2100, 2, 18)
@lru_cache()
def _get_leap_month(self, lunar_year: int) -> int:
"""
Calculate the leap lunar month in a lunar year.
:param lunar_year:
The lunar year.
:return:
The number of the leap month if one exists in the year, otherwise
15.
"""
return (
self.G_LUNAR_MONTH_DAYS[lunar_year - self.START_YEAR] >> 16
) & 0x0F
def _lunar_month_days(self, lunar_year: int, lunar_month: int) -> int:
"""
Calculate the number of days in a lunar month.
:param lunar_year:
The lunar year.
:param lunar_month:
The lunar month of the lunar year.
:return:
The number of days in the lunar month.
"""
return 29 + (
(
self.G_LUNAR_MONTH_DAYS[lunar_year - self.START_YEAR]
>> lunar_month
)
& 0x01
)
def _lunar_year_days(self, year: int) -> int:
"""
Calculate the number of days in a lunar year.
:param year:
The lunar year.
:return:
The number of days in the lunar year.
"""
days = 0
months_day = self.G_LUNAR_MONTH_DAYS[year - self.START_YEAR]
for i in range(1, 13 if self._get_leap_month(year) == 0x0F else 14):
day = 29 + ((months_day >> i) & 0x01)
days += day
return days
@lru_cache()
def _span_days(self, year: int) -> int:
"""
Calculate the number of days elapsed since self.SOLAR_START_DATE to the
beginning of the year.
:param year:
The year.
:return:
The number of days since self.SOLAR_START_DATE.
"""
span_days = 0
for y in range(self.START_YEAR, year):
span_days += self._lunar_year_days(y)
return span_days
def lunar_n_y_date(self, year: int) -> date:
"""
Calculate the Gregorian date of Chinese Lunar New Year.
This is a faster implementation than calling
``lunar_to_gre(year, 1, 1)``.
:param year:
The Gregorian year.
:return:
The Gregorian date of Chinese Lunar New Year.
"""
# The Chinese calendar defines the lunar month containing the winter
# solstice as the eleventh month, which means that Chinese New Year
# usually falls on the second new moon after the winter solstice
# (rarely the third if an intercalary month intervenes). In more
# than 96 percent of the years, Chinese New Year's Day is the closest
# date to a new moon to lichun (Chinese: 立春; "start of spring") on 4
# or 5 February, and the first new moon after dahan (Chinese: 大寒;
# "major cold"). In the Gregorian calendar, the Chinese New Year begins
# at the new moon that falls between 21 January and 20 February.
span_days = self._span_days(year)
# Always in first month (by definition)
# leap_month = self._get_leap_month(year)
# for m in range(1, 1 + (1 > leap_month)):
# span_days += self._lunar_month_days(year, m)
return self.SOLAR_START_DATE + timedelta(span_days)
def lunar_to_gre(
self, year: int, month: int, day: int, leap: bool = True
) -> date:
"""
Calculate the Gregorian date of a Chinese lunar day and month in a
given Gregorian year.
:param year:
The Gregorian year.
:param year:
The Chinese lunar month.
:param year:
The Chinese lunar day.
:return:
The Gregorian date.
"""
span_days = self._span_days(year)
leap_month = self._get_leap_month(year) if leap else 15
for m in range(1, month + (month > leap_month)):
span_days += self._lunar_month_days(year, m)
span_days += day - 1
return self.SOLAR_START_DATE + timedelta(span_days)
def vesak_date(self, year: int) -> date:
"""
Calculate the estimated Gregorian date of Vesak for Thailand, Laos,
Singapore and Indonesia, corresponding to the fourteenth day of the
fourth month in the Chinese lunar calendar. See `Wikipedia
<https://en.wikipedia.org/wiki/Vesak#Dates_of_observance>`__.
:param year:
The Gregorian year.
:return:
Estimated Gregorian date of Vesak (14th day of 4th month of the
lunar calendar).
"""
span_days = self._span_days(year)
leap_month = self._get_leap_month(year)
for m in range(1, 4 + (4 > leap_month)):
span_days += self._lunar_month_days(year, m)
span_days += 14
return self.SOLAR_START_DATE + timedelta(span_days)
def vesak_may_date(self, year: int) -> date:
"""
Calculate the estimated Gregorian date of Vesak for Sri Lanka, Nepal,
India, Bangladesh and Malaysia, corresponding to the day of the
first full moon in May in the Gregorian calendar. See `Wikipedia
<https://en.wikipedia.org/wiki/Vesak#Dates_of_observance>`__.
:param year:
The Gregorian year.
:return:
Estimated Gregorian date of Vesak (first full moon in May).
"""
span_days = self._span_days(year)
vesak_may_date = self.SOLAR_START_DATE + timedelta(span_days + 14)
m = 1
while vesak_may_date.month < 5:
vesak_may_date += timedelta(self._lunar_month_days(year, m))
m += 1
return vesak_may_date
def s_diwali_date(self, year: int) -> date:
"""
Calculate the estimated Gregorian date of Southern India (Tamil)
Diwali.
Defined as the date of Amāvásyā (new moon) of Kārttikai, which
corresponds with the months of November or December in the Gregorian
calendar. See `Wikipedia <https://en.wikipedia.org/wiki/Diwali>`__.
:param year:
The Gregorian year.
:return:
Estimated Gregorian date of Southern India (Tamil) Diwali.
"""
span_days = self._span_days(year)
leap_month = self._get_leap_month(year)
for m in range(1, 10 + (10 > leap_month)):
span_days += self._lunar_month_days(year, m)
span_days -= 2
return self.SOLAR_START_DATE + timedelta(span_days)
def thaipusam_date(self, year: int) -> date:
"""
Calculate the estimated Gregorian date of Thaipusam (Tamil).
Defined as the date of the full moon in the Tamil month of Thai, which
corresponds with the months of January or February in the Gregorian
calendar. See `Wikipedia <https://en.wikipedia.org/wiki/Thaipusam>`__.
:param year:
The Gregorian year.
:return:
Estimated Gregorian date of Thaipusam (Tamil).
"""
span_days = self._span_days(year)
leap_month = self._get_leap_month(year)
for m in range(1, 1 + (leap_month <= 6)):
span_days += self._lunar_month_days(year, m)
span_days -= 15
return self.SOLAR_START_DATE + timedelta(span_days)
| dr-prodigy/python-holidays | holidays/utils.py | Python | mit | 21,903 | [
"COLUMBUS"
] | 7b878b0f353fad4af3ca329362cb5d64b63a69ad10f10624eef55c23d157c074 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines tools to generate and analyze phase diagrams.
"""
import collections
import itertools
import json
import logging
import math
import os
import re
import sys
from functools import lru_cache
import numpy as np
import plotly.graph_objs as go
from monty.json import MontyDecoder, MSONable
from scipy.optimize import minimize
from scipy.spatial import ConvexHull
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import DummySpecies, Element, get_el_sp
from pymatgen.entries import Entry
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.plotting import pretty_plot
from pymatgen.util.string import htmlify, latexify
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
logger = logging.getLogger(__name__)
# Pre-load the shared plotly layout/template definitions once at import time;
# they are reused by the phase-diagram plotting helpers in this module.
with open(os.path.join(os.path.dirname(__file__), "..", "util", "plotly_pd_layouts.json")) as f:
    plotly_layouts = json.load(f)
class PDEntry(Entry):
    """
    An object encompassing all relevant data for phase diagrams.

    Attributes:
        composition (Composition): The composition associated with the PDEntry.
        energy (float): The energy associated with the entry.
        name (str): A name for the entry. This is the string shown in the phase diagrams.
            By default, this is the reduced formula for the composition, but can be
            set to some other string for display purposes.
        attribute (MSONable): An arbitrary attribute. Can be used to specify that the
            entry is a newly found compound, or to specify a particular label for
            the entry, etc. An attribute can be anything but must be MSONable.
    """

    def __init__(
        self,
        composition: Composition,
        energy: float,
        name: str = None,
        attribute: object = None,
    ):
        """
        Args:
            composition (Composition): Composition
            energy (float): Energy for composition.
            name (str): Optional parameter to name the entry. Defaults
                to the reduced chemical formula.
            attribute: Optional attribute of the entry. Must be MSONable.
        """
        super().__init__(composition, energy)
        # Fall back to the reduced formula when no explicit name was given.
        self.name = name if name else self.composition.reduced_formula
        self.attribute = attribute

    @property
    def energy(self) -> float:
        """
        Returns:
            the energy of the entry.
        """
        return self._energy

    def as_dict(self):
        """
        Returns:
            MSONable dict representation of the PDEntry.
        """
        return_dict = super().as_dict()
        return_dict.update({"name": self.name, "attribute": self.attribute})
        return return_dict

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): dictionary representation of PDEntry

        Returns:
            PDEntry
        """
        # name/attribute are optional keys for backwards compatibility with
        # dicts serialized before they were added.
        return cls(
            Composition(d["composition"]),
            d["energy"],
            d["name"] if "name" in d else None,
            d["attribute"] if "attribute" in d else None,
        )
class GrandPotPDEntry(PDEntry):
    """
    A grand potential pd entry object encompassing all relevant data for phase
    diagrams. Chemical potentials are given as a element-chemical potential
    dict.
    """

    def __init__(self, entry, chempots, name=None):
        """
        Args:
            entry: A PDEntry-like object.
            chempots: Chemical potential specification as {Element: float}.
            name: Optional parameter to name the entry. Defaults to the reduced
                chemical formula of the original entry.
        """
        super().__init__(
            entry.composition,
            entry.energy,
            name if name else entry.name,
            entry.attribute if hasattr(entry, "attribute") else None,
        )
        # NOTE if we init GrandPotPDEntry from ComputedEntry _energy is the
        # corrected energy of the ComputedEntry hence the need to keep
        # the original entry to not lose data.
        self.original_entry = entry
        self.original_comp = self._composition
        self.chempots = chempots

    @property
    def composition(self) -> Composition:
        """The composition after removing free species

        Returns:
            Composition
        """
        # Open (chempot-controlled) species are excluded from the effective
        # composition used for the grand-potential phase diagram.
        return Composition({el: self._composition[el] for el in self._composition.elements if el not in self.chempots})

    @property
    def chemical_energy(self):
        """The chemical energy term mu*N in the grand potential

        Returns:
            The chemical energy term mu*N in the grand potential
        """
        # Sum of mu_i * N_i over the open species.
        return sum(self._composition[el] * pot for el, pot in self.chempots.items())

    @property
    def energy(self):
        """
        Returns:
            The grand potential energy
        """
        # Grand potential: E - sum(mu_i * N_i).
        return self._energy - self.chemical_energy

    def __repr__(self):
        chempot_str = " ".join([f"mu_{el} = {mu:.4f}" for el, mu in self.chempots.items()])
        return "GrandPotPDEntry with original composition " + "{}, energy = {:.4f}, {}".format(
            self.original_entry.composition, self.original_entry.energy, chempot_str
        )

    def as_dict(self):
        """
        Returns:
            MSONable dictionary representation of GrandPotPDEntry
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "entry": self.original_entry.as_dict(),
            # Element keys are serialized by symbol for JSON compatibility.
            "chempots": {el.symbol: u for el, u in self.chempots.items()},
            "name": self.name,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): dictionary representation of GrandPotPDEntry

        Returns:
            GrandPotPDEntry
        """
        chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(entry, chempots, d["name"])
class TransformedPDEntry(PDEntry):
    """
    This class represents a TransformedPDEntry, which allows for a PDEntry to be
    transformed to a different composition coordinate space. It is used in the
    construction of phase diagrams that do not have elements as the terminal
    compositions.
    """

    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5

    def __init__(self, entry, sp_mapping, name=None):
        """
        Args:
            entry (PDEntry): Original entry to be transformed.
            sp_mapping ({Composition: DummySpecies}): dictionary
                mapping Terminal Compositions to Dummy Species

        Raises:
            TransformedPDEntryError: if the entry cannot be expressed as a
                reaction consuming positive amounts of the terminal
                compositions.
        """
        super().__init__(
            entry.composition,
            entry.energy,
            name if name else entry.name,
            entry.attribute if hasattr(entry, "attribute") else None,
        )
        self.original_entry = entry
        self.sp_mapping = sp_mapping

        # Express this entry's composition as a reaction of the terminal
        # compositions (the keys of sp_mapping).
        self.rxn = Reaction(list(self.sp_mapping.keys()), [self._composition])
        self.rxn.normalize_to(self.original_entry.composition)

        # NOTE We only allow reactions that have positive amounts of reactants.
        # NOTE(review): reactant coefficients appear to use a negative sign
        # convention here (coeff <= tol means a positively-consumed reactant)
        # — confirm against the Reaction class.
        if not all(self.rxn.get_coeff(comp) <= TransformedPDEntry.amount_tol for comp in self.sp_mapping.keys()):
            raise TransformedPDEntryError("Only reactions with positive amounts of reactants allowed")

    @property
    def composition(self) -> Composition:
        """The composition in the dummy species space

        Returns:
            Composition
        """
        # NOTE this is not infallible as the original entry is mutable and an
        # end user could choose to normalize or change the original entry.
        # However, the risk of this seems low.
        factor = self._composition.num_atoms / self.original_entry.composition.num_atoms

        # Map each terminal composition's (negated) reaction coefficient onto
        # its dummy species, dropping terminals consumed in negligible amounts.
        trans_comp = {self.sp_mapping[comp]: -self.rxn.get_coeff(comp) for comp in self.sp_mapping}

        trans_comp = {k: v * factor for k, v in trans_comp.items() if v > TransformedPDEntry.amount_tol}

        return Composition(trans_comp)

    def __repr__(self):
        output = [
            f"TransformedPDEntry {self.composition}",
            f" with original composition {self.original_entry.composition}",
            f", E = {self.original_entry.energy:.4f}",
        ]
        return "".join(output)

    def as_dict(self):
        """
        Returns:
            MSONable dictionary representation of TransformedPDEntry
        """
        d = {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "sp_mapping": self.sp_mapping,
        }
        d.update(self.original_entry.as_dict())
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): dictionary representation of TransformedPDEntry

        Returns:
            TransformedPDEntry
        """
        # Pop sp_mapping so the remaining dict decodes as the original entry.
        sp_mapping = d["sp_mapping"]
        del d["sp_mapping"]
        entry = MontyDecoder().process_decoded(d)
        return cls(entry, sp_mapping)
class TransformedPDEntryError(Exception):
    """Raised when a valid TransformedPDEntry cannot be constructed."""
class PhaseDiagram(MSONable):
"""
BasePhaseDiagram is not intended to be used directly, and PhaseDiagram should be preferred.
When constructing a PhaseDiagram, a lot of heavy processing is performed to calculate the
phase diagram information such as facets, simplexes, etc. The BasePhaseDiagram offers a way to
store this information so that a phase diagram can be re-constructed without doing this heavy
processing. It is primarily intended for database applications.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
    def __init__(self, entries, elements=None, *, computed_data=None):
        """
        Simple phase diagram class taking in elements and entries as inputs.
        The algorithm is based on the work in the following papers:

        1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
           First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
           doi:10.1021/cm702327g

        2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
           of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
           principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
           doi:10.1016/j.elecom.2010.01.010

        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the the entries themselves and are sorted alphabetically.
                If specified, element ordering (e.g. for pd coordinates)
                is preserved.
            computed_data (dict): A dict containing pre-computed data. This allows
                PhaseDiagram object to be reconstituted without performing the
                expensive convex hull computation. The dict is the output from the
                PhaseDiagram._compute() method and is stored in PhaseDigram.computed_data
                when generated for the first time.

        Attributes:
            dim (int): The dimensionality of the phase diagram.
            elements: Elements in the phase diagram.
            el_refs: List of elemental references for the phase diagrams. These are
                entries corresponding to the lowest energy element entries for simple
                compositional phase diagrams.
            all_entries: All entries provided for Phase Diagram construction. Note that this
                does not mean that all these entries are actually used in the phase
                diagram. For example, this includes the positive formation energy
                entries that are filtered out before Phase Diagram construction.
            qhull_entries: Actual entries used in convex hull. Excludes all positive formation
                energy entries.
            qhull_data: Data used in the convex hull operation. This is essentially a matrix of
                composition data and energy per atom values created from qhull_entries.
            facets: Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
                For a ternary, it is the indices (references to qhull_entries and
                qhull_data) for the vertices of the phase triangles. Similarly
                extended to higher D simplices for higher dimensions.
            simplices: The simplices of the phase diagram as a list of np.ndarray, i.e.,
                the list of stable compositional coordinates in the phase diagram.
        """
        self.elements = elements
        self.entries = entries
        # Only run the expensive hull construction when no pre-computed data
        # was supplied (e.g. when not reconstituting from a database).
        if computed_data is None:
            computed_data = self._compute()
        self.computed_data = computed_data
        # Unpack the computed data into the documented public attributes.
        self.facets = computed_data["facets"]
        self.simplexes = computed_data["simplexes"]
        self.all_entries = computed_data["all_entries"]
        self.qhull_data = computed_data["qhull_data"]
        self.dim = computed_data["dim"]
        self.el_refs = dict(computed_data["el_refs"])
        self.qhull_entries = computed_data["qhull_entries"]
        # Stable entries are those appearing as a vertex of at least one facet.
        self.stable_entries = {self.qhull_entries[i] for i in set(itertools.chain(*self.facets))}
def as_dict(self):
"""
:return: MSONAble dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements],
"computed_data": self.computed_data,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PhaseDiagram
"""
entries = [MontyDecoder().process_decoded(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
computed_data = d.get("computed_data")
return cls(entries, elements, computed_data=computed_data)
    def _compute(self):
        # Performs the full convex-hull construction. Returns the dict stored
        # in self.computed_data (see __init__ for the meaning of each key).
        if self.elements is None:
            # Determine the chemical system from the entries themselves.
            self.elements = sorted({els for e in self.entries for els in e.composition.elements})

        elements = list(self.elements)
        dim = len(elements)

        entries = sorted(self.entries, key=lambda e: e.composition.reduced_composition)

        el_refs = {}
        min_entries = []
        all_entries = []
        # Group polymorphs by reduced composition and keep only the lowest
        # energy-per-atom entry of each group for the hull.
        for c, g in itertools.groupby(entries, key=lambda e: e.composition.reduced_composition):
            g = list(g)
            min_entry = min(g, key=lambda e: e.energy_per_atom)
            if c.is_element:
                el_refs[c.elements[0]] = min_entry
            min_entries.append(min_entry)
            all_entries.extend(g)

        # Every terminal element needs a reference entry.
        if len(el_refs) != dim:
            missing = set(elements).difference(el_refs.keys())
            raise ValueError(f"There are no entries for the terminal elements: {missing}")

        # Rows: [atomic fractions for each element..., energy per atom].
        data = np.array(
            [[e.composition.get_atomic_fraction(el) for el in elements] + [e.energy_per_atom] for e in min_entries]
        )

        # Use only entries with negative formation energy
        vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
        form_e = -np.dot(data, vec)
        inds = np.where(form_e < -PhaseDiagram.formation_energy_tol)[0].tolist()

        # Add the elemental references
        inds.extend([min_entries.index(el) for el in el_refs.values()])

        qhull_entries = [min_entries[i] for i in inds]
        # Drop the first composition column (fractions sum to 1, so it is
        # redundant): reduced-dimension coordinates + energy.
        qhull_data = data[inds][:, 1:]

        # Add an extra point to enforce full dimensionality.
        # This point will be present in all upper hull facets.
        extra_point = np.zeros(dim) + 1 / dim
        extra_point[-1] = np.max(qhull_data) + 1
        qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)

        if dim == 1:
            # Trivial 1-element "hull": just the minimum-energy point.
            facets = [qhull_data.argmin(axis=0)]
        else:
            facets = get_facets(qhull_data)
            final_facets = []
            for facet in facets:
                # Skip facets that include the extra point
                if max(facet) == len(qhull_data) - 1:
                    continue
                m = qhull_data[facet]
                m[:, -1] = 1
                # Discard degenerate (zero-volume) facets.
                if abs(np.linalg.det(m)) > 1e-14:
                    final_facets.append(facet)
            facets = final_facets

        simplexes = [Simplex(qhull_data[f, :-1]) for f in facets]
        self.elements = elements
        return dict(
            facets=facets,
            simplexes=simplexes,
            all_entries=all_entries,
            qhull_data=qhull_data,
            dim=dim,
            # Dictionary with Element keys is not JSON-serializable
            el_refs=list(el_refs.items()),
            qhull_entries=qhull_entries,
        )
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
Args:
comp (Composition): A composition
Returns:
The coordinates for a given composition in the PhaseDiagram's basis
"""
if set(comp.elements).difference(self.elements):
raise ValueError(f"{comp} has elements not in the phase diagram {self.elements}")
return np.array([comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
"""
Returns:
The actual ndarray used to construct the convex hull.
"""
data = []
data = [
[e.composition.get_atomic_fraction(el) for el in self.elements] + [e.energy_per_atom]
for e in self.all_entries
]
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Returns:
list of Entries that are unstable in the phase diagram.
Includes positive formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
def get_reference_energy_per_atom(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Reference energy of the terminal species at a given composition.
"""
return sum(comp[el] * self.el_refs[el].energy_per_atom for el in comp.elements) / comp.num_atoms
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry (PDEntry): A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum(c[el] * self.el_refs[el].energy_per_atom for el in c.elements)
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry (PDEntry): An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
symbols = [el.symbol for el in self.elements]
output = [
"{} phase diagram".format("-".join(symbols)),
f"{len(self.stable_entries)} stable phases: ",
", ".join([entry.name for entry in self.stable_entries]),
]
return "\n".join(output)
    @lru_cache(1)
    def _get_facet_and_simplex(self, comp):
        """
        Get any facet that a composition falls into. Cached so successive
        calls at same composition are fast.

        Args:
            comp (Composition): A composition

        Returns:
            (facet, Simplex) for the first simplex containing the composition.

        Raises:
            RuntimeError: if the composition lies in no simplex.
        """
        # NOTE(review): lru_cache on an instance method keys on (self, comp)
        # and keeps the PhaseDiagram alive for the cache's lifetime; maxsize=1
        # means only the most recent composition is cached.
        c = self.pd_coords(comp)
        for f, s in zip(self.facets, self.simplexes):
            # Looser tolerance than numerical_tol to be robust at boundaries.
            if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
                return f, s
        raise RuntimeError(f"No facet found for comp = {comp}")
def _get_all_facets_and_simplexes(self, comp):
"""
Get all facets that a composition falls into.
Args:
comp (Composition): A composition
"""
c = self.pd_coords(comp)
all_facets = [
f for f, s in zip(self.facets, self.simplexes) if s.in_simplex(c, PhaseDiagram.numerical_tol / 10)
]
if not len(all_facets):
raise RuntimeError(f"No facets found for comp = {comp}")
return all_facets
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{element: chempot} for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def _get_simplex_intersections(self, c1, c2):
"""
Returns co-ordinates of the itersection of the tie line between two compositions
and the simplexes of the PhaseDiagram.
Args:
c1: Reduced dimension co-ordinates of first composition
c2: Reduced dimension co-ordinates of second composition
Returns:
Array of the intersections between the tie line and the simplexes of
the PhaseDiagram
"""
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
return np.array(intersections)
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp (Composition): A composition
Returns:
Decomposition as a dict of {PDEntry: amount} where amount
is the amount of the fractional composition.
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {
self.qhull_entries[f]: amt for f, amt in zip(facet, decomp_amts) if abs(amt) > PhaseDiagram.numerical_tol
}
def get_decomp_and_hull_energy_per_atom(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition per atom
"""
decomp = self.get_decomposition(comp)
return decomp, sum(e.energy_per_atom * n for e, n in decomp.items())
def get_hull_energy_per_atom(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition.
"""
return self.get_decomp_and_hull_energy_per_atom(comp)[1]
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
return comp.num_atoms * self.get_hull_energy_per_atom(comp)
    def get_decomp_and_e_above_hull(self, entry, allow_negative=False, check_stable=True):
        """
        Provides the decomposition and energy above convex hull for an entry.
        Due to caching, can be much faster if entries with the same composition
        are processed together.

        Args:
            entry (PDEntry): A PDEntry like object
            allow_negative (bool): Whether to allow negative e_above_hulls. Used to
                calculate equilibrium reaction energies. Defaults to False.
            check_stable (bool): Whether to first check whether an entry is stable.
                In normal circumstances, this is the faster option since checking for
                stable entries is relatively fast. However, if you have a huge proportion
                of unstable entries, then this check can slow things down. You should then
                set this to False.

        Returns:
            (decomp, energy_above_hull). The decomposition is provided
            as a dict of {PDEntry: amount} where amount is the amount of the
            fractional composition. Stable entries should have energy above
            convex hull of 0. The energy is given per atom.

        Raises:
            ValueError: if the energy above hull is negative and
                ``allow_negative`` is False.
        """
        # Avoid computation for stable_entries.
        # NOTE scaled duplicates of stable_entries will not be caught.
        if check_stable and entry in self.stable_entries:
            return {entry: 1}, 0

        decomp, hull_energy = self.get_decomp_and_hull_energy_per_atom(entry.composition)
        e_above_hull = entry.energy_per_atom - hull_energy

        # A tiny negative value within numerical_tol is treated as zero-ish
        # and accepted; anything more negative indicates an inconsistency.
        if allow_negative or e_above_hull >= -PhaseDiagram.numerical_tol:
            return decomp, e_above_hull

        raise ValueError(f"No valid decomp found for {entry}! (e {e_above_hull})")
def get_e_above_hull(self, entry, **kwargs):
"""
Provides the energy above convex hull for an entry
Args:
entry (PDEntry): A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0. The energy is given per atom.
"""
return self.get_decomp_and_e_above_hull(entry, **kwargs)[1]
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry (PDEntry): A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0. The energy is given per atom.
"""
# NOTE scaled duplicates of stable_entries will not be caught.
if entry not in self.stable_entries:
raise ValueError(
f"{entry} is unstable, the equilibrium reaction energy is available only for stable entries."
)
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry, allow_negative=True)[1]
def get_decomp_and_phase_separation_energy(
self,
entry,
space_limit=200,
stable_only=False,
tols=[1e-8],
maxiter=1000,
):
"""
Provides the combination of entries in the PhaseDiagram that gives the
lowest formation enthalpy with the same composition as the given entry
excluding entries with the same composition and the energy difference
per atom between the given entry and the energy of the combination found.
For unstable entries that are not polymorphs of stable entries (or completely
novel entries) this is simply the energy above (or below) the convex hull.
For entries with the same composition as one of the stable entries in the
phase diagram setting `stable_only` to `False` (Default) allows for entries
not previously on the convex hull to be considered in the combination.
In this case the energy returned is what is referred to as the decomposition
enthalpy in:
1. Bartel, C., Trewartha, A., Wang, Q., Dunn, A., Jain, A., Ceder, G.,
A critical examination of compound stability predictions from
machine-learned formation energies, npj Computational Materials 6, 97 (2020)
For stable entries setting `stable_only` to `True` returns the same energy
as `get_equilibrium_reaction_energy`. This function is based on a constrained
optimisation rather than recalculation of the convex hull making it
algorithmically cheaper. However, if `tol` is too loose there is potential
for this algorithm to converge to a different solution.
Args:
entry (PDEntry): A PDEntry like object.
space_limit (int): The maximum number of competing entries to consider
before calculating a second convex hull to reducing the complexity
of the optimization.
stable_only (bool): Only use stable materials as competing entries.
tol (list): Tolerences for convergence of the SLSQP optimization
when finding the equilibrium reaction. Tighter tolerences tested first.
maxiter (int): The maximum number of iterations of the SLSQP optimizer
when finding the equilibrium reaction.
Returns:
(decomp, energy). The decompostion is given as a dict of {PDEntry, amount}
for all entries in the decomp reaction where amount is the amount of the
fractional composition. The phase separation energy is given per atom.
"""
# NOTE comprhys: for large PatchedPhaseDiagrams this is a bottleneck,
# I have a solution but leaving to later PR with PatchedPhaseDiagram
# For unstable or novel materials use simplex approach
if entry.composition.fractional_composition not in [
e.composition.fractional_composition for e in self.stable_entries
]:
return self.get_decomp_and_e_above_hull(entry, allow_negative=True)
# Handle elemental materials
if entry.is_element:
return self.get_decomp_and_e_above_hull(entry, allow_negative=True)
# Select space to compare against
if stable_only:
compare_entries = self.stable_entries
else:
compare_entries = self.qhull_entries
# take entries with negative formation enthalpies as competing entries
competing_entries = [
c
for c in compare_entries
if (c.composition.fractional_composition != entry.composition.fractional_composition)
if set(c.composition.elements).issubset(entry.composition.elements)
]
# NOTE SLSQP optimizer doesn't scale well for > 300 competing entries. As a
# result in phase diagrams where we have too many competing entries we can
# reduce the number by looking at the first and second convex hulls. This
# requires computing the convex hull of a second (hopefully smallish) space
# and so is not done by default
if len(competing_entries) > space_limit and not stable_only:
inner_hull = PhaseDiagram(
list(
set.intersection(
set(competing_entries), # same chemical space
set(self.qhull_entries), # negative E_f
set(self.unstable_entries), # not already on hull
)
)
+ list(self.el_refs.values())
) # terminal points
competing_entries = list(self.stable_entries.union(inner_hull.stable_entries))
competing_entries = [c for c in competing_entries if c != entry]
decomp = _get_slsqp_decomp(entry.composition, competing_entries, tols, maxiter)
# find the minimum alternative formation energy for the decomposition
decomp_enthalpy = np.sum([c.energy_per_atom * amt for c, amt in decomp.items()])
decomp_enthalpy = entry.energy_per_atom - decomp_enthalpy
return decomp, decomp_enthalpy
def get_phase_separation_energy(self, entry, **kwargs):
"""
Provides the energy to the convex hull for the given entry. For stable entries
already in the phase diagram the algorithm provides the phase separation energy
which is refered to as the decomposition enthalpy in:
1. Bartel, C., Trewartha, A., Wang, Q., Dunn, A., Jain, A., Ceder, G.,
A critical examination of compound stability predictions from
machine-learned formation energies, npj Computational Materials 6, 97 (2020)
Args:
entry (PDEntry): A PDEntry like object
**kwargs: Keyword args passed to `get_decomp_and_decomp_energy`
space_limit (int): The maximum number of competing entries to consider.
stable_only (bool): Only use stable materials as competing entries
tol (float): The tolerence for convergence of the SLSQP optimization
when finding the equilibrium reaction.
maxiter (int): The maximum number of iterations of the SLSQP optimizer
when finding the equilibrium reaction.
Returns:
phase separation energy per atom of entry. Stable entries should have
energies <= 0, Stable elemental entries should have energies = 0 and
unstable entries should have energies > 0. Entries that have the same
composition as a stable energy may have postive or negative phase
separation energies depending on their own energy.
"""
return self.get_decomp_and_phase_separation_energy(entry, **kwargs)[1]
def get_composition_chempots(self, comp):
"""
Get the chemical potentials for all elements at a given composition.
Args:
comp (Composition): Composition
Returns:
Dictionary of chemical potentials.
"""
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_all_chempots(self, comp):
"""
Get chemical potentials at a given compositon.
Args:
comp (Composition): Composition
Returns:
Chemical potentials.
"""
all_facets = self._get_all_facets_and_simplexes(comp)
chempots = {}
for facet in all_facets:
facet_name = "-".join([self.qhull_entries[j].name for j in facet])
chempots[facet_name] = self._get_facet_chempots(facet)
return chempots
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
    def get_critical_compositions(self, comp1, comp2):
        """
        Get the critical compositions along the tieline between two
        compositions. I.e. where the decomposition products change.
        The endpoints are also returned.

        Args:
            comp1, comp2 (Composition): compositions that define the tieline

        Returns:
            [(Composition)]: list of critical compositions. All are of
            the form x * comp1 + (1-x) * comp2
        """
        # Total atom counts; used below to convert normalized mixing
        # fractions back to un-normalized compositions.
        n1 = comp1.num_atoms
        n2 = comp2.num_atoms
        pd_els = self.elements

        # NOTE the reduced dimensionality Simplexes don't use the
        # first element in the PD
        c1 = self.pd_coords(comp1)
        c2 = self.pd_coords(comp2)

        # NOTE none of the projections work if c1 == c2, so just
        # return *copies* of the inputs
        if np.all(c1 == c2):
            return [comp1.copy(), comp2.copy()]

        # NOTE made into method to facilitate inheritance of this method
        # in PatchedPhaseDiagram if approximate solution can be found.
        intersections = self._get_simplex_intersections(c1, c2)

        # find position along line: l is the unit vector along the tieline,
        # proj is each intersection's signed distance from c1 along it.
        l = c2 - c1
        l /= np.sum(l ** 2) ** 0.5
        proj = np.dot(intersections - c1, l)

        # only take compositions between endpoints
        proj = proj[
            np.logical_and(proj > -self.numerical_tol, proj < proj[1] + self.numerical_tol)  # proj[1] is |c2-c1|
        ]
        proj.sort()

        # only unique compositions (drop projections closer than the tolerance)
        valid = np.ones(len(proj), dtype=bool)
        valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
        proj = proj[valid]

        # Cartesian coordinates of the surviving critical points.
        ints = c1 + l * proj[:, None]

        # reconstruct full-dimensional composition array (first element's
        # fraction is 1 minus the sum of the reduced coordinates)
        cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T, ints], axis=-1)

        # mixing fraction when compositions are normalized
        x = proj / np.dot(c2 - c1, l)

        # mixing fraction when compositions are not normalized
        x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
        num_atoms = n1 + (n2 - n1) * x_unnormalized
        cs *= num_atoms[:, None]

        return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
    def get_element_profile(self, element, comp, comp_tol=1e-5):
        """
        Provides the element evolution data for a composition.
        For example, can be used to analyze Li conversion voltages by varying
        uLi and looking at the phases formed. Also can be used to analyze O2
        evolution by varying uO2.

        Args:
            element: An element. Must be in the phase diagram.
            comp: A Composition
            comp_tol: The tolerance to use when calculating decompositions.
                Phases with amounts less than this tolerance are excluded.
                Defaults to 1e-5.
                NOTE(review): comp_tol is not referenced anywhere in this
                body — it appears to have no effect; confirm intent.

        Returns:
            Evolution data as a list of dictionaries of the following format:
            [ {'chempot': -10.487582010000001, 'evolution': -2.0,
            'reaction': Reaction Object], ...]
        """
        element = get_el_sp(element)
        if element not in self.elements:
            raise ValueError("get_transition_chempots can only be called with elements in the phase diagram.")

        # Composition with the varied element removed.
        gccomp = Composition({el: amt for el, amt in comp.items() if el != element})
        elref = self.el_refs[element]
        elcomp = Composition(element.symbol)
        evolution = []

        # Walk the critical compositions along the elcomp -> gccomp tieline,
        # skipping the pure-element endpoint itself ([1:]).
        for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
            decomp_entries = self.get_decomposition(cc).keys()
            decomp = [k.composition for k in decomp_entries]
            rxn = Reaction([comp], decomp + [elcomp])
            rxn.normalize_to(comp)
            # Nudge slightly toward the element to pick the facet on the
            # element-rich side of the critical composition.
            c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
            # Amount of the element evolved (negative of its reaction coeff).
            amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
            evolution.append(
                {
                    "chempot": c,
                    "evolution": amt,
                    "element_reference": elref,
                    "reaction": rxn,
                    "entries": decomp_entries,
                }
            )
        return evolution
    def get_chempot_range_map(self, elements, referenced=True, joggle=True):
        """
        Returns a chemical potential range map for each stable entry.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges
                of all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: If True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.
            joggle (boolean): Whether to joggle the input to avoid precision
                errors.

        Returns:
            Returns a dict of the form {entry: [simplices]}. The list of
            simplices are the sides of the N-1 dim polytope bounding the
            allowable chemical potential range of each entry.
        """
        # Chemical potentials of every facet, ordered like self.elements.
        all_chempots = []
        for facet in self.facets:
            chempots = self._get_facet_chempots(facet)
            all_chempots.append([chempots[el] for el in self.elements])

        # Column indices of the requested independent elements.
        inds = [self.elements.index(el) for el in elements]

        # Per-element reference energies to subtract (0 if not referenced).
        if referenced:
            el_energies = {el: self.el_refs[el].energy_per_atom for el in elements}
        else:
            el_energies = {el: 0.0 for el in elements}

        chempot_ranges = collections.defaultdict(list)
        vertices = [list(range(len(self.elements)))]

        # Convex hull in chemical-potential space only makes sense with more
        # points than dimensions; otherwise the single simplex above is used.
        if len(all_chempots) > len(self.elements):
            vertices = get_facets(all_chempots, joggle=joggle)

        for ufacet in vertices:
            for combi in itertools.combinations(ufacet, 2):
                data1 = self.facets[combi[0]]
                data2 = self.facets[combi[1]]
                # Entries shared between the two facets bound the chempot edge.
                common_ent_ind = set(data1).intersection(set(data2))
                if len(common_ent_ind) == len(elements):
                    common_entries = [self.qhull_entries[i] for i in common_ent_ind]
                    data = np.array([[all_chempots[i][j] - el_energies[self.elements[j]] for j in inds] for i in combi])
                    sim = Simplex(data)
                    for entry in common_entries:
                        chempot_ranges[entry].append(sim)

        return chempot_ranges
    def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
        """
        returns a set of chemical potentials corresponding to the vertices of
        the simplex in the chemical potential phase diagram.
        The simplex is built using all elements in the target_composition
        except dep_elt.
        The chemical potential of dep_elt is computed from the target
        composition energy.
        This method is useful to get the limiting conditions for
        defects computations for instance.

        Args:
            target_comp: A Composition object
            dep_elt: the element for which the chemical potential is computed
                from the energy of the stable phase at the target composition
            tol_en: a tolerance on the energy to set

        Returns:
            [{Element: mu}]: An array of conditions on simplex vertices for
            which each element has a chemical potential set to a given
            value. "absolute" values (i.e., not referenced to element energies)
        """
        # Elemental reference energies for all elements except dep_elt,
        # ordered like self.elements (minus dep_elt).
        muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if e != dep_elt])
        chempot_ranges = self.get_chempot_range_map([e for e in self.elements if e != dep_elt])

        # Pad the target composition with zero amounts of any missing element
        # so indexing below is uniform across self.elements.
        for e in self.elements:
            if e not in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})

        # Coefficients used to solve for mu(dep_elt) from the phase energy.
        coeff = [-target_comp[e] for e in self.elements if e != dep_elt]

        for e, chempots in chempot_ranges.items():
            if e.composition.reduced_composition == target_comp.reduced_composition:
                # Scale the entry energy to the target composition's formula units.
                multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempots:
                    for v in s._coords:
                        elts = [e for e in self.elements if e != dep_elt]
                        res = {}
                        for i, el in enumerate(elts):
                            res[el] = v[i] + muref[i]
                        # mu(dep_elt) from the energy balance of the phase.
                        res[dep_elt] = (np.dot(v + muref, coeff) + ef) / target_comp[dep_elt]
                        # Deduplicate vertices that agree within tol_en on
                        # every element's chemical potential.
                        already_in = False
                        for di in all_coords:
                            dict_equals = True
                            for k in di:
                                if abs(di[k] - res[k]) > tol_en:
                                    dict_equals = False
                                    break
                            if dict_equals:
                                already_in = True
                                break
                        if not already_in:
                            all_coords.append(res)
        # NOTE(review): if no stable phase matches target_comp, all_coords is
        # never assigned and this raises UnboundLocalError — confirm callers
        # only pass compositions of stable phases.
        return all_coords
    def get_chempot_range_stability_phase(self, target_comp, open_elt):
        """
        returns a set of chemical potentials corresponding to the max and min
        chemical potential of the open element for a given composition. It is
        quite common to have for instance a ternary oxide (e.g., ABO3) for
        which you want to know what are the A and B chemical potential leading
        to the highest and lowest oxygen chemical potential (reducing and
        oxidizing conditions). This is useful for defect computations.

        Args:
            target_comp: A Composition object
            open_elt: Element that you want to constrain to be max or min

        Returns:
            {Element: (mu_min, mu_max)}: Chemical potentials are given in
            "absolute" values (i.e., not referenced to 0)
        """
        # Elemental reference energies for all elements except open_elt.
        muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if e != open_elt])
        chempot_ranges = self.get_chempot_range_map([e for e in self.elements if e != open_elt])

        # Pad the target composition with zero amounts of missing elements.
        for e in self.elements:
            if e not in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})

        # Coefficients used to solve for mu(open_elt) from the phase energy.
        coeff = [-target_comp[e] for e in self.elements if e != open_elt]
        max_open = -float("inf")
        min_open = float("inf")
        max_mus = None
        min_mus = None

        for e, chempots in chempot_ranges.items():
            if e.composition.reduced_composition == target_comp.reduced_composition:
                # Scale the entry energy to the target composition's formula units.
                multiplicator = e.composition[open_elt] / target_comp[open_elt]
                ef = e.energy / multiplicator
                all_coords = []
                # Scan every simplex vertex, tracking the extreme values of
                # mu(open_elt) and the vertex coordinates that produce them.
                for s in chempots:
                    for v in s._coords:
                        all_coords.append(v)
                        test_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
                        if test_open > max_open:
                            max_open = test_open
                            max_mus = v
                        if test_open < min_open:
                            min_open = test_open
                            min_mus = v

        elts = [e for e in self.elements if e != open_elt]
        res = {}
        # (min, max) chemical potential window per constrained element,
        # shifted back to absolute values via the elemental references.
        for i, el in enumerate(elts):
            res[el] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
        res[open_elt] = (min_open, max_open)
        return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
    """
    A class representing a Grand potential phase diagram. Grand potential phase
    diagrams are essentially phase diagrams that are open to one or more
    components. To construct such phase diagrams, the relevant free energy is
    the grand potential, which can be written as the Legendre transform of the
    Gibbs free energy as follows

    Grand potential = G - u_X N_X

    The algorithm is based on the work in the following papers:

    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
       First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
       doi:10.1021/cm702327g

    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
       of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
       principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
       doi:10.1016/j.elecom.2010.01.010
    """

    def __init__(self, entries, chempots, elements=None, *, computed_data=None):
        """
        Standard constructor for grand potential phase diagram.

        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            chempots ({Element: float}): Specify the chemical potentials
                of the open elements.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the the entries themselves.
            computed_data (dict): Optional precomputed hull data, forwarded
                to the PhaseDiagram constructor to skip recomputation.
        """
        if elements is None:
            elements = {els for e in entries for els in e.composition.elements}
        self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
        # Only the elements not held open remain as phase-diagram axes.
        elements = set(elements).difference(self.chempots.keys())
        all_entries = [
            GrandPotPDEntry(e, self.chempots) for e in entries if len(elements.intersection(e.composition.elements)) > 0
        ]
        # BUG FIX: previously `computed_data=None` was passed unconditionally,
        # silently discarding any caller-supplied precomputed data.
        super().__init__(all_entries, elements, computed_data=computed_data)

    def __repr__(self):
        chemsys = "-".join([el.symbol for el in self.elements])
        chempots = ", ".join([f"u{el}={v}" for el, v in self.chempots.items()])
        output = [
            f"{chemsys} grand potential phase diagram with {chempots}",
            f"{len(self.stable_entries)} stable phases: ",
            ", ".join([entry.name for entry in self.stable_entries]),
        ]
        return "\n".join(output)

    def as_dict(self):
        """
        :return: MSONable dict
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "all_entries": [e.as_dict() for e in self.all_entries],
            "chempots": self.chempots,
            "elements": [e.as_dict() for e in self.elements],
        }

    @classmethod
    def from_dict(cls, d):
        """
        :param d: Dict representation
        :return: GrandPotentialPhaseDiagram
        """
        entries = MontyDecoder().process_decoded(d["all_entries"])
        elements = MontyDecoder().process_decoded(d["elements"])
        return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
    """
    Generates phase diagrams from compounds as terminations instead of
    elements.
    """

    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5

    def __init__(self, entries, terminal_compositions, normalize_terminal_compositions=True):
        """
        Initializes a CompoundPhaseDiagram.

        Args:
            entries ([PDEntry]): Sequence of input entries. For example,
                if you want a Li2O-P2O5 phase diagram, you might have all
                Li-P-O entries as an input.
            terminal_compositions ([Composition]): Terminal compositions of
                phase space. In the Li2O-P2O5 example, these will be the
                Li2O and P2O5 compositions.
            normalize_terminal_compositions (bool): Whether to normalize the
                terminal compositions to a per atom basis. If normalized,
                the energy above hulls will be consistent
                for comparison across systems. Non-normalized terminals are
                more intuitive in terms of compositional breakdowns.
        """
        self.original_entries = entries
        self.terminal_compositions = terminal_compositions
        self.normalize_terminals = normalize_terminal_compositions
        # Map the entries into the dummy-species coordinate system of the
        # terminal compositions; entries outside the space are dropped.
        (pentries, species_mapping) = self.transform_entries(entries, terminal_compositions)
        self.species_mapping = species_mapping
        super().__init__(pentries, elements=species_mapping.values())

    def transform_entries(self, entries, terminal_compositions):
        """
        Method to transform all entries to the composition coordinate in the
        terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, they are excluded. For example,
        Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
        compositions are represented by DummySpecies.

        Args:
            entries: Sequence of all input entries
            terminal_compositions: Terminal compositions of phase space.

        Returns:
            Sequence of TransformedPDEntries falling within the phase space.
        """
        new_entries = []
        if self.normalize_terminals:
            terminal_compositions = [c.fractional_composition for c in terminal_compositions]
        # Map terminal compositions to unique dummy species.
        sp_mapping = collections.OrderedDict()
        for i, comp in enumerate(terminal_compositions):
            # chr(102) == "f", so terminals become Xf, Xg, Xh, ...
            sp_mapping[comp] = DummySpecies("X" + chr(102 + i))

        for entry in entries:
            # NOTE this mutates the input entries: entries lacking an
            # `attribute` get one derived from their entry_id.
            if getattr(entry, "attribute", None) is None:
                entry.attribute = getattr(entry, "entry_id", None)

            try:
                transformed_entry = TransformedPDEntry(entry, sp_mapping)
                new_entries.append(transformed_entry)
            except ReactionError:
                # If the reaction can't be balanced, the entry does not fall
                # into the phase space. We ignore them.
                pass
            except TransformedPDEntryError:
                # If the reaction has negative amounts for reactants the
                # entry does not fall into the phase space.
                pass
        return new_entries, sp_mapping

    def as_dict(self):
        """
        Returns:
            MSONable dictionary representation of CompoundPhaseDiagram
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "original_entries": [e.as_dict() for e in self.original_entries],
            "terminal_compositions": [c.as_dict() for c in self.terminal_compositions],
            "normalize_terminal_compositions": self.normalize_terminals,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): dictionary representation of CompoundPhaseDiagram

        Returns:
            CompoundPhaseDiagram
        """
        dec = MontyDecoder()
        entries = dec.process_decoded(d["original_entries"])
        terminal_compositions = dec.process_decoded(d["terminal_compositions"])
        return cls(entries, terminal_compositions, d["normalize_terminal_compositions"])
class ReactionDiagram:
    """
    Analyzes the possible reactions between a pair of compounds, e.g.,
    an electrolyte and an electrode.
    """

    def __init__(self, entry1, entry2, all_entries, tol=1e-4, float_fmt="%.4f"):
        """
        Args:
            entry1 (ComputedEntry): Entry for 1st component. Note that
                corrections, if any, must already be pre-applied. This is to
                give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to an experimental data (such
                as EC molecule).
            entry2 (ComputedEntry): Entry for 2nd component. Note that
                corrections must already be pre-applied. This is to
                give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to an experimental data (such
                as EC molecule).
            all_entries ([ComputedEntry]): All other entries to be
                considered in the analysis. Note that corrections, if any,
                must already be pre-applied.
            tol (float): Tolerance to be used to determine validity of reaction.
            float_fmt (str): Formatting string to be applied to all floats.
                Determines number of decimal places in reaction string.
        """
        elements = set()
        for e in [entry1, entry2]:
            elements.update([el.symbol for el in e.composition.elements])

        elements = tuple(elements)  # Fix elements to ensure order.

        # Atomic-fraction vectors of the two reactants in that element order.
        comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el) for el in elements])
        comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el) for el in elements])
        r1 = entry1.composition.reduced_composition
        r2 = entry2.composition.reduced_composition

        logger.debug("%d total entries." % len(all_entries))

        pd = PhaseDiagram(all_entries + [entry1, entry2])
        terminal_formulas = [
            entry1.composition.reduced_formula,
            entry2.composition.reduced_formula,
        ]

        logger.debug("%d stable entries" % len(pd.stable_entries))
        logger.debug("%d facets" % len(pd.facets))
        logger.debug("%d qhull_entries" % len(pd.qhull_entries))

        rxn_entries = []
        done = []

        def fmt(fl):
            # Format a float for the human-readable reaction string.
            return float_fmt % fl

        # Each (facet, face) pair is a candidate set of products; solve the
        # linear system for product coefficients and the mixing ratio x.
        for facet in pd.facets:
            for face in itertools.combinations(facet, len(facet) - 1):
                face_entries = [pd.qhull_entries[i] for i in face]

                if any(e.composition.reduced_formula in terminal_formulas for e in face_entries):
                    continue

                try:
                    m = []
                    for e in face_entries:
                        m.append([e.composition.get_atomic_fraction(el) for el in elements])
                    m.append(comp_vec2 - comp_vec1)
                    m = np.array(m).T
                    coeffs = np.linalg.solve(m, comp_vec2)

                    x = coeffs[-1]
                    # pylint: disable=R1716
                    # Valid only if all coefficients are (near-)non-negative,
                    # they sum to ~1, and the mix is strictly interior.
                    if all(c >= -tol for c in coeffs) and (abs(sum(coeffs[:-1]) - 1) < tol) and (tol < x < 1 - tol):

                        c1 = x / r1.num_atoms
                        c2 = (1 - x) / r2.num_atoms
                        factor = 1 / (c1 + c2)

                        c1 *= factor
                        c2 *= factor

                        # Avoid duplicate reactions.
                        if any(np.allclose([c1, c2], cc) for cc in done):
                            continue

                        done.append((c1, c2))

                        rxn_str = "{} {} + {} {} -> ".format(
                            fmt(c1),
                            r1.reduced_formula,
                            fmt(c2),
                            r2.reduced_formula,
                        )
                        products = []
                        product_entries = []

                        # Reaction energy per atom: products minus reactants.
                        energy = -(x * entry1.energy_per_atom + (1 - x) * entry2.energy_per_atom)

                        for c, e in zip(coeffs[:-1], face_entries):
                            if c > tol:
                                r = e.composition.reduced_composition
                                products.append(f"{fmt(c / r.num_atoms * factor)} {r.reduced_formula}")
                                product_entries.append((c, e))
                                energy += c * e.energy_per_atom

                        rxn_str += " + ".join(products)
                        comp = x * comp_vec1 + (1 - x) * comp_vec2
                        entry = PDEntry(
                            Composition(dict(zip(elements, comp))),
                            energy=energy,
                            attribute=rxn_str,
                        )
                        entry.decomposition = product_entries
                        rxn_entries.append(entry)
                except np.linalg.LinAlgError:
                    # Singular system: this face cannot produce the mixture.
                    logger.debug(
                        "Reactants = %s"
                        % (
                            ", ".join(
                                [
                                    entry1.composition.reduced_formula,
                                    entry2.composition.reduced_formula,
                                ]
                            )
                        )
                    )
                    logger.debug("Products = %s" % (", ".join([e.composition.reduced_formula for e in face_entries])))

        rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)

        self.entry1 = entry1
        self.entry2 = entry2
        self.rxn_entries = rxn_entries
        self.labels = collections.OrderedDict()
        # Relabel entries 1..N and remember the reaction string per label.
        for i, e in enumerate(rxn_entries):
            self.labels[str(i + 1)] = e.attribute
            e.name = str(i + 1)
        self.all_entries = all_entries
        self.pd = pd

    def get_compound_pd(self):
        """
        Get the CompoundPhaseDiagram object, which can then be used for
        plotting.

        Returns:
            CompoundPhaseDiagram
        """
        # For this plot, since the reactions are reported in formation
        # energies, we need to set the energies of the terminal compositions
        # to 0. So we make create copies with 0 energy.
        entry1 = PDEntry(self.entry1.composition, 0)
        entry2 = PDEntry(self.entry2.composition, 0)

        cpd = CompoundPhaseDiagram(
            self.rxn_entries + [entry1, entry2],
            [
                Composition(entry1.composition.reduced_formula),
                Composition(entry2.composition.reduced_formula),
            ],
            normalize_terminal_compositions=False,
        )
        return cpd
class PhaseDiagramError(Exception):
    """Raised when Phase Diagram generation fails."""
def get_facets(qhull_data, joggle=False):
    """
    Get the simplex facets for the Convex hull.

    Args:
        qhull_data (np.ndarray): The data from which to construct the convex
            hull as a Nxd array (N being number of data points and d being the
            dimension)
        joggle (boolean): Whether to joggle the input to avoid precision
            errors.

    Returns:
        List of simplices of the Convex Hull.
    """
    # "QJ" joggles the input points slightly to sidestep Qhull precision
    # errors; "Qt" triangulates the output instead.
    options = "QJ i" if joggle else "Qt i"
    return ConvexHull(qhull_data, qhull_options=options).simplices
def _get_slsqp_decomp(
    comp,
    competing_entries,
    tols=(1e-8,),
    maxiter=1000,
):
    """
    Finds the amounts of competing compositions that minimize the energy of a
    given composition

    The algorithm is based on the work in the following paper:

    1. Bartel, C., Trewartha, A., Wang, Q., Dunn, A., Jain, A., Ceder, G.,
        A critical examination of compound stability predictions from
        machine-learned formation energies, npj Computational Materials 6, 97 (2020)

    Args:
        comp (Composition): A Composition to analyze
        competing_entries ([PDEntry]): List of entries to consider for decomposition
        tols (sequence of float): Tolerances to try for SLSQP convergence.
            Issues observed for tol > 1e-7 in the fractional composition
            (default (1e-8,)).
        maxiter (int): maximum number of SLSQP iterations

    Returns:
        decomposition as a dict of {PDEntry: amount} where amount
        is the amount of the fractional composition.
    """
    # Elemental amount present in given entry
    amts = comp.get_el_amt_dict()
    chemical_space = tuple(amts.keys())
    b = np.array([amts[el] for el in chemical_space])

    # Elemental amounts present in competing entries
    A_transpose = np.zeros((len(chemical_space), len(competing_entries)))
    for j, comp_entry in enumerate(competing_entries):
        amts = comp_entry.composition.get_el_amt_dict()
        for i, el in enumerate(chemical_space):
            A_transpose[i, j] = amts[el]

    # NOTE normalize arrays to avoid calls to fractional_composition
    b = b / np.sum(b)
    A_transpose = A_transpose / np.sum(A_transpose, axis=0)

    # Energies of competing entries
    Es = np.array([comp_entry.energy_per_atom for comp_entry in competing_entries])

    # Equality constraint: decomposition must reproduce the target composition.
    molar_constraint = {"type": "eq", "fun": lambda x: np.dot(A_transpose, x) - b, "jac": lambda x: A_transpose}

    options = {"maxiter": maxiter, "disp": False}

    # NOTE max_bound needs to be larger than 1
    max_bound = comp.num_atoms
    bounds = [(0, max_bound)] * len(competing_entries)
    x0 = [1 / len(competing_entries)] * len(competing_entries)

    # NOTE the tolerence needs to be tight to stop the optimization
    # from exiting before convergence is reached. Issues observed for
    # tol > 1e-7 in the fractional composition (default 1e-8).
    for tol in sorted(tols):
        solution = minimize(
            fun=lambda x: np.dot(x, Es),
            x0=x0,
            method="SLSQP",
            jac=lambda x: Es,
            bounds=bounds,
            constraints=[molar_constraint],
            tol=tol,
            options=options,
        )

        if solution.success:
            decomp_amts = solution.x
            return {
                c: amt  # NOTE this is the amount of the fractional composition.
                for c, amt in zip(competing_entries, decomp_amts)
                if amt > PhaseDiagram.numerical_tol
            }

    raise ValueError(f"No valid decomp found for {comp}!")
class PDPlotter:
"""
A plotter class for compositional phase diagrams.
"""
    def __init__(
        self,
        phasediagram: PhaseDiagram,
        show_unstable: float = 0.2,
        backend: Literal["plotly", "matplotlib"] = "plotly",
        **plotkwargs,
    ):
        """
        Args:
            phasediagram (PhaseDiagram): PhaseDiagram object.
            show_unstable (float): Whether unstable (above the hull) phases will be
                plotted. If a number > 0 is entered, all phases with
                e_hull < show_unstable (eV/atom) will be shown.
            backend ("plotly" | "matplotlib"): Python package used for plotting. Defaults to "plotly".
            **plotkwargs (dict): Keyword args passed to matplotlib.pyplot.plot. Can
                be used to customize markers etc. If not set, the default is
                {
                    "markerfacecolor": (0.2157, 0.4941, 0.7216),
                    "markersize": 10,
                    "linewidth": 3
                }
        """
        # note: palettable imports matplotlib
        from palettable.colorbrewer.qualitative import Set1_3

        self._pd = phasediagram
        self._dim = len(self._pd.elements)
        # Only up to quaternary systems can be projected for plotting.
        if self._dim > 4:
            raise ValueError("Only 1-4 components supported!")
        # 1-component diagrams have a single degenerate "line".
        self.lines = uniquelines(self._pd.facets) if self._dim > 1 else [[self._pd.facets[0][0], self._pd.facets[0][0]]]
        self.show_unstable = show_unstable
        self.backend = backend
        # Most-negative formation energy; used to scale plot axes.
        self._min_energy = min(self._pd.get_form_energy_per_atom(e) for e in self._pd.stable_entries)
        colors = Set1_3.mpl_colors
        # Empty **plotkwargs falls through to the documented defaults.
        self.plotkwargs = plotkwargs or {
            "markerfacecolor": colors[2],
            "markersize": 10,
            "linewidth": 3,
        }
    @property  # type: ignore
    @lru_cache(1)
    # NOTE(review): lru_cache on a method keys on `self` and keeps the
    # instance alive for the cache's lifetime (flake8-bugbear B019);
    # functools.cached_property may be preferable — confirm before changing.
    def pd_plot_data(self):
        """
        Plotting data for phase diagram. Cached for repetitive calls.
        2-comp - Full hull with energies
        3/4-comp - Projection into 2D or 3D Gibbs triangle.

        Returns:
            (lines, stable_entries, unstable_entries):
            - lines is a list of list of coordinates for lines in the PD.
            - stable_entries is a dict of {coordinates : entry} for each stable node
                in the phase diagram. (Each coordinate can only have one
                stable phase)
            - unstable_entries is a dict of {entry: coordinates} for all unstable
                nodes in the phase diagram.
        """
        pd = self._pd
        entries = pd.qhull_entries
        data = np.array(pd.qhull_data)
        lines = []
        stable_entries = {}

        # Project each hull line into plotting coordinates and record the
        # stable entry living at each endpoint.
        for line in self.lines:
            entry1 = entries[line[0]]
            entry2 = entries[line[1]]
            if self._dim < 3:
                # Binary: x is composition, y is formation energy per atom.
                x = [data[line[0]][0], data[line[1]][0]]
                y = [
                    pd.get_form_energy_per_atom(entry1),
                    pd.get_form_energy_per_atom(entry2),
                ]
                coord = [x, y]
            elif self._dim == 3:
                # Ternary: project into the 2D Gibbs triangle.
                coord = triangular_coord(data[line, 0:2])
            else:
                # Quaternary: project into the 3D Gibbs tetrahedron.
                coord = tet_coord(data[line, 0:3])
            lines.append(coord)
            labelcoord = list(zip(*coord))
            stable_entries[labelcoord[0]] = entry1
            stable_entries[labelcoord[1]] = entry2

        all_entries = pd.all_entries
        all_data = np.array(pd.all_entries_hulldata)
        unstable_entries = {}
        stable = pd.stable_entries

        # Unstable entries are plotted as degenerate (point) coordinates.
        for i, entry in enumerate(all_entries):
            if entry not in stable:
                if self._dim < 3:
                    x = [all_data[i][0], all_data[i][0]]
                    y = [
                        pd.get_form_energy_per_atom(entry),
                        pd.get_form_energy_per_atom(entry),
                    ]
                    coord = [x, y]
                elif self._dim == 3:
                    coord = triangular_coord([all_data[i, 0:2], all_data[i, 0:2]])
                else:
                    coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3], all_data[i, 0:3]])
                labelcoord = list(zip(*coord))
                unstable_entries[entry] = labelcoord[0]

        return lines, stable_entries, unstable_entries
def get_plot(
    self,
    label_stable=True,
    label_unstable=True,
    ordering=None,
    energy_colormap=None,
    process_attributes=False,
    plt=None,
    label_uncertainties=False,
):
    """
    Produce the phase diagram figure with the configured backend.

    Args:
        label_stable: Whether to label stable compounds.
        label_unstable: Whether to label unstable compounds.
        ordering: Ordering of vertices (matplotlib backend only).
        energy_colormap: Colormap for coloring energy (matplotlib backend only).
        process_attributes: Whether to process the attributes (matplotlib
            backend only).
        plt: Existing plt object if plotting multiple phase diagrams (
            matplotlib backend only).
        label_uncertainties: Whether to add error bars to the hull (plotly
            backend only). For binaries, this also shades the hull with the
            uncertainty window.

    Returns:
        go.Figure (plotly) or matplotlib.pyplot (matplotlib)
    """
    if self.backend == "plotly":
        # Trace order matters for z-ordering: hull lines/surfaces first,
        # then labels and markers on top.
        traces = [self._create_plotly_lines()]
        if self._dim == 3:
            traces.append(self._create_plotly_ternary_support_lines())
            traces.append(self._create_plotly_ternary_hull())
        labels_trace = self._create_plotly_stable_labels(label_stable)
        stable_trace, unstable_trace = self._create_plotly_markers(label_uncertainties)
        if self._dim == 2 and label_uncertainties:
            traces.append(self._create_plotly_uncertainty_shading(stable_trace))
        traces.extend([labels_trace, unstable_trace, stable_trace])
        figure = go.Figure(data=traces)
        figure.layout = self._create_plotly_figure_layout()
        return figure
    if self.backend == "matplotlib":
        if self._dim <= 3:
            return self._get_2d_plot(
                label_stable,
                label_unstable,
                ordering,
                energy_colormap,
                plt=plt,
                process_attributes=process_attributes,
            )
        if self._dim == 4:
            return self._get_3d_plot(label_stable)
    # Unknown backend or unsupported dimensionality.
    return None
def plot_element_profile(self, element, comp, show_label_index=None, xlim=5):
    """
    Draw the element profile plot for a composition varying different
    chemical potential of an element.

    X value is the negative value of the chemical potential reference to
    elemental chemical potential. For example, if choose Element("Li"),
    X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
    Y values represent for the number of element uptake in this composition
    (unit: per atom). All reactions are printed to help choosing the
    profile steps you want to show label in the plot.

    Args:
        element (Element): An element of which the chemical potential is
            considered. It also must be in the phase diagram.
        comp (Composition): A composition.
        show_label_index (list of integers): The labels for reaction products
            you want to show in the plot. Default to None (not showing any
            annotation for reaction products). For the profile steps you want
            to show the labels, just add it to the show_label_index. The
            profile step counts from zero. For example, you can set
            show_label_index=[0, 2, 5] to label profile step 0,2,5.
        xlim (float): The max x value. x value is from 0 to xlim. Default to
            5 eV.

    Returns:
        Plot of element profile evolution by varying the chemical potential
        of an element.
    """
    plt = pretty_plot(12, 8)
    pd = self._pd
    evolution = pd.get_element_profile(element, comp)
    num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
    # Energy of the elemental reference phase; used to shift the x-axis.
    element_energy = evolution[0]["chempot"]
    x1, x2, y1 = None, None, None
    for i, d in enumerate(evolution):
        # x-axis value: chemical potential relative to the elemental
        # reference, negated so the plot reads like a voltage profile.
        v = -(d["chempot"] - element_energy)
        if i != 0:
            # Vertical step connecting the previous plateau to the new one.
            plt.plot([x2, x2], [y1, d["evolution"] / num_atoms], "k", linewidth=2.5)
        x1 = v
        y1 = d["evolution"] / num_atoms
        if i != len(evolution) - 1:
            x2 = -(evolution[i + 1]["chempot"] - element_energy)
        else:
            # Last plateau extends to a fixed 5 eV cutoff.
            x2 = 5.0
        if show_label_index is not None and i in show_label_index:
            # Label non-elemental reaction products; digits in formulas are
            # converted to LaTeX subscripts.
            products = [
                re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
                for p in d["reaction"].products
                if p.reduced_formula != element.symbol
            ]
            plt.annotate(
                ", ".join(products),
                xy=(v + 0.05, y1 + 0.05),
                fontsize=24,
                color="r",
            )
            # Labeled plateaus are drawn in red for emphasis.
            plt.plot([x1, x2], [y1, y1], "r", linewidth=3)
        else:
            plt.plot([x1, x2], [y1, y1], "k", linewidth=2.5)
    plt.xlim((0, xlim))
    plt.xlabel("-$\\Delta{\\mu}$ (eV)")
    plt.ylabel("Uptake per atom")
    return plt
def show(self, *args, **kwargs):
    r"""
    Render the phase diagram with Plotly (or Matplotlib) and display it.

    Args:
        *args: Forwarded to get_plot.
        **kwargs: Forwarded to get_plot.
    """
    fig = self.get_plot(*args, **kwargs)
    fig.show()
def _get_2d_plot(
    self,
    label_stable=True,
    label_unstable=True,
    ordering=None,
    energy_colormap=None,
    vmin_mev=-60.0,
    vmax_mev=60.0,
    show_colorbar=True,
    process_attributes=False,
    plt=None,
):
    """
    Shows the plot using pylab. Contains import statements since matplotlib is a
    fairly extensive library to load.

    Args:
        label_stable: Whether to annotate stable entries with their names.
        label_unstable: Whether to annotate unstable entries with their names.
        ordering: Optional vertex ordering passed to order_phase_diagram.
        energy_colormap: None for plain black markers; "default" for the
            built-in green/red colormap; otherwise a matplotlib colormap.
        vmin_mev / vmax_mev: Colormap range in meV/atom.
        show_colorbar: Whether to draw a colorbar when a colormap is used.
        process_attributes: If True, entry.attribute distinguishes "existing"
            (circle) from new (star) compounds.
        plt: Existing pyplot module/object to draw into; created if None.

    Returns:
        The pyplot object used for drawing.
    """
    if plt is None:
        plt = pretty_plot(8, 6)
    from matplotlib.font_manager import FontProperties

    if ordering is None:
        (lines, labels, unstable) = self.pd_plot_data
    else:
        # Re-orient the cached plot data to the user-requested vertex order.
        (_lines, _labels, _unstable) = self.pd_plot_data
        (lines, labels, unstable) = order_phase_diagram(_lines, _labels, _unstable, ordering)
    if energy_colormap is None:
        if process_attributes:
            for x, y in lines:
                plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
            # One should think about a clever way to have "complex"
            # attributes with complex processing options but with a clear
            # logic. At this moment, I just use the attributes to know
            # whether an entry is a new compound or an existing (from the
            # ICSD or from the MP) one.
            for x, y in labels.keys():
                if labels[(x, y)].attribute is None or labels[(x, y)].attribute == "existing":
                    plt.plot(x, y, "ko", **self.plotkwargs)
                else:
                    plt.plot(x, y, "k*", **self.plotkwargs)
        else:
            for x, y in lines:
                plt.plot(x, y, "ko-", **self.plotkwargs)
    else:
        from matplotlib.cm import ScalarMappable
        from matplotlib.colors import LinearSegmentedColormap, Normalize

        for x, y in lines:
            plt.plot(x, y, "k-", markeredgecolor="k")
        # Colormap range is specified in meV/atom; convert to eV/atom.
        vmin = vmin_mev / 1000.0
        vmax = vmax_mev / 1000.0
        if energy_colormap == "default":
            # Piecewise colormap: green below zero, red above, with a hard
            # break at the zero-energy midpoint.
            mid = -vmin / (vmax - vmin)
            cmap = LinearSegmentedColormap.from_list(
                "my_colormap",
                [
                    (0.0, "#005500"),
                    (mid, "#55FF55"),
                    (mid, "#FFAAAA"),
                    (1.0, "#FF0000"),
                ],
            )
        else:
            cmap = energy_colormap
        norm = Normalize(vmin=vmin, vmax=vmax)
        _map = ScalarMappable(norm=norm, cmap=cmap)
        _energies = [self._pd.get_equilibrium_reaction_energy(entry) for coord, entry in labels.items()]
        # Clamp non-negative energies to a tiny negative value so stable
        # entries always map to the "green" side of the colormap.
        energies = [en if en < 0.0 else -0.00000001 for en in _energies]
        vals_stable = _map.to_rgba(energies)
        ii = 0
        if process_attributes:
            for x, y in labels.keys():
                if labels[(x, y)].attribute is None or labels[(x, y)].attribute == "existing":
                    plt.plot(x, y, "o", markerfacecolor=vals_stable[ii], markersize=12)
                else:
                    plt.plot(x, y, "*", markerfacecolor=vals_stable[ii], markersize=18)
                ii += 1
        else:
            for x, y in labels.keys():
                plt.plot(x, y, "o", markerfacecolor=vals_stable[ii], markersize=15)
                ii += 1
    font = FontProperties()
    font.set_weight("bold")
    font.set_size(24)
    # Sets a nice layout depending on the type of PD. Also defines a
    # "center" for the PD, which then allows the annotations to be spread
    # out in a nice manner.
    if len(self._pd.elements) == 3:
        plt.axis("equal")
        plt.xlim((-0.1, 1.2))
        plt.ylim((-0.1, 1.0))
        plt.axis("off")
        # Centroid of the Gibbs triangle.
        center = (0.5, math.sqrt(3) / 6)
    else:
        all_coords = labels.keys()
        miny = min(c[1] for c in all_coords)
        ybuffer = max(abs(miny) * 0.1, 0.1)
        plt.xlim((-0.1, 1.1))
        plt.ylim((miny - ybuffer, ybuffer))
        center = (0.5, miny / 2)
        plt.xlabel("Fraction", fontsize=28, fontweight="bold")
        plt.ylabel("Formation energy (eV/atom)", fontsize=28, fontweight="bold")
    for coords in sorted(labels.keys(), key=lambda x: -x[1]):
        entry = labels[coords]
        label = entry.name
        # The follow defines an offset for the annotation text emanating
        # from the center of the PD. Results in fairly nice layouts for the
        # most part.
        vec = np.array(coords) - center
        vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 else vec
        valign = "bottom" if vec[1] > 0 else "top"
        if vec[0] < -0.01:
            halign = "right"
        elif vec[0] > 0.01:
            halign = "left"
        else:
            halign = "center"
        if label_stable:
            if process_attributes and entry.attribute == "new":
                plt.annotate(
                    latexify(label),
                    coords,
                    xytext=vec,
                    textcoords="offset points",
                    horizontalalignment=halign,
                    verticalalignment=valign,
                    fontproperties=font,
                    color="g",
                )
            else:
                plt.annotate(
                    latexify(label),
                    coords,
                    xytext=vec,
                    textcoords="offset points",
                    horizontalalignment=halign,
                    verticalalignment=valign,
                    fontproperties=font,
                )
    if self.show_unstable:
        font = FontProperties()
        font.set_size(16)
        energies_unstable = [self._pd.get_e_above_hull(entry) for entry, coord in unstable.items()]
        if energy_colormap is not None:
            energies.extend(energies_unstable)
            vals_unstable = _map.to_rgba(energies_unstable)
        # NOTE(review): ii indexes vals_unstable but (below) only increments
        # for entries under the show_unstable cutoff, while vals_unstable was
        # built from ALL unstable entries — colors may not line up when some
        # entries are filtered out. Confirm intended behavior before changing.
        ii = 0
        for entry, coords in unstable.items():
            ehull = self._pd.get_e_above_hull(entry)
            if ehull < self.show_unstable:
                vec = np.array(coords) - center
                vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 else vec
                label = entry.name
                if energy_colormap is None:
                    plt.plot(
                        coords[0],
                        coords[1],
                        "ks",
                        linewidth=3,
                        markeredgecolor="k",
                        markerfacecolor="r",
                        markersize=8,
                    )
                else:
                    plt.plot(
                        coords[0],
                        coords[1],
                        "s",
                        linewidth=3,
                        markeredgecolor="k",
                        markerfacecolor=vals_unstable[ii],
                        markersize=8,
                    )
                if label_unstable:
                    plt.annotate(
                        latexify(label),
                        coords,
                        xytext=vec,
                        textcoords="offset points",
                        horizontalalignment=halign,
                        color="b",
                        verticalalignment=valign,
                        fontproperties=font,
                    )
                ii += 1
    if energy_colormap is not None and show_colorbar:
        _map.set_array(energies)
        cbar = plt.colorbar(_map)
        cbar.set_label(
            "Energy [meV/at] above hull (in red)\nInverse energy [ meV/at] above hull (in green)",
            rotation=-90,
            ha="left",
            va="center",
        )
    f = plt.gcf()
    f.set_size_inches((8, 6))
    plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
    return plt
def _get_3d_plot(self, label_stable=True):
    """
    Draw a 3D (quaternary) phase diagram with matplotlib.

    matplotlib is imported lazily here because it is an expensive import
    and may not be installed on all machines.
    """
    import matplotlib.pyplot as plt
    from matplotlib.font_manager import FontProperties

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    font = FontProperties(weight="bold", size=13)
    lines, labels, _unstable = self.pd_plot_data
    for xs, ys, zs in lines:
        ax.plot(
            xs,
            ys,
            zs,
            "bo-",
            linewidth=3,
            markeredgecolor="b",
            markerfacecolor="r",
            markersize=10,
        )
    counter = 1
    legend_entries = []
    for position in sorted(labels.keys()):
        entry = labels[position]
        name = entry.name
        if label_stable:
            if len(entry.composition.elements) == 1:
                # Terminal elements get their symbol directly at the vertex.
                ax.text(position[0], position[1], position[2], name, fontproperties=font)
            else:
                # Other phases get a numeric marker, resolved in a figure legend.
                ax.text(position[0], position[1], position[2], str(counter), fontsize=12)
                legend_entries.append(f"{counter} : {latexify(name)}")
                counter += 1
    plt.figtext(0.01, 0.01, "\n".join(legend_entries), fontproperties=font)
    ax.axis("off")
    ax.set_xlim(-0.1, 0.72)
    ax.set_ylim(0, 0.66)
    ax.set_zlim(0, 0.56)  # pylint: disable=E1101
    return plt
def write_image(self, stream, image_format="svg", **kwargs):
    r"""
    Write the phase diagram as an image to a stream.

    Args:
        stream: Stream to write to. Can be a file stream or a StringIO
            stream.
        image_format: Format for the image. Can be any of the matplotlib
            supported formats. Defaults to svg for best results with
            vector graphics.
        **kwargs: Passed through to the get_plot function.
    """
    plot = self.get_plot(**kwargs)
    figure = plot.gcf()
    figure.set_size_inches((12, 10))
    plot.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
    """
    Plot the chemical potential range _map and display it. Currently works
    only for 3-component PDs.

    Args:
        elements: Sequence of elements to be considered as independent
            variables. E.g., if you want to show the stability ranges of
            all Li-Co-O phases wrt to uLi and uO, you will supply
            [Element("Li"), Element("O")]
        referenced: if True, gives the results with a reference being the
            energy of the elemental phase. If False, gives absolute values.
    """
    plot = self.get_chempot_range_map_plot(elements, referenced=referenced)
    plot.show()
def get_chempot_range_map_plot(self, elements, referenced=True):
    """
    Returns a plot of the chemical potential range _map. Currently works
    only for 3-component PDs.

    Args:
        elements: Sequence of elements to be considered as independent
            variables. E.g., if you want to show the stability ranges of
            all Li-Co-O phases wrt to uLi and uO, you will supply
            [Element("Li"), Element("O")]
        referenced: if True, gives the results with a reference being the
            energy of the elemental phase. If False, gives absolute values.

    Returns:
        A matplotlib plot object.
    """
    plt = pretty_plot(12, 8)
    chempot_ranges = self._pd.get_chempot_range_map(elements, referenced=referenced)
    # Entries whose hull polygons touch the axes need extra horizontal /
    # vertical closing lines (drawn after the main loop).
    missing_lines = {}
    excluded_region = []
    for entry, lines in chempot_ranges.items():
        comp = entry.composition
        center_x = 0
        center_y = 0
        coords = []
        contain_zero = any(comp.get_atomic_fraction(el) == 0 for el in elements)
        is_boundary = (not contain_zero) and sum(comp.get_atomic_fraction(el) for el in elements) == 1
        for line in lines:
            (x, y) = line.coords.transpose()
            plt.plot(x, y, "k-")
            # Accumulate unique vertices; their mean becomes the label anchor.
            for coord in line.coords:
                if not in_coord_list(coords, coord):
                    coords.append(coord.tolist())
                    center_x += coord[0]
                    center_y += coord[1]
            if is_boundary:
                excluded_region.extend(line.coords)
        if coords and contain_zero:
            missing_lines[entry] = coords
        else:
            xy = (center_x / len(coords), center_y / len(coords))
            plt.annotate(latexify(entry.name), xy, fontsize=22)
    ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Shade the forbidden chemical potential regions.
    excluded_region.append([xlim[1], ylim[1]])
    excluded_region = sorted(excluded_region, key=lambda c: c[0])
    (x, y) = np.transpose(excluded_region)
    plt.fill(x, y, "0.80")
    # The hull does not generate the missing horizontal and vertical lines.
    # The following code fixes this.
    el0 = elements[0]
    el1 = elements[1]
    for entry, coords in missing_lines.items():
        center_x = sum(c[0] for c in coords)
        center_y = sum(c[1] for c in coords)
        comp = entry.composition
        is_x = comp.get_atomic_fraction(el0) < 0.01
        is_y = comp.get_atomic_fraction(el1) < 0.01
        n = len(coords)
        if not (is_x and is_y):
            if is_x:
                # Close the polygon with horizontal lines out to the left edge.
                coords = sorted(coords, key=lambda c: c[1])
                for i in [0, -1]:
                    x = [min(xlim), coords[i][0]]
                    y = [coords[i][1], coords[i][1]]
                    plt.plot(x, y, "k")
                    center_x += min(xlim)
                    center_y += coords[i][1]
            elif is_y:
                # Close the polygon with vertical lines down to the bottom edge.
                coords = sorted(coords, key=lambda c: c[0])
                for i in [0, -1]:
                    x = [coords[i][0], coords[i][0]]
                    y = [coords[i][1], min(ylim)]
                    plt.plot(x, y, "k")
                    center_x += coords[i][0]
                    center_y += min(ylim)
            xy = (center_x / (n + 2), center_y / (n + 2))
        else:
            center_x = sum(coord[0] for coord in coords) + xlim[0]
            center_y = sum(coord[1] for coord in coords) + ylim[0]
            xy = (center_x / (n + 1), center_y / (n + 1))
        plt.annotate(
            latexify(entry.name),
            xy,
            horizontalalignment="center",
            verticalalignment="center",
            fontsize=22,
        )
    plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)".format(el0.symbol))
    plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)".format(el1.symbol))
    plt.tight_layout()
    return plt
def get_contour_pd_plot(self):
    """
    Plot a contour phase diagram plot, where phase triangles are colored
    according to degree of instability by interpolation. Currently only
    works for 3-component phase diagrams.

    Returns:
        A matplotlib plot object.
    """
    from matplotlib import cm
    from scipy import interpolate

    pd = self._pd
    entries = pd.qhull_entries
    data = np.array(pd.qhull_data)
    plt = self._get_2d_plot()
    # Project compositions onto the Gibbs triangle; the third column is
    # overwritten with the energy above hull, which is the field we contour.
    data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
    for i, e in enumerate(entries):
        data[i, 2] = self._pd.get_e_above_hull(e)
    gridsize = 0.005
    xnew = np.arange(0, 1.0, gridsize)
    ynew = np.arange(0, 1, gridsize)
    f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
    # Evaluate the interpolator on the whole grid in one vectorized call
    # instead of the previous O(n^2) Python double loop. meshgrid's default
    # "xy" indexing yields arrays indexed [j, i] = (xnew[i], ynew[j]),
    # exactly matching the znew[j, i] layout contourf expects.
    xgrid, ygrid = np.meshgrid(xnew, ynew)
    znew = f(xgrid, ygrid)
    # pylint: disable=E1101
    plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
    plt.colorbar()
    return plt
def _create_plotly_lines(self):
    """
    Creates Plotly scatter (line) plots for all phase diagram facets.

    Returns:
        go.Scatter (or go.Scatter3d) plot
    """
    line_plot = None
    x, y, z, energies = [], [], [], []
    # None sentinels break the trace between facets, so all hull edges can
    # live in one Scatter object.
    for line in self.pd_plot_data[0]:
        x.extend(list(line[0]) + [None])
        y.extend(list(line[1]) + [None])
        if self._dim == 3:
            # Ternary: z is the formation energy of the stable entry at each
            # vertex, looked up by its projected coordinate.
            z.extend(
                [self._pd.get_form_energy_per_atom(self.pd_plot_data[1][coord]) for coord in zip(line[0], line[1])]
                + [None]
            )
        elif self._dim == 4:
            # NOTE(review): `energies` is filled here but never used in this
            # method — looks vestigial; confirm before removing.
            energies.extend(
                [
                    self._pd.get_form_energy_per_atom(self.pd_plot_data[1][coord])
                    for coord in zip(line[0], line[1], line[2])
                ]
                + [None]
            )
            z.extend(list(line[2]) + [None])
    plot_args = dict(
        mode="lines",
        hoverinfo="none",
        line={"color": "rgba(0,0,0,1.0)", "width": 7.0},
        showlegend=False,
    )
    if self._dim == 2:
        line_plot = go.Scatter(x=x, y=y, **plot_args)
    elif self._dim == 3:
        # x and y are intentionally swapped to match the ternary orientation
        # used by the other plotly trace builders in this class.
        line_plot = go.Scatter3d(x=y, y=x, z=z, **plot_args)
    elif self._dim == 4:
        line_plot = go.Scatter3d(x=x, y=y, z=z, **plot_args)
    return line_plot
def _create_plotly_stable_labels(self, label_stable=True):
    """
    Creates a (hidable) scatter trace containing labels of stable phases.
    Contains some functionality for creating sensible label positions.

    Args:
        label_stable (bool): If False (or for quaternary diagrams), the
            trace is created but only shown via the legend toggle.

    Returns:
        go.Scatter (or go.Scatter3d) plot
    """
    x, y, z, text, textpositions = [], [], [], [], []
    stable_labels_plot = None
    offset_2d = 0.005  # extra distance to offset label position for clarity
    offset_3d = 0.01
    # Labels on the ternary hull float above the surface by 10% of the
    # deepest formation energy.
    energy_offset = -0.1 * self._min_energy
    min_energy_x = None
    if self._dim == 2:
        # x-coordinate of the lowest-energy stable entry; labels left of it
        # hang to the bottom-left, labels right of it to the bottom-right.
        min_energy_x = min(list(self.pd_plot_data[1].keys()), key=lambda c: c[1])[0]
    for coords, entry in self.pd_plot_data[1].items():
        if entry.composition.is_element:  # taken care of by other function
            continue
        x_coord = coords[0]
        y_coord = coords[1]
        textposition = None
        if self._dim == 2:
            textposition = "bottom left"
            if x_coord >= min_energy_x:
                textposition = "bottom right"
                x_coord += offset_2d
            else:
                x_coord -= offset_2d
            y_coord -= offset_2d
        elif self._dim == 3:
            textposition = "middle center"
            # Nudge labels away from the triangle's horizontal center...
            if coords[0] > 0.5:
                x_coord += offset_3d
            else:
                x_coord -= offset_3d
            # ...and away from the triangle's vertical midline (0.866 ~ sqrt(3)/2).
            if coords[1] > 0.866 / 2:
                y_coord -= offset_3d
            else:
                y_coord += offset_3d
            z.append(self._pd.get_form_energy_per_atom(entry) + energy_offset)
        elif self._dim == 4:
            x_coord = x_coord - offset_3d
            y_coord = y_coord - offset_3d
            textposition = "bottom right"
            z.append(coords[2])
        x.append(x_coord)
        y.append(y_coord)
        textpositions.append(textposition)
        comp = entry.composition
        if hasattr(entry, "original_entry"):
            comp = entry.original_entry.composition
        formula = comp.reduced_formula
        text.append(htmlify(formula))
    visible = True
    if not label_stable or self._dim == 4:
        # "legendonly" keeps the trace toggleable from the legend.
        visible = "legendonly"
    plot_args = dict(
        text=text,
        textposition=textpositions,
        mode="text",
        name="Labels (stable)",
        hoverinfo="skip",
        opacity=1.0,
        visible=visible,
        showlegend=True,
    )
    if self._dim == 2:
        stable_labels_plot = go.Scatter(x=x, y=y, **plot_args)
    elif self._dim == 3:
        # x/y swapped to match the ternary orientation used elsewhere.
        stable_labels_plot = go.Scatter3d(x=y, y=x, z=z, **plot_args)
    elif self._dim == 4:
        stable_labels_plot = go.Scatter3d(x=x, y=y, z=z, **plot_args)
    return stable_labels_plot
def _create_plotly_element_annotations(self):
    """
    Creates terminal element annotations for Plotly phase diagrams.

    Returns:
        list of annotation dicts.
    """
    annotations_list = []
    x, y, z = None, None, None
    for coords, entry in self.pd_plot_data[1].items():
        # Only the terminal (single-element) vertices are annotated here;
        # compound labels are created by _create_plotly_stable_labels.
        if not entry.composition.is_element:
            continue
        x, y = coords[0], coords[1]
        if self._dim == 3:
            z = self._pd.get_form_energy_per_atom(entry)
        elif self._dim == 4:
            z = coords[2]
        # NOTE(review): this check is always True after the `continue` above —
        # appears redundant but is kept byte-identical.
        if entry.composition.is_element:
            clean_formula = str(entry.composition.elements[0])
            if hasattr(entry, "original_entry"):
                # Use the pre-transformation composition for display.
                orig_comp = entry.original_entry.composition
                clean_formula = htmlify(orig_comp.reduced_formula)
            font_dict = {"color": "#000000", "size": 24.0}
            opacity = 1.0
        annotation = plotly_layouts["default_annotation_layout"].copy()
        annotation.update(
            {
                "x": x,
                "y": y,
                "font": font_dict,
                "text": clean_formula,
                "opacity": opacity,
            }
        )
        if self._dim in (3, 4):
            for d in ["xref", "yref"]:
                annotation.pop(d)  # Scatter3d cannot contain xref, yref
            if self._dim == 3:
                # x/y swapped to match the ternary orientation used elsewhere.
                annotation.update({"x": y, "y": x})
                if entry.composition.is_element:
                    z = 0.9 * self._min_energy  # place label 10% above base
            annotation.update({"z": z})
        annotations_list.append(annotation)
    # extra point ensures equilateral triangular scaling is displayed
    if self._dim == 3:
        annotations_list.append(dict(x=1, y=1, z=0, opacity=0, text=""))
    return annotations_list
def _create_plotly_figure_layout(self, label_stable=True):
    """
    Build the Plotly layout dict for the phase diagram figure, attaching
    terminal-element annotations when requested.

    Args:
        label_stable (bool): Whether to label stable compounds.

    Returns:
        Dictionary with Plotly figure layout settings.
    """
    annotations = None
    if label_stable:
        annotations = self._create_plotly_element_annotations()
    if self._dim == 2:
        layout = plotly_layouts["default_binary_layout"].copy()
        layout["annotations"] = annotations
    elif self._dim == 3:
        # 3D figures carry annotations inside the "scene" sub-dict.
        layout = plotly_layouts["default_ternary_layout"].copy()
        layout["scene"].update({"annotations": annotations})
    elif self._dim == 4:
        layout = plotly_layouts["default_quaternary_layout"].copy()
        layout["scene"].update({"annotations": annotations})
    else:
        layout = {}
    return layout
def _create_plotly_markers(self, label_uncertainties=False):
    """
    Creates stable and unstable marker plots for overlaying on the phase diagram.

    Args:
        label_uncertainties (bool): If True, per-entry correction
            uncertainties (when present) are added to hover text and used
            as error bars on stable markers.

    Returns:
        Tuple of Plotly go.Scatter (or go.Scatter3d) objects in order:
        (stable markers, unstable markers)
    """

    def get_marker_props(coords, entries, stable=True):
        """Method for getting marker locations, hovertext, and error bars
        from pd_plot_data"""
        x, y, z, texts, energies, uncertainties = [], [], [], [], [], []
        for coord, entry in zip(coords, entries):
            energy = round(self._pd.get_form_energy_per_atom(entry), 3)
            entry_id = getattr(entry, "entry_id", "no ID")
            comp = entry.composition
            if hasattr(entry, "original_entry"):
                # Transformed entries: report the original composition/ID.
                comp = entry.original_entry.composition
                entry_id = getattr(entry, "attribute", "no ID")
            formula = comp.reduced_formula
            clean_formula = htmlify(formula)
            label = f"{clean_formula} ({entry_id}) <br> " f"{energy} eV/atom"
            if not stable:
                # Entries above the show_unstable cutoff are skipped entirely.
                e_above_hull = round(self._pd.get_e_above_hull(entry), 3)
                if e_above_hull > self.show_unstable:
                    continue
                label += f" (+{e_above_hull} eV/atom)"
                energies.append(e_above_hull)
            else:
                uncertainty = 0
                if hasattr(entry, "correction_uncertainty_per_atom") and label_uncertainties:
                    uncertainty = round(entry.correction_uncertainty_per_atom, 4)
                    label += f"<br> (Error: +/- {uncertainty} eV/atom)"
                uncertainties.append(uncertainty)
                energies.append(energy)
            texts.append(label)
            x.append(coord[0])
            y.append(coord[1])
            if self._dim == 3:
                # Ternary: the vertical axis is the formation energy.
                z.append(energy)
            elif self._dim == 4:
                z.append(coord[2])
        return {
            "x": x,
            "y": y,
            "z": z,
            "texts": texts,
            "energies": energies,
            "uncertainties": uncertainties,
        }

    stable_coords, stable_entries = (
        self.pd_plot_data[1].keys(),
        self.pd_plot_data[1].values(),
    )
    unstable_entries, unstable_coords = (
        self.pd_plot_data[2].keys(),
        self.pd_plot_data[2].values(),
    )
    stable_props = get_marker_props(stable_coords, stable_entries)
    unstable_props = get_marker_props(unstable_coords, unstable_entries, stable=False)
    stable_markers, unstable_markers = {}, {}
    if self._dim == 2:
        stable_markers = plotly_layouts["default_binary_marker_settings"].copy()
        stable_markers.update(
            dict(
                x=list(stable_props["x"]),
                y=list(stable_props["y"]),
                name="Stable",
                marker=dict(color="darkgreen", size=11, line=dict(color="black", width=2)),
                opacity=0.9,
                hovertext=stable_props["texts"],
                error_y=dict(
                    array=list(stable_props["uncertainties"]),
                    type="data",
                    color="gray",
                    thickness=2.5,
                    width=5,
                ),
            )
        )
        unstable_markers = plotly_layouts["default_binary_marker_settings"].copy()
        unstable_markers.update(
            dict(
                x=list(unstable_props["x"]),
                y=list(unstable_props["y"]),
                name="Above Hull",
                marker=dict(
                    color=unstable_props["energies"],
                    colorscale=plotly_layouts["unstable_colorscale"],
                    size=6,
                    symbol="diamond",
                ),
                hovertext=unstable_props["texts"],
            )
        )
    elif self._dim == 3:
        # Ternary: x/y are swapped to match the orientation used by the
        # other plotly trace builders in this class.
        stable_markers = plotly_layouts["default_ternary_marker_settings"].copy()
        stable_markers.update(
            dict(
                x=list(stable_props["y"]),
                y=list(stable_props["x"]),
                z=list(stable_props["z"]),
                name="Stable",
                marker=dict(
                    color="black",
                    size=12,
                    opacity=0.8,
                    line=dict(color="black", width=3),
                ),
                hovertext=stable_props["texts"],
                error_z=dict(
                    array=list(stable_props["uncertainties"]),
                    type="data",
                    color="darkgray",
                    width=10,
                    thickness=5,
                ),
            )
        )
        unstable_markers = plotly_layouts["default_ternary_marker_settings"].copy()
        unstable_markers.update(
            dict(
                x=unstable_props["y"],
                y=unstable_props["x"],
                z=unstable_props["z"],
                name="Above Hull",
                marker=dict(
                    color=unstable_props["energies"],
                    colorscale=plotly_layouts["unstable_colorscale"],
                    size=6,
                    symbol="diamond",
                    colorbar=dict(title="Energy Above Hull<br>(eV/atom)", x=0.05, len=0.75),
                ),
                hovertext=unstable_props["texts"],
            )
        )
    elif self._dim == 4:
        stable_markers = plotly_layouts["default_quaternary_marker_settings"].copy()
        stable_markers.update(
            dict(
                x=stable_props["x"],
                y=stable_props["y"],
                z=stable_props["z"],
                name="Stable",
                marker=dict(
                    color=stable_props["energies"],
                    colorscale=plotly_layouts["stable_markers_colorscale"],
                    size=8,
                    opacity=0.9,
                ),
                hovertext=stable_props["texts"],
            )
        )
        unstable_markers = plotly_layouts["default_quaternary_marker_settings"].copy()
        unstable_markers.update(
            dict(
                x=unstable_props["x"],
                y=unstable_props["y"],
                z=unstable_props["z"],
                name="Above Hull",
                marker=dict(
                    color=unstable_props["energies"],
                    colorscale=plotly_layouts["unstable_colorscale"],
                    size=5,
                    symbol="diamond",
                    colorbar=dict(title="Energy Above Hull<br>(eV/atom)", x=0.05, len=0.75),
                ),
                hovertext=unstable_props["texts"],
                # Quaternary unstable markers clutter the view; hide by default.
                visible="legendonly",
            )
        )
    stable_marker_plot = go.Scatter(**stable_markers) if self._dim == 2 else go.Scatter3d(**stable_markers)
    unstable_marker_plot = go.Scatter(**unstable_markers) if self._dim == 2 else go.Scatter3d(**unstable_markers)
    return stable_marker_plot, unstable_marker_plot
def _create_plotly_uncertainty_shading(self, stable_marker_plot):
    """
    Creates shaded uncertainty region for stable entries. Currently only works
    for binary (dim=2) phase diagrams.

    Args:
        stable_marker_plot: go.Scatter object with stable markers and their
            error bars.

    Returns:
        Plotly go.Scatter object with uncertainty window shading.
    """
    uncertainty_plot = None
    x = stable_marker_plot.x
    y = stable_marker_plot.y
    transformed = False
    if hasattr(self._pd, "original_entries") or hasattr(self._pd, "chempots"):
        transformed = True
    if self._dim == 2:
        error = stable_marker_plot.error_y["array"]
        # Pack (x, y, error) into rows of shape (n, 3).
        points = np.append(x, [y, error]).reshape(3, -1).T
        points = points[points[:, 0].argsort()]  # sort by composition # pylint: disable=E1136
        # these steps trace out the boundary pts of the uncertainty window
        # Upper boundary: hull shifted up by the per-point error.
        outline = points[:, :2].copy()
        outline[:, 1] = outline[:, 1] + points[:, 2]
        last = -1
        if transformed:
            last = None  # allows for uncertainty in terminal compounds
        # Lower boundary: reversed hull shifted down, closing the polygon.
        flipped_points = np.flip(points[:last, :].copy(), axis=0)
        flipped_points[:, 1] = flipped_points[:, 1] - flipped_points[:, 2]
        outline = np.vstack((outline, flipped_points[:, :2]))
        uncertainty_plot = go.Scatter(
            x=outline[:, 0],
            y=outline[:, 1],
            name="Uncertainty (window)",
            fill="toself",
            mode="lines",
            line=dict(width=0),
            fillcolor="lightblue",
            hoverinfo="skip",
            opacity=0.4,
        )
    return uncertainty_plot
def _create_plotly_ternary_support_lines(self):
    """
    Build guide lines that make the ternary hull easier to read in three
    dimensions: triangle outlines at the top (z=0) and bottom (minimum
    energy), plus a vertical line at each elemental vertex.

    Returns:
        go.Scatter3d plot of support lines for ternary phase diagram.
    """
    coords_by_entry = dict(map(reversed, self.pd_plot_data[1].items()))
    element_positions = [coords_by_entry[ref] for ref in self._pd.el_refs.values()]
    xs, ys, zs = [], [], []
    # Triangle edges, drawn twice: once at z=0 and once at the hull bottom.
    for start, end in itertools.combinations(element_positions, 2):
        xs.extend([start[0], end[0], None] * 2)
        ys.extend([start[1], end[1], None] * 2)
        zs.extend([0, 0, None, self._min_energy, self._min_energy, None])
    # Vertical guide line at each elemental vertex.
    for vertex in element_positions:
        xs.extend([vertex[0], vertex[0], None])
        ys.extend([vertex[1], vertex[1], None])
        zs.extend([0, self._min_energy, None])
    # x/y are swapped to match the ternary orientation used elsewhere.
    return go.Scatter3d(
        x=list(ys),
        y=list(xs),
        z=list(zs),
        mode="lines",
        hoverinfo="none",
        line=dict(color="rgba (0, 0, 0, 0.4)", dash="solid", width=1.0),
        showlegend=False,
    )
def _create_plotly_ternary_hull(self):
    """
    Build a shaded Mesh3d surface that colors the ternary hull by
    formation energy per atom.

    Returns:
        go.Mesh3d plot
    """
    facets = np.array(self._pd.facets)
    qdata = self._pd.qhull_data
    # Project all hull compositions onto the Gibbs triangle; the final
    # qhull_data row is an auxiliary point and is excluded.
    coords = np.array([triangular_coord(c) for c in zip(qdata[:-1, 0], qdata[:-1, 1])])
    energies = np.array([self._pd.get_form_energy_per_atom(e) for e in self._pd.qhull_entries])
    # x/y (and facet index order) are swapped to match the ternary
    # orientation used elsewhere.
    return go.Mesh3d(
        x=list(coords[:, 1]),
        y=list(coords[:, 0]),
        z=list(energies),
        i=list(facets[:, 1]),
        j=list(facets[:, 0]),
        k=list(facets[:, 2]),
        opacity=0.8,
        intensity=list(energies),
        colorscale=plotly_layouts["stable_colorscale"],
        colorbar=dict(title="Formation energy<br>(eV/atom)", x=0.9, len=0.75),
        hoverinfo="none",
        lighting=dict(diffuse=0.0, ambient=1.0),
        name="Convex Hull (shading)",
        flatshading=True,
        showlegend=True,
    )
def uniquelines(q):
    """
    Given all the facets, convert it into a set of unique lines. Specifically
    used for converting convex hull facets into line pairs of coordinates.

    Args:
        q: A 2-dim sequence, where each row represents a facet. E.g.,
            [[1,2,3],[3,6,7],...]

    Returns:
        setoflines:
            A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
    """
    # Every unordered vertex pair within a facet is one edge; sorting the
    # pair deduplicates edges shared between facets.
    return {tuple(sorted(pair)) for facet in q for pair in itertools.combinations(facet, 2)}
def triangular_coord(coord):
    """
    Convert a 2D coordinate into a triangle-based coordinate system for a
    prettier phase diagram.

    Args:
        coord: coordinate used in the convex hull computation.

    Returns:
        coordinates in a triangular-based coordinate system.
    """
    # Basis vectors of an equilateral triangle with unit sides.
    basis = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
    return (np.asarray(coord) @ basis).transpose()
def tet_coord(coord):
    """
    Convert a 3D coordinate into a tetrahedron based coordinate system for a
    prettier phase diagram.

    Args:
        coord: coordinate used in the convex hull computation.

    Returns:
        coordinates in a tetrahedron-based coordinate system.
    """
    # Basis vectors of a regular tetrahedron with unit edges.
    basis = np.array(
        [
            [1, 0, 0],
            [0.5, math.sqrt(3) / 2, 0],
            [0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3],
        ]
    )
    return (np.asarray(coord) @ basis).transpose()
def _apply_pd_transform(lines, stable_entries, unstable_entries, xform):
    """Apply a 2D point transform ``xform(x, y) -> (x', y')`` to every line
    vertex and every entry coordinate of a ternary phase-diagram plot."""
    newlines = []
    for x, y in lines:
        newx = np.zeros_like(x)
        newy = np.zeros_like(y)
        for ii, xx in enumerate(x):
            newx[ii], newy[ii] = xform(xx, y[ii])
        newlines.append([newx, newy])
    newstable_entries = {xform(c[0], c[1]): entry for c, entry in stable_entries.items()}
    newunstable_entries = {entry: xform(c[0], c[1]) for entry, c in unstable_entries.items()}
    return newlines, newstable_entries, newunstable_entries


def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
    """
    Orders the entries (their coordinates) in a phase diagram plot according
    to the user specified ordering.
    Ordering should be given as ['Up', 'Left', 'Right'], where Up,
    Left and Right are the names of the entries in the upper, left and right
    corners of the triangle respectively.

    Args:
        lines: list of list of coordinates for lines in the PD.
        stable_entries: {coordinate : entry} for each stable node in the
            phase diagram. (Each coordinate can only have one stable phase)
        unstable_entries: {entry: coordinates} for all unstable nodes in the
            phase diagram.
        ordering: Ordering of the phase diagram, given as a list ['Up',
            'Left','Right']

    Returns:
        (newlines, newstable_entries, newunstable_entries):
            - newlines is a list of list of coordinates for lines in the PD.
            - newstable_entries is a {coordinate : entry} for each stable node
              in the phase diagram. (Each coordinate can only have one
              stable phase)
            - newunstable_entries is a {entry: coordinates} for all unstable
              nodes in the phase diagram.

    Raises:
        ValueError: if the corner entry names are not all in ``ordering``,
            or if ``ordering`` cannot be matched to any corner arrangement.
    """
    # Identify which entry sits at each corner of the triangle by scanning
    # for the extreme x (left/right) and y (top) stable coordinates.
    yup = -1000.0
    xleft = 1000.0
    xright = -1000.0
    nameup = nameleft = nameright = None
    for coord in stable_entries:
        if coord[0] > xright:
            xright = coord[0]
            nameright = stable_entries[coord].name
        if coord[0] < xleft:
            xleft = coord[0]
            nameleft = stable_entries[coord].name
        if coord[1] > yup:
            yup = coord[1]
            nameup = stable_entries[coord].name
    if (nameup not in ordering) or (nameright not in ordering) or (nameleft not in ordering):
        raise ValueError(
            'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
            'right}"'
            " should be in ordering : {ord}".format(up=nameup, left=nameleft, right=nameright, ord=ordering)
        )
    # Centroid of the unit triangle, used as the rotation center.
    # np.float64 replaces np.float_, an alias removed in NumPy 2.0.
    cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float64)
    c120 = np.cos(2.0 * np.pi / 3.0)
    s120 = np.sin(2.0 * np.pi / 3.0)
    c240 = np.cos(4.0 * np.pi / 3.0)
    s240 = np.sin(4.0 * np.pi / 3.0)
    if nameup == ordering[0]:
        if nameleft == ordering[1]:
            # The coordinates were already in the user ordering.
            return lines, stable_entries, unstable_entries
        # Mirror about the vertical axis x = 0.5.
        return _apply_pd_transform(
            lines, stable_entries, unstable_entries, lambda x, y: (1.0 - x, y)
        )
    if nameup == ordering[1]:
        if nameleft == ordering[2]:
            # Rotate by 120 degrees about the centroid.
            def xform(x, y):
                return (
                    c120 * (x - cc[0]) - s120 * (y - cc[1]) + cc[0],
                    s120 * (x - cc[0]) + c120 * (y - cc[1]) + cc[1],
                )
        else:
            # Mirror combined with a 120-degree rotation, expressed about
            # the point (1, 0).
            def xform(x, y):
                return (
                    -c120 * (x - 1.0) - s120 * y + 1.0,
                    -s120 * (x - 1.0) + c120 * y,
                )
        return _apply_pd_transform(lines, stable_entries, unstable_entries, xform)
    if nameup == ordering[2]:
        if nameleft == ordering[0]:
            # Rotate by 240 degrees about the centroid.
            def xform(x, y):
                return (
                    c240 * (x - cc[0]) - s240 * (y - cc[1]) + cc[0],
                    s240 * (x - cc[0]) + c240 * (y - cc[1]) + cc[1],
                )
        else:
            # Mirror combined with a 240-degree rotation, about the origin.
            def xform(x, y):
                return (
                    -c240 * x - s240 * y,
                    -s240 * x + c240 * y,
                )
        return _apply_pd_transform(lines, stable_entries, unstable_entries, xform)
    raise ValueError("Invalid ordering.")
| vorwerkc/pymatgen | pymatgen/analysis/phase_diagram.py | Python | mit | 120,302 | [
"pymatgen"
] | 17910654a19cf4722ac53d4ef27f30383e9556540321e0cec8268d0296848d81 |
from django import forms
from django.contrib.admin.widgets import AdminRadioSelect, AdminRadioFieldRenderer
from edc_base.form.forms import BaseModelForm
from edc_constants.constants import ON_STUDY
from edc_visit_tracking.forms import VisitFormMixin
from bcvp.bcvp.choices import VISIT_REASON, VISIT_INFO_SOURCE, VISIT_STUDY_STATUS
from ..models import SubjectVisit
class SubjectVisitForm(VisitFormMixin, BaseModelForm):
    """Model form for the subject visit-tracking report."""

    participant_label = 'subject'

    # Current study status of the subject; defaults to on-study.
    study_status = forms.ChoiceField(
        label='What is the subject\'s current study status',
        choices=VISIT_STUDY_STATUS,
        initial=ON_STUDY,
        help_text="",
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    # Why this visit report is being filed.
    reason = forms.ChoiceField(
        label='Reason for visit',
        choices=list(VISIT_REASON),
        help_text="",
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    # Who/what provided the visit information; optional.
    info_source = forms.ChoiceField(
        label='Source of information',
        required=False,
        choices=list(VISIT_INFO_SOURCE),
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    class Meta:
        model = SubjectVisit
        fields = '__all__'
| botswana-harvard/bcvp | bcvp/bcvp_subject/forms/subject_visit_form.py | Python | gpl-2.0 | 1,222 | [
"VisIt"
] | 693bff577b9a5f7897e5f3fad21f76b60c2655e14528abdcc23f0ba776a092d8 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib.gui.wizards.inventorywizard'
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.wizards.inventorywizard import _InventoryBatchSelectionDialog
class TestInventoryBatchSelectionDialog(GUITest):
    """GUI tests for ``_InventoryBatchSelectionDialog``."""

    def test_batch_number_validation(self):
        # A batch-controlled storable with one pre-existing batch, '123'.
        storable = self.create_storable(is_batch=True)
        self.create_storable_batch(storable=storable, batch_number=u'123')
        dialog = _InventoryBatchSelectionDialog(self.store, storable, 10)
        # We cannot use assertValid/assertInvalid here because last entry will
        # change when updating it (the dialog will append another entry that
        # will be the new _last_entry) and those other entries' names on the
        # dialog are set using setattr with a random name
        entry = dialog._last_entry
        # The existing batch number validates...
        entry.update(u'123')
        self.assertTrue(entry.is_valid())
        # ...an unknown batch number does not.
        entry.update(u'124')
        self.assertFalse(entry.is_valid())
| andrebellafronte/stoq | stoqlib/gui/test/test_inventorywizard.py | Python | gpl-2.0 | 1,841 | [
"VisIt"
] | 01f087be7331371ad532a2d0f7eaae14b0c9a89a0bf51a0d5e0ba2e6da3c4dee |
# -*- coding: utf-8 -*-
"""The ants module provides basic functions for interfacing with ants functions.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range, str
import os
from ...utils.filemanip import split_filename, copyfile
from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined
from .base import ANTSCommand, ANTSCommandInputSpec
class AtroposInputSpec(ANTSCommandInputSpec):
    """Input specification for the ``Atropos`` segmentation interface.

    Several traits (e.g. ``prior_weighting``, ``mrf_radius``,
    ``convergence_threshold``) have no ``argstr`` of their own: they are
    folded into composite arguments by ``Atropos._format_arg``.
    """

    dimension = traits.Enum(3, 2, 4, argstr='--image-dimensionality %d',
                            usedefault=True,
                            desc='image dimension (2, 3, or 4)')
    intensity_images = InputMultiPath(File(exists=True),
                                      argstr="--intensity-image %s...",
                                      mandatory=True)
    mask_image = File(exists=True, argstr='--mask-image %s', mandatory=True)
    initialization = traits.Enum('Random', 'Otsu', 'KMeans',
                                 'PriorProbabilityImages', 'PriorLabelImage',
                                 argstr="%s",
                                 requires=['number_of_tissue_classes'],
                                 mandatory=True)
    # Only consumed when initialization == 'PriorProbabilityImages'.
    prior_probability_images = InputMultiPath(File(exists=True))
    number_of_tissue_classes = traits.Int(mandatory=True)
    prior_weighting = traits.Float()
    prior_probability_threshold = traits.Float(requires=['prior_weighting'])
    likelihood_model = traits.Str(argstr="--likelihood-model %s")
    mrf_smoothing_factor = traits.Float(argstr="%s")
    mrf_radius = traits.List(traits.Int(), requires=['mrf_smoothing_factor'])
    icm_use_synchronous_update = traits.Bool(argstr="%s")
    # NOTE: "terations" (sic) is the historical trait name; renaming it
    # would break existing workflows.
    maximum_number_of_icm_terations = traits.Int(
        requires=['icm_use_synchronous_update'])
    n_iterations = traits.Int(argstr="%s")
    convergence_threshold = traits.Float(requires=['n_iterations'])
    posterior_formulation = traits.Str(argstr="%s")
    use_random_seed = traits.Bool(True, argstr='--use-random-seed %d', desc='use random seed value over constant',
                                  usedefault=True)
    use_mixture_model_proportions = traits.Bool(
        requires=['posterior_formulation'])
    out_classified_image_name = File(argstr="%s", genfile=True,
                                     hash_files=False)
    save_posteriors = traits.Bool()
    output_posteriors_name_template = traits.Str('POSTERIOR_%02d.nii.gz',
                                                 usedefault=True)
class AtroposOutputSpec(TraitedSpec):
    """Output specification for ``Atropos``."""

    # Labeled segmentation image written by Atropos.
    classified_image = File(exists=True)
    # One posterior probability image per tissue class (populated only when
    # ``save_posteriors`` is enabled).  The original spelled the metadata
    # key ``exist``, which is not the ``exists`` key nipype's File trait
    # checks — fixed to match every other output File in this module.
    posteriors = OutputMultiPath(File(exists=True))
class Atropos(ANTSCommand):
    """A finite mixture modeling (FMM) segmentation approach with possibilities for
    specifying prior constraints. These prior constraints include the specification
    of a prior label image, prior probability images (one for each class), and/or an
    MRF prior to enforce spatial smoothing of the labels. Similar algorithms include
    FAST and SPM.

    Examples
    --------
    >>> from nipype.interfaces.ants import Atropos
    >>> at = Atropos()
    >>> at.inputs.dimension = 3
    >>> at.inputs.intensity_images = 'structural.nii'
    >>> at.inputs.mask_image = 'mask.nii'
    >>> at.inputs.initialization = 'PriorProbabilityImages'
    >>> at.inputs.prior_probability_images = ['rc1s1.nii', 'rc1s2.nii']
    >>> at.inputs.number_of_tissue_classes = 2
    >>> at.inputs.prior_weighting = 0.8
    >>> at.inputs.prior_probability_threshold = 0.0000001
    >>> at.inputs.likelihood_model = 'Gaussian'
    >>> at.inputs.mrf_smoothing_factor = 0.2
    >>> at.inputs.mrf_radius = [1, 1, 1]
    >>> at.inputs.icm_use_synchronous_update = True
    >>> at.inputs.maximum_number_of_icm_terations = 1
    >>> at.inputs.n_iterations = 5
    >>> at.inputs.convergence_threshold = 0.000001
    >>> at.inputs.posterior_formulation = 'Socrates'
    >>> at.inputs.use_mixture_model_proportions = True
    >>> at.inputs.save_posteriors = True
    >>> at.cmdline # doctest: +IGNORE_UNICODE
    'Atropos --image-dimensionality 3 --icm [1,1] \
--initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii \
--likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \
--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1'
    """
    input_spec = AtroposInputSpec
    output_spec = AtroposOutputSpec
    _cmd = 'Atropos'

    def _format_arg(self, opt, spec, val):
        # Assemble Atropos's bracketed composite options: each branch folds
        # one or more optional sibling traits into a single "--opt [a,b,...]"
        # argument that a plain argstr cannot express.
        if opt == 'initialization':
            retval = "--initialization %s[%d" % (val,
                                                 self.inputs.number_of_tissue_classes)
            if val == "PriorProbabilityImages":
                _, _, ext = split_filename(
                    self.inputs.prior_probability_images[0])
                # Points at the normalized copies created in _run_interface.
                retval += ",priors/priorProbImages%02d" + \
                    ext + ",%g" % self.inputs.prior_weighting
                if isdefined(self.inputs.prior_probability_threshold):
                    retval += ",%g" % self.inputs.prior_probability_threshold
            return retval + "]"
        if opt == 'mrf_smoothing_factor':
            retval = "--mrf [%g" % val
            if isdefined(self.inputs.mrf_radius):
                retval += ",%s" % self._format_xarray([str(s) for s in self.inputs.mrf_radius])
            return retval + "]"
        if opt == "icm_use_synchronous_update":
            retval = "--icm [%d" % val
            if isdefined(self.inputs.maximum_number_of_icm_terations):
                retval += ",%g" % self.inputs.maximum_number_of_icm_terations
            return retval + "]"
        if opt == "n_iterations":
            retval = "--convergence [%d" % val
            if isdefined(self.inputs.convergence_threshold):
                retval += ",%g" % self.inputs.convergence_threshold
            return retval + "]"
        if opt == "posterior_formulation":
            retval = "--posterior-formulation %s" % val
            if isdefined(self.inputs.use_mixture_model_proportions):
                retval += "[%d]" % self.inputs.use_mixture_model_proportions
            return retval
        if opt == "out_classified_image_name":
            retval = "--output [%s" % val
            if isdefined(self.inputs.save_posteriors):
                retval += ",%s" % self.inputs.output_posteriors_name_template
            return retval + "]"
        # NOTE(review): super(ANTSCommand, self) skips ANTSCommand._format_arg
        # and dispatches to its base class — confirm this is intentional.
        return super(ANTSCommand, self)._format_arg(opt, spec, val)

    def _run_interface(self, runtime, correct_return_codes=[0]):
        # Copy the prior images into ./priors under the sequentially numbered
        # names that the --initialization template in _format_arg expects.
        if self.inputs.initialization == "PriorProbabilityImages":
            priors_directory = os.path.join(os.getcwd(), "priors")
            if not os.path.exists(priors_directory):
                os.makedirs(priors_directory)
            _, _, ext = split_filename(self.inputs.prior_probability_images[0])
            for i, f in enumerate(self.inputs.prior_probability_images):
                target = os.path.join(priors_directory,
                                      'priorProbImages%02d' % (i + 1) + ext)
                # Skip the copy if the target already is this very file.
                if not (os.path.exists(target) and os.path.realpath(target) == os.path.abspath(f)):
                    copyfile(os.path.abspath(f), os.path.join(priors_directory,
                                                              'priorProbImages%02d' % (i + 1) + ext))
        runtime = super(Atropos, self)._run_interface(runtime)
        return runtime

    def _gen_filename(self, name):
        # Default output name: "<first intensity image stem>_labeled<ext>".
        if name == 'out_classified_image_name':
            output = self.inputs.out_classified_image_name
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.intensity_images[0])
                output = name + '_labeled' + ext
            return output
        return None

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['classified_image'] = os.path.abspath(
            self._gen_filename('out_classified_image_name'))
        # Posterior images follow the user-supplied %02d template, 1-based.
        if isdefined(self.inputs.save_posteriors) and self.inputs.save_posteriors:
            outputs['posteriors'] = []
            for i in range(self.inputs.number_of_tissue_classes):
                outputs['posteriors'].append(os.path.abspath(self.inputs.output_posteriors_name_template % (i + 1)))
        return outputs
class LaplacianThicknessInputSpec(ANTSCommandInputSpec):
    """Input specification for ``LaplacianThickness`` (positional CLI args)."""

    input_wm = File(argstr='%s', mandatory=True, copyfile=True,
                    desc='white matter segmentation image',
                    position=1)
    input_gm = File(argstr='%s', mandatory=True, copyfile=True,
                    desc='gray matter segmentation image',
                    position=2)
    output_image = File(desc='name of output file', argstr='%s', position=3,
                        genfile=True, hash_files=False)
    smooth_param = traits.Float(argstr='smoothparam=%d', desc='', position=4)
    prior_thickness = traits.Float(argstr='priorthickval=%d', desc='',
                                   position=5)
    dT = traits.Float(argstr='dT=%d', desc='', position=6)
    sulcus_prior = traits.Bool(argstr='use-sulcus-prior', desc='', position=7)
    opt_tolerance = traits.Float(argstr='optional-laplacian-tolerance=%d',
                                 desc='', position=8)
class LaplacianThicknessOutputSpec(TraitedSpec):
    """Output specification for ``LaplacianThickness``."""

    output_image = File(exists=True, desc='Cortical thickness')
class LaplacianThickness(ANTSCommand):
    """Calculates the cortical thickness from an anatomical image

    Examples
    --------

    >>> from nipype.interfaces.ants import LaplacianThickness
    >>> cort_thick = LaplacianThickness()
    >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz'
    >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz'
    >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz'
    >>> cort_thick.cmdline # doctest: +IGNORE_UNICODE
    'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz'
    """

    _cmd = 'LaplacianThickness'
    input_spec = LaplacianThicknessInputSpec
    output_spec = LaplacianThicknessOutputSpec

    def _gen_filename(self, name):
        # Default output name: "<wm input stem>_thickness<ext>".
        if name == 'output_image':
            output = self.inputs.output_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_wm)
                output = name + '_thickness' + ext
            return output
        return None

    def _list_outputs(self):
        outputs = self._outputs().get()
        # NOTE(review): this concatenates the wm-input stem, the raw
        # output_image value and the wm extension into one filename, which
        # looks inconsistent with _gen_filename above — confirm against the
        # files LaplacianThickness actually writes.
        _, name, ext = split_filename(os.path.abspath(self.inputs.input_wm))
        outputs['output_image'] = os.path.join(os.getcwd(),
                                               ''.join((name,
                                                        self.inputs.output_image,
                                                        ext)))
        return outputs
class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
    """Input specification for ``N4BiasFieldCorrection``.

    ``bspline_order`` and ``convergence_threshold`` carry no ``argstr``;
    they are merged into their parent options by ``_format_arg``.
    """

    dimension = traits.Enum(3, 2, argstr='-d %d',
                            usedefault=True,
                            desc='image dimension (2 or 3)')
    input_image = File(argstr='--input-image %s', mandatory=True,
                       desc=('image to apply transformation to (generally a '
                             'coregistered functional)'))
    mask_image = File(argstr='--mask-image %s')
    weight_image = File(argstr='--weight-image %s')
    output_image = traits.Str(argstr='--output %s',
                              desc='output file name', genfile=True,
                              hash_files=False)
    bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
    bspline_order = traits.Int(requires=['bspline_fitting_distance'])
    shrink_factor = traits.Int(argstr="--shrink-factor %d")
    n_iterations = traits.List(traits.Int(), argstr="--convergence %s")
    convergence_threshold = traits.Float(requires=['n_iterations'])
    save_bias = traits.Bool(False, mandatory=True, usedefault=True,
                            desc=('True if the estimated bias should be saved'
                                  ' to file.'), xor=['bias_image'])
    bias_image = File(desc='Filename for the estimated bias.',
                      hash_files=False)
class N4BiasFieldCorrectionOutputSpec(TraitedSpec):
    """Output specification for ``N4BiasFieldCorrection``."""

    output_image = File(exists=True, desc='Warped image')
    bias_image = File(exists=True, desc='Estimated bias')
class N4BiasFieldCorrection(ANTSCommand):
    """N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
    retrospective bias correction algorithm. Based on the assumption that the
    corruption of the low frequency bias field can be modeled as a convolution of
    the intensity histogram by a Gaussian, the basic algorithmic protocol is to
    iterate between deconvolving the intensity histogram by a Gaussian, remapping
    the intensities, and then spatially smoothing this result by a B-spline modeling
    of the bias field itself. The modifications from and improvements obtained over
    the original N3 algorithm are described in [Tustison2010]_.

    .. [Tustison2010] N. Tustison et al.,
      N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
      29(6):1310-1320, June 2010.

    Examples
    --------

    >>> import copy
    >>> from nipype.interfaces.ants import N4BiasFieldCorrection
    >>> n4 = N4BiasFieldCorrection()
    >>> n4.inputs.dimension = 3
    >>> n4.inputs.input_image = 'structural.nii'
    >>> n4.inputs.bspline_fitting_distance = 300
    >>> n4.inputs.shrink_factor = 3
    >>> n4.inputs.n_iterations = [50,50,30,20]
    >>> n4.cmdline # doctest: +IGNORE_UNICODE
    'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20 ] --output structural_corrected.nii \
--shrink-factor 3'

    >>> n4_2 = copy.deepcopy(n4)
    >>> n4_2.inputs.convergence_threshold = 1e-6
    >>> n4_2.cmdline # doctest: +IGNORE_UNICODE
    'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'

    >>> n4_3 = copy.deepcopy(n4_2)
    >>> n4_3.inputs.bspline_order = 5
    >>> n4_3.cmdline # doctest: +IGNORE_UNICODE
    'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'

    >>> n4_4 = N4BiasFieldCorrection()
    >>> n4_4.inputs.input_image = 'structural.nii'
    >>> n4_4.inputs.save_bias = True
    >>> n4_4.inputs.dimension = 3
    >>> n4_4.cmdline # doctest: +IGNORE_UNICODE
    'N4BiasFieldCorrection -d 3 --input-image structural.nii \
--output [ structural_corrected.nii, structural_bias.nii ]'
    """

    _cmd = 'N4BiasFieldCorrection'
    input_spec = N4BiasFieldCorrectionInputSpec
    output_spec = N4BiasFieldCorrectionOutputSpec

    def _gen_filename(self, name):
        # Default names derived from the input image stem:
        # "<stem>_corrected<ext>" and "<stem>_bias<ext>".
        if name == 'output_image':
            output = self.inputs.output_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_image)
                output = name + '_corrected' + ext
            return output
        if name == 'bias_image':
            output = self.inputs.bias_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_image)
                output = name + '_bias' + ext
            return output
        return None

    def _format_arg(self, name, trait_spec, value):
        # When a bias image is requested, --output takes the bracketed
        # "[ corrected, bias ]" pair instead of a single filename.
        if ((name == 'output_image') and
                (self.inputs.save_bias or isdefined(self.inputs.bias_image))):
            bias_image = self._gen_filename('bias_image')
            output = self._gen_filename('output_image')
            newval = '[ %s, %s ]' % (output, bias_image)
            return trait_spec.argstr % newval

        # Fold the optional spline order into --bspline-fitting.
        if name == 'bspline_fitting_distance':
            if isdefined(self.inputs.bspline_order):
                newval = '[ %g, %d ]' % (value, self.inputs.bspline_order)
            else:
                newval = '[ %g ]' % value
            return trait_spec.argstr % newval

        # Fold the optional threshold into --convergence; iteration counts
        # are joined with 'x' by _format_xarray.
        if name == 'n_iterations':
            if isdefined(self.inputs.convergence_threshold):
                newval = '[ %s, %g ]' % (self._format_xarray([str(elt) for elt in value]),
                                         self.inputs.convergence_threshold)
            else:
                newval = '[ %s ]' % self._format_xarray([str(elt) for elt in value])
            return trait_spec.argstr % newval

        return super(N4BiasFieldCorrection,
                     self)._format_arg(name, trait_spec, value)

    def _parse_inputs(self, skip=None):
        # save_bias / bias_image only steer _format_arg; they never appear
        # on the command line themselves.
        if skip is None:
            skip = []
        skip += ['save_bias', 'bias_image']
        return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip)

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['output_image'] = os.path.abspath(
            self._gen_filename('output_image'))

        if self.inputs.save_bias or isdefined(self.inputs.bias_image):
            outputs['bias_image'] = os.path.abspath(
                self._gen_filename('bias_image'))
        return outputs
class CorticalThicknessInputSpec(ANTSCommandInputSpec):
    """Input specification for ``antsCorticalThickness.sh``."""

    dimension = traits.Enum(3, 2, argstr='-d %d', usedefault=True,
                            desc='image dimension (2 or 3)')
    anatomical_image = File(exists=True, argstr='-a %s',
                            desc=('Structural *intensity* image, typically T1.'
                                  'If more than one anatomical image is specified,'
                                  'subsequently specified images are used during the'
                                  'segmentation process. However, only the first'
                                  'image is used in the registration of priors.'
                                  'Our suggestion would be to specify the T1'
                                  'as the first image.'),
                            mandatory=True)
    brain_template = File(exists=True, argstr='-e %s',
                          desc=('Anatomical *intensity* template (possibly created using a'
                                'population data set with buildtemplateparallel.sh in ANTs).'
                                'This template is *not* skull-stripped.'),
                          mandatory=True)
    brain_probability_mask = File(exists=True, argstr='-m %s',
                                  desc='brain probability mask in template space', copyfile=False, mandatory=True)
    # The -p value is rewritten by CorticalThickness._format_arg to point at
    # the renamed copies created in _run_interface.
    segmentation_priors = InputMultiPath(
        File(exists=True), argstr='-p %s', mandatory=True)
    out_prefix = traits.Str('antsCT_', argstr='-o %s', usedefault=True,
                            desc=('Prefix that is prepended to all output'
                                  ' files (default = antsCT_)'))
    image_suffix = traits.Str('nii.gz', desc=('any of standard ITK formats,'
                                              ' nii.gz is default'),
                              argstr='-s %s', usedefault=True)
    t1_registration_template = File(exists=True,
                                    desc=('Anatomical *intensity* template'
                                          '(assumed to be skull-stripped). A common'
                                          'case would be where this would be the same'
                                          'template as specified in the -e option which'
                                          'is not skull stripped.'),
                                    argstr='-t %s', mandatory=True)
    extraction_registration_mask = File(exists=True, argstr='-f %s',
                                        desc=('Mask (defined in the template space) used during'
                                              ' registration for brain extraction.'))
    keep_temporary_files = traits.Int(argstr='-k %d',
                                      desc='Keep brain extraction/segmentation warps, etc (default = 0).')
    max_iterations = traits.Int(argstr='-i %d', desc=('ANTS registration max iterations'
                                                      '(default = 100x100x70x20)'))
    prior_segmentation_weight = traits.Float(argstr='-w %f',
                                             desc=('Atropos spatial prior *probability* weight for'
                                                   'the segmentation'))
    segmentation_iterations = traits.Int(argstr='-n %d',
                                         desc=('N4 -> Atropos -> N4 iterations during segmentation'
                                               '(default = 3)'))
    posterior_formulation = traits.Str(argstr='-b %s',
                                       desc=('Atropos posterior formulation and whether or not'
                                             'to use mixture model proportions.'
                                             '''e.g 'Socrates[1]' (default) or 'Aristotle[1]'.'''
                                             'Choose the latter if you'
                                             'want use the distance priors (see also the -l option'
                                             'for label propagation control).'))
    use_floatingpoint_precision = traits.Enum(0, 1, argstr='-j %d',
                                              desc=('Use floating point precision '
                                                    'in registrations (default = 0)'))
    use_random_seeding = traits.Enum(0, 1, argstr='-u %d',
                                     desc=('Use random number generated from system clock in Atropos'
                                           '(default = 1)'))
    b_spline_smoothing = traits.Bool(argstr='-v',
                                     desc=('Use B-spline SyN for registrations and B-spline'
                                           'exponential mapping in DiReCT.'))
    # NOTE(review): this trait has no argstr, so it never reaches the
    # command line — confirm whether that is intentional.
    cortical_label_image = File(exists=True,
                                desc='Cortical ROI labels to use as a prior for ATITH.')
    label_propagation = traits.Str(argstr='-l %s',
                                   desc=('Incorporate a distance prior one the posterior formulation. Should be'
                                         '''of the form 'label[lambda,boundaryProbability]' where label'''
                                         'is a value of 1,2,3,... denoting label ID. The label'
                                         'probability for anything outside the current label'
                                         ' = boundaryProbability * exp( -lambda * distanceFromBoundary )'
                                         'Intuitively, smaller lambda values will increase the spatial capture'
                                         'range of the distance prior. To apply to all label values, simply omit'
                                         'specifying the label, i.e. -l [lambda,boundaryProbability].'))
    quick_registration = traits.Bool(argstr='-q 1',
                                     desc=('If = 1, use antsRegistrationSyNQuick.sh as the basis for registration'
                                           'during brain extraction, brain segmentation, and'
                                           '(optional) normalization to a template.'
                                           'Otherwise use antsRegistrationSyN.sh (default = 0).'))
    debug = traits.Bool(argstr='-z 1',
                        desc=('If > 0, runs a faster version of the script.'
                              'Only for testing. Implies -u 0.'
                              'Requires single thread computation for complete reproducibility.'))
class CorticalThicknessOutputSpec(TraitedSpec):
    """Output specification for ``antsCorticalThickness.sh``."""

    BrainExtractionMask = File(exists=True, desc='brain extraction mask')
    BrainSegmentation = File(exists=True, desc='brain segmentaion image')
    BrainSegmentationN4 = File(exists=True, desc='N4 corrected image')
    BrainSegmentationPosteriors = OutputMultiPath(File(exists=True),
                                                  desc='Posterior probability images')
    CorticalThickness = File(exists=True, desc='cortical thickness file')
    TemplateToSubject1GenericAffine = File(
        exists=True, desc='Template to subject affine')
    TemplateToSubject0Warp = File(exists=True, desc='Template to subject warp')
    SubjectToTemplate1Warp = File(
        exists=True, desc='Template to subject inverse warp')
    SubjectToTemplate0GenericAffine = File(
        exists=True, desc='Template to subject inverse affine')
    SubjectToTemplateLogJacobian = File(
        exists=True, desc='Template to subject log jacobian')
    CorticalThicknessNormedToTemplate = File(exists=True,
                                             desc='Normalized cortical thickness')
    BrainVolumes = File(exists=True, desc='Brain volumes as text')
class CorticalThickness(ANTSCommand):
    """
    Examples
    --------
    >>> from nipype.interfaces.ants.segmentation import CorticalThickness
    >>> corticalthickness = CorticalThickness()
    >>> corticalthickness.inputs.dimension = 3
    >>> corticalthickness.inputs.anatomical_image ='T1.nii.gz'
    >>> corticalthickness.inputs.brain_template = 'study_template.nii.gz'
    >>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
    >>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz',
    ...                                                 'BrainSegmentationPrior02.nii.gz',
    ...                                                 'BrainSegmentationPrior03.nii.gz',
    ...                                                 'BrainSegmentationPrior04.nii.gz']
    >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz'
    >>> corticalthickness.cmdline # doctest: +IGNORE_UNICODE
    'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \
-s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz'
    """

    input_spec = CorticalThicknessInputSpec
    output_spec = CorticalThicknessOutputSpec
    _cmd = 'antsCorticalThickness.sh'

    def _format_arg(self, opt, spec, val):
        # NOTE(review): all branches except 'segmentation_priors' rebuild the
        # same "-flag value" string the trait's own argstr would produce —
        # confirm whether they can be dropped.
        if opt == 'anatomical_image':
            retval = '-a %s' % val
            return retval
        if opt == 'brain_template':
            retval = '-e %s' % val
            return retval
        if opt == 'brain_probability_mask':
            retval = '-m %s' % val
            return retval
        if opt == 'out_prefix':
            retval = '-o %s' % val
            return retval
        if opt == 't1_registration_template':
            retval = '-t %s' % val
            return retval
        if opt == 'segmentation_priors':
            # Point -p at the sequentially renamed copies created in
            # _run_interface, using the first prior's extension.
            _, _, ext = split_filename(self.inputs.segmentation_priors[0])
            retval = "-p nipype_priors/BrainSegmentationPrior%02d" + ext
            return retval
        # NOTE(review): super(ANTSCommand, self) skips ANTSCommand._format_arg
        # and dispatches to its base class — confirm this is intentional.
        return super(ANTSCommand, self)._format_arg(opt, spec, val)

    def _run_interface(self, runtime, correct_return_codes=[0]):
        # Copy the priors into ./nipype_priors under the numbered names that
        # the -p template in _format_arg expects.
        priors_directory = os.path.join(os.getcwd(), "nipype_priors")
        if not os.path.exists(priors_directory):
            os.makedirs(priors_directory)
        _, _, ext = split_filename(self.inputs.segmentation_priors[0])
        for i, f in enumerate(self.inputs.segmentation_priors):
            target = os.path.join(
                priors_directory, 'BrainSegmentationPrior%02d' % (i + 1) + ext)
            # Skip the copy if the target already is this very file.
            if not (os.path.exists(target) and os.path.realpath(target) == os.path.abspath(f)):
                copyfile(os.path.abspath(f), target)
        runtime = super(CorticalThickness, self)._run_interface(runtime)
        return runtime

    def _list_outputs(self):
        # All outputs are "<out_prefix><Name>.<image_suffix>" in the cwd.
        outputs = self._outputs().get()
        outputs['BrainExtractionMask'] = os.path.join(os.getcwd(),
                                                      self.inputs.out_prefix +
                                                      'BrainExtractionMask.' +
                                                      self.inputs.image_suffix)
        outputs['BrainSegmentation'] = os.path.join(os.getcwd(),
                                                    self.inputs.out_prefix +
                                                    'BrainSegmentation.' +
                                                    self.inputs.image_suffix)
        outputs['BrainSegmentationN4'] = os.path.join(os.getcwd(),
                                                      self.inputs.out_prefix +
                                                      'BrainSegmentation0N4.' +
                                                      self.inputs.image_suffix)
        posteriors = []
        for i in range(len(self.inputs.segmentation_priors)):
            posteriors.append(os.path.join(os.getcwd(),
                                           self.inputs.out_prefix +
                                           'BrainSegmentationPosteriors%02d.' % (i + 1) +
                                           self.inputs.image_suffix))
        outputs['BrainSegmentationPosteriors'] = posteriors
        outputs['CorticalThickness'] = os.path.join(os.getcwd(),
                                                    self.inputs.out_prefix +
                                                    'CorticalThickness.' +
                                                    self.inputs.image_suffix)
        outputs['TemplateToSubject1GenericAffine'] = os.path.join(os.getcwd(),
                                                                  self.inputs.out_prefix +
                                                                  'TemplateToSubject1GenericAffine.mat')
        outputs['TemplateToSubject0Warp'] = os.path.join(os.getcwd(),
                                                         self.inputs.out_prefix +
                                                         'TemplateToSubject0Warp.' +
                                                         self.inputs.image_suffix)
        outputs['SubjectToTemplate1Warp'] = os.path.join(os.getcwd(),
                                                         self.inputs.out_prefix +
                                                         'SubjectToTemplate1Warp.' +
                                                         self.inputs.image_suffix)
        outputs['SubjectToTemplate0GenericAffine'] = os.path.join(os.getcwd(),
                                                                  self.inputs.out_prefix +
                                                                  'SubjectToTemplate0GenericAffine.mat')
        outputs['SubjectToTemplateLogJacobian'] = os.path.join(os.getcwd(),
                                                               self.inputs.out_prefix +
                                                               'SubjectToTemplateLogJacobian.' +
                                                               self.inputs.image_suffix)
        outputs['CorticalThicknessNormedToTemplate'] = os.path.join(os.getcwd(),
                                                                    self.inputs.out_prefix +
                                                                    'CorticalThickness.' +
                                                                    self.inputs.image_suffix)
        outputs['BrainVolumes'] = os.path.join(os.getcwd(),
                                               self.inputs.out_prefix +
                                               'brainvols.csv')
        return outputs
class antsCorticalThickness(CorticalThickness):
    """Deprecated alias kept for backward compatibility; use
    :class:`CorticalThickness` instead."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original body merely *instantiated* a
        # DeprecationWarning object without ever raising or warning it,
        # so users never saw the notice.  Emit it properly on construction.
        import warnings
        warnings.warn('This class has been replaced by CorticalThickness '
                      'and will be removed in version 0.13',
                      DeprecationWarning)
        super(antsCorticalThickness, self).__init__(*args, **kwargs)
class BrainExtractionInputSpec(ANTSCommandInputSpec):
    """Trait definitions mapping onto the ``antsBrainExtraction.sh``
    command-line flags (see each trait's ``argstr``)."""
    dimension = traits.Enum(3, 2, argstr='-d %d', usedefault=True,
                            desc='image dimension (2 or 3)')
    anatomical_image = File(exists=True, argstr='-a %s',
                            desc=('Structural image, typically T1. If more than one'
                                  'anatomical image is specified, subsequently specified'
                                  'images are used during the segmentation process. However,'
                                  'only the first image is used in the registration of priors.'
                                  'Our suggestion would be to specify the T1 as the first image.'
                                  'Anatomical template created using e.g. LPBA40 data set with'
                                  'buildtemplateparallel.sh in ANTs.'),
                            mandatory=True)
    brain_template = File(exists=True, argstr='-e %s',
                          desc=('Anatomical template created using e.g. LPBA40 data set with'
                                'buildtemplateparallel.sh in ANTs.'),
                          mandatory=True)
    brain_probability_mask = File(exists=True, argstr='-m %s',
                                  desc=('Brain probability mask created using e.g. LPBA40 data set which'
                                        'have brain masks defined, and warped to anatomical template and'
                                        'averaged resulting in a probability image.'),
                                  copyfile=False, mandatory=True)
    out_prefix = traits.Str('highres001_', argstr='-o %s', usedefault=True,
                            desc=('Prefix that is prepended to all output'
                                  ' files (default = highress001_)'))
    extraction_registration_mask = File(exists=True, argstr='-f %s',
                                        desc=('Mask (defined in the template space) used during'
                                              ' registration for brain extraction.'
                                              'To limit the metric computation to a specific region.'))
    image_suffix = traits.Str('nii.gz', desc=('any of standard ITK formats,'
                                              ' nii.gz is default'),
                              argstr='-s %s', usedefault=True)
    use_random_seeding = traits.Enum(0, 1, argstr='-u %d',
                                     desc=('Use random number generated from system clock in Atropos'
                                           '(default = 1)'))
    keep_temporary_files = traits.Int(argstr='-k %d',
                                      desc='Keep brain extraction/segmentation warps, etc (default = 0).')
    use_floatingpoint_precision = traits.Enum(0, 1, argstr='-q %d',
                                              desc=('Use floating point precision '
                                                    'in registrations (default = 0)'))
    debug = traits.Bool(argstr='-z 1',
                        desc=('If > 0, runs a faster version of the script.'
                              'Only for testing. Implies -u 0.'
                              'Requires single thread computation for complete reproducibility.'))
class BrainExtractionOutputSpec(TraitedSpec):
    """Files produced by ``antsBrainExtraction.sh``."""
    BrainExtractionMask = File(exists=True, desc='brain extraction mask')
    BrainExtractionBrain = File(exists=True, desc='brain extraction image')
class BrainExtraction(ANTSCommand):
    """
    Examples
    --------
    >>> from nipype.interfaces.ants.segmentation import BrainExtraction
    >>> brainextraction = BrainExtraction()
    >>> brainextraction.inputs.dimension = 3
    >>> brainextraction.inputs.anatomical_image ='T1.nii.gz'
    >>> brainextraction.inputs.brain_template = 'study_template.nii.gz'
    >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
    >>> brainextraction.cmdline # doctest: +IGNORE_UNICODE
    'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \
-s nii.gz -o highres001_'
    """
    input_spec = BrainExtractionInputSpec
    output_spec = BrainExtractionOutputSpec
    _cmd = 'antsBrainExtraction.sh'

    def _list_outputs(self):
        # Both products land in the working directory and are named
        # <out_prefix><output name>.<image_suffix>; the output key doubles
        # as the file basename.
        outputs = self._outputs().get()
        cwd = os.getcwd()
        for key in ('BrainExtractionMask', 'BrainExtractionBrain'):
            outputs[key] = os.path.join(
                cwd,
                self.inputs.out_prefix + key + '.' + self.inputs.image_suffix)
        return outputs
class antsBrainExtraction(BrainExtraction):
    """Deprecated alias kept for backward compatibility; use
    :class:`BrainExtraction` instead."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original body only constructed a DeprecationWarning
        # object and never issued it, making the deprecation invisible.
        import warnings
        warnings.warn('This class has been replaced by BrainExtraction '
                      'and will be removed in version 0.13',
                      DeprecationWarning)
        super(antsBrainExtraction, self).__init__(*args, **kwargs)
class JointFusionInputSpec(ANTSCommandInputSpec):
    """Trait definitions for the (older) ``jointfusion`` executable;
    positional args: dimension, modalities, ..., output label image last."""
    dimension = traits.Enum(3, 2, 4, argstr='%d', position=0, usedefault=True,
                            mandatory=True,
                            desc='image dimension (2, 3, or 4)')
    modalities = traits.Int(argstr='%d', position=1, mandatory=True,
                            desc='Number of modalities or features')
    warped_intensity_images = InputMultiPath(File(exists=True),
                                             argstr="-g %s...", mandatory=True,
                                             desc='Warped atlas images')
    target_image = InputMultiPath(File(exists=True), argstr='-tg %s...',
                                  mandatory=True, desc='Target image(s)')
    warped_label_images = InputMultiPath(File(exists=True), argstr="-l %s...",
                                         mandatory=True,
                                         desc='Warped atlas segmentations')
    method = traits.Str(default='Joint', argstr='-m %s', usedefault=True,
                        desc=('Select voting method. Options: Joint (Joint '
                              'Label Fusion). May be followed by optional '
                              'parameters in brackets, e.g., -m Joint[0.1,2]'))
    alpha = traits.Float(default=0.1, usedefault=True, requires=['method'],
                         desc=('Regularization term added to matrix Mx for '
                               'inverse'))
    beta = traits.Int(default=2, usedefault=True, requires=['method'],
                      desc=('Exponent for mapping intensity difference to joint'
                            ' error'))
    output_label_image = File(argstr='%s', mandatory=True, position=-1,
                              name_template='%s',
                              output_name='output_label_image',
                              desc='Output fusion label map image')
    patch_radius = traits.ListInt(minlen=3, maxlen=3, argstr='-rp %s',
                                  desc=('Patch radius for similarity measures, '
                                        'scalar or vector. Default: 2x2x2'))
    search_radius = traits.ListInt(minlen=3, maxlen=3, argstr='-rs %s',
                                   desc='Local search radius. Default: 3x3x3')
    exclusion_region = File(exists=True, argstr='-x %s',
                            desc=('Specify an exclusion region for the given '
                                  'label.'))
    atlas_group_id = traits.ListInt(argstr='-gp %d...',
                                    desc='Assign a group ID for each atlas')
    atlas_group_weights = traits.ListInt(argstr='-gpw %d...',
                                         desc=('Assign the voting weights to '
                                               'each atlas group'))
class JointFusionOutputSpec(TraitedSpec):
    """Outputs of ``jointfusion``: the fused label map."""
    output_label_image = File(exists=True)
    # TODO: optional outputs - output_posteriors, output_voting_weights
class JointFusion(ANTSCommand):
    """
    Examples
    --------
    >>> from nipype.interfaces.ants import JointFusion
    >>> at = JointFusion()
    >>> at.inputs.dimension = 3
    >>> at.inputs.modalities = 1
    >>> at.inputs.method = 'Joint[0.1,2]'
    >>> at.inputs.output_label_image ='fusion_labelimage_output.nii'
    >>> at.inputs.warped_intensity_images = ['im1.nii',
    ...                                      'im2.nii',
    ...                                      'im3.nii']
    >>> at.inputs.warped_label_images = ['segmentation0.nii.gz',
    ...                                  'segmentation1.nii.gz',
    ...                                  'segmentation1.nii.gz']
    >>> at.inputs.target_image = 'T1.nii'
    >>> at.cmdline # doctest: +IGNORE_UNICODE
    'jointfusion 3 1 -m Joint[0.1,2] -tg T1.nii -g im1.nii -g im2.nii -g im3.nii -l segmentation0.nii.gz \
-l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii'
    >>> at.inputs.method = 'Joint'
    >>> at.inputs.alpha = 0.5
    >>> at.inputs.beta = 1
    >>> at.inputs.patch_radius = [3,2,1]
    >>> at.inputs.search_radius = [1,2,3]
    >>> at.cmdline # doctest: +IGNORE_UNICODE
    'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \
-l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii'
    """
    input_spec = JointFusionInputSpec
    output_spec = JointFusionOutputSpec
    _cmd = 'jointfusion'

    def _format_arg(self, opt, spec, val):
        # Traits that need a non-standard command-line rendering.
        if opt == 'method':
            if '[' in val:
                # User already supplied bracketed parameters verbatim.
                retval = '-m {0}'.format(val)
            else:
                # Fold alpha/beta into the method specification.
                retval = '-m {0}[{1},{2}]'.format(
                    self.inputs.method, self.inputs.alpha, self.inputs.beta)
        elif opt == 'patch_radius':
            retval = '-rp {0}'.format(self._format_xarray(val))
        elif opt == 'search_radius':
            retval = '-rs {0}'.format(self._format_xarray(val))
        else:
            if opt == 'warped_intensity_images':
                assert len(val) == self.inputs.modalities * len(self.inputs.warped_label_images), \
                    "Number of intensity images and label maps must be the same {0}!={1}".format(
                        len(val), len(self.inputs.warped_label_images))
            # BUG FIX: the original called super(ANTSCommand, self), which
            # skips ANTSCommand itself in the MRO and bypasses any
            # formatting it defines; the two-argument form must name the
            # *current* class.
            return super(JointFusion, self)._format_arg(opt, spec, val)
        return retval

    def _list_outputs(self):
        """Return the absolute path of the fused label image."""
        outputs = self._outputs().get()
        outputs['output_label_image'] = os.path.abspath(
            self.inputs.output_label_image)
        return outputs
class DenoiseImageInputSpec(ANTSCommandInputSpec):
    """Trait definitions for the ANTs ``DenoiseImage`` tool."""
    dimension = traits.Enum(2, 3, 4, argstr='-d %d', usedefault=False,
                            desc='This option forces the image to be treated '
                                 'as a specified-dimensional image. If not '
                                 'specified, the program tries to infer the '
                                 'dimensionality from the input image.')
    input_image = File(exists=True, argstr="-i %s", mandatory=True,
                       desc='A scalar image is expected as input for noise correction.')
    noise_model = traits.Enum('Gaussian', 'Rician', argstr='-n %s', usedefault=True,
                              desc=('Employ a Rician or Gaussian noise model.'))
    shrink_factor = traits.Int(default_value=1, usedefault=True, argstr='-s %s',
                               desc=('Running noise correction on large images can '
                                     'be time consuming. To lessen computation time, '
                                     'the input image can be resampled. The shrink '
                                     'factor, specified as a single integer, describes '
                                     'this resampling. Shrink factor = 1 is the default.'))
    output_image = File(argstr="-o %s", name_source=['input_image'], hash_files=False,
                        keep_extension=True, name_template='%s_noise_corrected',
                        desc='The output consists of the noise corrected '
                             'version of the input image.')
    # save_noise and noise_image are mutually exclusive ways of asking for
    # the noise estimate (see the xor declaration below).
    save_noise = traits.Bool(False, mandatory=True, usedefault=True,
                             desc=('True if the estimated noise should be saved '
                                   'to file.'), xor=['noise_image'])
    noise_image = File(name_source=['input_image'], hash_files=False,
                       keep_extension=True, name_template='%s_noise',
                       desc='Filename for the estimated noise.')
    verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.'))
class DenoiseImageOutputSpec(TraitedSpec):
    """Outputs of ``DenoiseImage``."""
    output_image = File(exists=True)
    # Only produced when the noise estimate was requested.
    noise_image = File()
class DenoiseImage(ANTSCommand):
    """
    Examples
    --------
    >>> import copy
    >>> from nipype.interfaces.ants import DenoiseImage
    >>> denoise = DenoiseImage()
    >>> denoise.inputs.dimension = 3
    >>> denoise.inputs.input_image = 'im1.nii'
    >>> denoise.cmdline # doctest: +IGNORE_UNICODE
    'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1'
    >>> denoise_2 = copy.deepcopy(denoise)
    >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz'
    >>> denoise_2.inputs.noise_model = 'Rician'
    >>> denoise_2.inputs.shrink_factor = 2
    >>> denoise_2.cmdline # doctest: +IGNORE_UNICODE
    'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2'
    >>> denoise_3 = DenoiseImage()
    >>> denoise_3.inputs.input_image = 'im1.nii'
    >>> denoise_3.inputs.save_noise = True
    >>> denoise_3.cmdline # doctest: +IGNORE_UNICODE
    'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1'
    """
    input_spec = DenoiseImageInputSpec
    output_spec = DenoiseImageOutputSpec
    _cmd = 'DenoiseImage'

    def _format_arg(self, name, trait_spec, value):
        # When the caller asked for the noise estimate, DenoiseImage expects
        # the output argument rendered as "[ corrected, noise ]".
        wants_noise = (self.inputs.save_noise or
                       isdefined(self.inputs.noise_image))
        if name == 'output_image' and wants_noise:
            paired = '[ %s, %s ]' % (
                self._filename_from_source('output_image'),
                self._filename_from_source('noise_image'))
            return trait_spec.argstr % paired
        return super(DenoiseImage, self)._format_arg(name, trait_spec, value)
class AntsJointFusionInputSpec(ANTSCommandInputSpec):
    """Trait definitions for the newer ``antsJointFusion`` executable."""
    dimension = traits.Enum(3, 2, 4, argstr='-d %d', usedefault=False,
                            desc='This option forces the image to be treated '
                                 'as a specified-dimensional image. If not '
                                 'specified, the program tries to infer the '
                                 'dimensionality from the input image.')
    target_image = traits.List(InputMultiPath(File(exists=True)), argstr='-t %s',
                               mandatory=True, desc='The target image (or '
                               'multimodal target images) assumed to be '
                               'aligned to a common image domain.')
    atlas_image = traits.List(InputMultiPath(File(exists=True)), argstr="-g %s...",
                              mandatory=True, desc='The atlas image (or '
                              'multimodal atlas images) assumed to be '
                              'aligned to a common image domain.')
    atlas_segmentation_image = InputMultiPath(File(exists=True), argstr="-l %s...",
                                              mandatory=True, desc='The atlas segmentation '
                                              'images. For performing label fusion the number '
                                              'of specified segmentations should be identical '
                                              'to the number of atlas image sets.')
    alpha = traits.Float(default_value=0.1, usedefault=True, argstr='-a %s', desc=('Regularization '
                         'term added to matrix Mx for calculating the inverse. Default = 0.1'))
    beta = traits.Float(default_value=2.0, usedefault=True, argstr='-b %s', desc=('Exponent for mapping '
                        'intensity difference to the joint error. Default = 2.0'))
    retain_label_posterior_images = traits.Bool(False, argstr='-r', usedefault=True,
                                                requires=['atlas_segmentation_image'],
                                                desc=('Retain label posterior probability images. Requires '
                                                      'atlas segmentations to be specified. Default = false'))
    retain_atlas_voting_images = traits.Bool(False, argstr='-f', usedefault=True,
                                             desc=('Retain atlas voting images. Default = false'))
    constrain_nonnegative = traits.Bool(False, argstr='-c', usedefault=True,
                                        desc=('Constrain solution to non-negative weights.'))
    patch_radius = traits.ListInt(minlen=3, maxlen=3, argstr='-p %s',
                                  desc=('Patch radius for similarity measures.'
                                        'Default: 2x2x2'))
    patch_metric = traits.Enum('PC', 'MSQ', argstr='-m %s', usedefault=False,
                               desc=('Metric to be used in determining the most similar '
                                     'neighborhood patch. Options include Pearson\'s '
                                     'correlation (PC) and mean squares (MSQ). Default = '
                                     'PC (Pearson correlation).'))
    search_radius = traits.List([3,3,3], minlen=1, maxlen=3, argstr='-s %s', usedefault=True,
                                desc=('Search radius for similarity measures. Default = 3x3x3. '
                                      'One can also specify an image where the value at the '
                                      'voxel specifies the isotropic search radius at that voxel.'))
    # exclusion_image_label and exclusion_image are paired element-wise; the
    # -e argument is assembled in AntsJointFusion._format_arg.
    exclusion_image_label = traits.List(traits.Str(), argstr='-e %s', requires=['exclusion_image'],
                                        desc=('Specify a label for the exclusion region.'))
    exclusion_image = traits.List(File(exists=True),
                                  desc=('Specify an exclusion region for the given label.'))
    mask_image = File(argstr='-x %s', exists=True, desc='If a mask image '
                      'is specified, fusion is only performed in the mask region.')
    out_label_fusion = File(argstr="%s", hash_files=False,
                            desc='The output label fusion image.')
    out_intensity_fusion_name_format = traits.Str('antsJointFusionIntensity_%d.nii.gz',
                                                  argstr="", desc='Optional intensity fusion '
                                                  'image file name format.')
    out_label_post_prob_name_format = traits.Str('antsJointFusionPosterior_%d.nii.gz',
                                                 requires=['out_label_fusion',
                                                           'out_intensity_fusion_name_format'],
                                                 desc='Optional label posterior probability '
                                                 'image file name format.')
    out_atlas_voting_weight_name_format = traits.Str('antsJointFusionVotingWeight_%d.nii.gz',
                                                     requires=['out_label_fusion',
                                                               'out_intensity_fusion_name_format',
                                                               'out_label_post_prob_name_format'],
                                                     desc='Optional atlas voting weight image '
                                                     'file name format.')
    verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.'))
class AntsJointFusionOutputSpec(TraitedSpec):
    """Outputs of ``antsJointFusion``; the *_name_format entries echo the
    printf-style templates passed on the command line."""
    out_label_fusion = File(exists=True)
    out_intensity_fusion_name_format = traits.Str()
    out_label_post_prob_name_format = traits.Str()
    out_atlas_voting_weight_name_format = traits.Str()
class AntsJointFusion(ANTSCommand):
    """
    Examples
    --------
    >>> from nipype.interfaces.ants import AntsJointFusion
    >>> antsjointfusion = AntsJointFusion()
    >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
    >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ]
    >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz']
    >>> antsjointfusion.inputs.target_image = ['im1.nii']
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \
-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']"
    >>> antsjointfusion.inputs.target_image = [ ['im1.nii', 'im2.nii'] ]
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \
-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']"
    >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'],
    ...                                        ['rc2s1.nii','rc2s2.nii'] ]
    >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz',
    ...                                                    'segmentation1.nii.gz']
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii \
-s 3x3x3 -t ['im1.nii', 'im2.nii']"
    >>> antsjointfusion.inputs.dimension = 3
    >>> antsjointfusion.inputs.alpha = 0.5
    >>> antsjointfusion.inputs.beta = 1.0
    >>> antsjointfusion.inputs.patch_radius = [3,2,1]
    >>> antsjointfusion.inputs.search_radius = [3]
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii \
-p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']"
    >>> antsjointfusion.inputs.search_radius = ['mask.nii']
    >>> antsjointfusion.inputs.verbose = True
    >>> antsjointfusion.inputs.exclusion_image = ['roi01.nii', 'roi02.nii']
    >>> antsjointfusion.inputs.exclusion_image_label = ['1','2']
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \
-o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
    >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
    >>> antsjointfusion.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz'
    >>> antsjointfusion.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz'
    >>> antsjointfusion.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz'
    >>> antsjointfusion.cmdline # doctest: +IGNORE_UNICODE
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \
-o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, \
ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] \
-p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
    """
    input_spec = AntsJointFusionInputSpec
    output_spec = AntsJointFusionOutputSpec
    _cmd = 'antsJointFusion'

    def _format_arg(self, opt, spec, val):
        if opt == 'exclusion_image_label':
            # Pair each exclusion label with its region image: -e label[image]
            retval = []
            for ii in range(len(self.inputs.exclusion_image_label)):
                retval.append('-e {0}[{1}]'.format(
                    self.inputs.exclusion_image_label[ii],
                    self.inputs.exclusion_image[ii]))
            retval = ' '.join(retval)
        elif opt == 'patch_radius':
            retval = '-p {0}'.format(self._format_xarray(val))
        elif opt == 'search_radius':
            retval = '-s {0}'.format(self._format_xarray(val))
        elif opt == 'out_label_fusion':
            # -o grows into a bracketed list as more optional outputs are
            # requested; the *_name_format traits enforce the prerequisite
            # chain via their `requires` declarations.
            if isdefined(self.inputs.out_intensity_fusion_name_format):
                if isdefined(self.inputs.out_label_post_prob_name_format):
                    if isdefined(self.inputs.out_atlas_voting_weight_name_format):
                        retval = '-o [{0}, {1}, {2}, {3}]'.format(self.inputs.out_label_fusion,
                                                                  self.inputs.out_intensity_fusion_name_format,
                                                                  self.inputs.out_label_post_prob_name_format,
                                                                  self.inputs.out_atlas_voting_weight_name_format)
                    else:
                        retval = '-o [{0}, {1}, {2}]'.format(self.inputs.out_label_fusion,
                                                             self.inputs.out_intensity_fusion_name_format,
                                                             self.inputs.out_label_post_prob_name_format)
                else:
                    retval = '-o [{0}, {1}]'.format(self.inputs.out_label_fusion,
                                                    self.inputs.out_intensity_fusion_name_format)
            else:
                retval = '-o {0}'.format(self.inputs.out_label_fusion)
        elif opt == 'out_intensity_fusion_name_format':
            retval = ''
            if not isdefined(self.inputs.out_label_fusion):
                retval = '-o {0}'.format(self.inputs.out_intensity_fusion_name_format)
        else:
            if opt == 'atlas_segmentation_image':
                assert len(val) == len(self.inputs.atlas_image), "Number of specified " \
                    "segmentations should be identical to the number of atlas image " \
                    "sets {0}!={1}".format(len(val), len(self.inputs.atlas_image))
            # BUG FIX: the original called super(ANTSCommand, self), which
            # skips ANTSCommand itself in the MRO and bypasses any
            # formatting it defines; the two-argument form must name the
            # *current* class.
            return super(AntsJointFusion, self)._format_arg(opt, spec, val)
        return retval

    def _list_outputs(self):
        """Return absolute paths/templates for whichever outputs were set."""
        outputs = self._outputs().get()
        if isdefined(self.inputs.out_label_fusion):
            outputs['out_label_fusion'] = os.path.abspath(
                self.inputs.out_label_fusion)
        if isdefined(self.inputs.out_intensity_fusion_name_format):
            outputs['out_intensity_fusion_name_format'] = os.path.abspath(
                self.inputs.out_intensity_fusion_name_format)
        if isdefined(self.inputs.out_label_post_prob_name_format):
            outputs['out_label_post_prob_name_format'] = os.path.abspath(
                self.inputs.out_label_post_prob_name_format)
        if isdefined(self.inputs.out_atlas_voting_weight_name_format):
            outputs['out_atlas_voting_weight_name_format'] = os.path.abspath(
                self.inputs.out_atlas_voting_weight_name_format)
        return outputs
| carolFrohlich/nipype | nipype/interfaces/ants/segmentation.py | Python | bsd-3-clause | 59,317 | [
"Gaussian"
] | 95bc3e928f81a87d9f42a6d1075d84f752048f5566e16839ebab40d64139701e |
# Calling a JSON API
# Liu Li
# 2015-11-23
'''
In this assignment you will write a Python program somewhat similar to http://www.pythonlearn.com/code/geojson.py. The program will prompt for a location, contact a web service, retrieve JSON from the web service and parse that data, and retrieve the first place_id from the JSON. A place ID is a textual identifier that uniquely identifies a place within Google Maps.
API End Points
To complete this assignment, you should use this API endpoint that has a static subset of the Google Data:
http://python-data.dr-chuck.net/geojson
This API uses the same parameters (sensor and address) as the Google API. This API also has no rate limit so you can test as often as you like. If you visit the URL with no parameters, you get a list of all of the address values which can be used with this API.
To call the API, you need to provide a sensor=false parameter and the address that you are requesting as the address= parameter that is properly URL encoded using the urllib.urlencode() function as shown in http://www.pythonlearn.com/code/geojson.py
Just for fun, you can also test your program with the real Google API:
http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address=University+of+Michigan
Since Google's data is always changing, the data returned from the Google API could easily be different than from my local copy API. And the Google API has rate limits. But your code should work with the Google API with no modifications other than the base URL.
Test Data / Sample Execution
You can test to see if your program is working with a location of "South Federal University" which will have a place_id of "ChIJJ8oO7_B_bIcR2AlhC8nKlok".
$ python solution.py
Enter location: South Federal University
Retrieving http://...
Retrieved 2101 characters
Place id ChIJJ8oO7_B_bIcR2AlhC8nKlok
'''
import urllib
import json
serviceUrl = "http://python-data.dr-chuck.net/geojson?"
while True:
address = raw_input("Enter location: ")
if len(address) < 1: break
url = serviceUrl + urllib.urlencode({'sensor': 'false', 'address': address})
data = urllib.urlopen(url).read()
try: js = json.loads(data)
except: js = None
if 'status' not in js or js['status'] != 'OK':
print '==== Failure To Retrieve ===='
print data
continue
# print json.dumps(js, indent=4)
id = js['results'][0]['place_id']
print "Retrieving ", url
print "Retrieved", len(data), "characters"
print "Place id", id | ll0816/PythonForEverybody | Using-Python-2-Access-Web-Data/Calling-a-JSON-API.py | Python | mit | 2,516 | [
"VisIt"
] | 7a993b8774b4872477ae5f4c131e7f1e3cbd754a5dd4ee4e494c18ebdd7d400c |
# Path to the JSON manifest of service packages (presumably read at
# startup to decide which services to load -- TODO confirm against callers).
SERVICE_PACKAGE_FILE = 'Firefly/core/services.json'
# Directory holding per-service configuration files for the dev setup.
SERVICE_CONFIG_DIR = 'dev_config/services/'
| Firefly-Automation/Firefly | Firefly/core/const.py | Python | apache-2.0 | 96 | [
"Firefly"
] | db8197bc59f998e826933b6fd6cbe07a5a78f41108aa72b9fe64dc90ed49f9b2 |
#!/usr/bin/env python
from __future__ import print_function
import os
import re
import sys
import datetime
from subprocess import Popen, PIPE, STDOUT
from types import *
"""
Below is an example of the job script
#!/bin/bash
#
# Job Submission Script
#
#@ class = queue
#@ job_name = md
#@ total_tasks = 1
#@ node = 1
#@ wall_clock_limit = 04:00:00
#@ output = $(job_name).$(jobid).log
#@ error = $(job_name).$(jobid).err
#@ job_type = mpich
#@ environment = COPY_ALL
#@ queue
module purge
module load mod1
module load mod2
module load mod3
unset LD_PRELOAD
rm my_env
env > my_env
mpirun -np $LOADL_TOTAL_TASKS prog.exe input.inp
"""
def seconds_to_hms( atime ):
    """Convert a duration in whole seconds to an ``H:MM:SS`` string.

    Parameters
    ----------
    atime : int
        Duration in seconds.

    Returns
    -------
    str
        ``str(datetime.timedelta)`` rendering, e.g. ``'1:01:01'``.

    Raises
    ------
    AssertionError
        If *atime* is not an integer (contract kept from the original).
    """
    # isinstance() is the idiomatic type check; unlike the original
    # ``type(atime) is IntType`` identity test it also accepts int
    # subclasses and does not depend on the py2-only `types` module.
    assert isinstance(atime, int), "Duration is not an integer: %r" % atime
    return str( datetime.timedelta( seconds = atime ))
class BatchScript:
    """Builder/driver for LoadLeveler batch jobs.

    Writes a ``job.sh`` submission script into the current directory,
    submits it with ``llsubmit`` and polls/cancels it via ``llq`` /
    ``llcancel``.  Only the LoadLeveler backend (``script_type == 'll'``)
    is implemented; other types print a not-supported message.
    """

    def __init__( self, location = './', duration = 3600, ncores = 16, nnodes = 1, executable = 'cp2k.popt', exec_path = None,
                  preamble = None, job_name = 'md', queue = 'clallmds', modules = None,
                  exports = None, extras = None, input_file = None, script_type = 'll', mpiexec = 'mpirun -np', use_ssh = None ):
        # location refers to the folder where the script will be written
        # duration refers to the time that will be requested in seconds
        # cores refers to the number of cores that will be requested in the script
        # nodes refers to the number of nodes that will be requested in the script
        # executable refers to the executable that will be used
        # job_name is self explanatory
        # queue is the job queue that you want to be assigned (class)
        self.location = str( location )
        self.duration = int(duration)
        self.ncores = int( ncores )
        self.nnodes = int( nnodes )
        self.executable = str( executable )
        self.job_name = str( job_name )
        self.queue = str( queue )
        if exec_path is None:
            self.exec_path = ''
        else:
            self.exec_path = str( exec_path )
        if preamble is None:
            self.preamble = []
        else:
            self.preamble = preamble
        if modules is None:
            self.modules = []
        else:
            self.modules = modules
        if exports is None:
            self.exports = []
        else:
            self.exports = exports
        if extras is None:
            self.extras = []
        else:
            self.extras = extras
        if use_ssh is None:
            self.use_ssh = ''
        else:
            self.use_ssh = str( use_ssh )
        self.script_type = script_type
        self.mpiexec = mpiexec
        self.input_file = input_file

    def create( self ):
        """Write the ``job.sh`` submission script to the current directory.

        Layout: LoadLeveler ``#@`` header, preamble lines, module loads,
        extra shell lines, exports, then the mpirun execution line.
        """
        # NOTE(review): the file handle is never closed explicitly and
        # relies on interpreter finalization -- confirm acceptable.
        if self.script_type == 'll':
            out_file = open( 'job.sh', 'w' )
            out_file.write( '#!/bin/bash\n' )
            out_file.write( '#@ class = ' + self.queue + '\n' )
            out_file.write( '#@ job_name = ' + self.job_name + '\n' )
            out_file.write( '#@ total_tasks = ' + str( self.ncores ) + '\n' )
            out_file.write( '#@ node = ' + str( self.nnodes ) + '\n' )
            out_file.write( '#@ wall_clock_limit = ' + seconds_to_hms( self.duration ) + '\n' )
            out_file.write( '#@ output = $(job_name).$(jobid).out' + '\n' )
            out_file.write( '#@ error = $(job_name).$(jobid).err' + '\n' )
            for item in self.preamble:
                out_file.write( '#' + str( item ) + ' \n' )
            out_file.write( '\n' )
            for module in self.modules:
                if module == 'purge':
                    out_file.write( 'module purge\n' )
                else:
                    out_file.write( 'module load ' + str( module ) + '\n' )
            out_file.write( '\n' )
            for extra in self.extras:
                out_file.write( str( extra ) + '\n' )
            out_file.write( '\n' )
            for export in self.exports:
                out_file.write( 'export ' + str( export ) + '\n' )
            out_file.write( '\n' )
            if self.input_file is not None:
                exec_line = str( self.mpiexec ) + ' ' + self.exec_path + self.executable + ' ' + self.input_file + '\n'
            else:
                exec_line = str( self.mpiexec ) + ' ' + self.exec_path + self.executable + '\n'
            out_file.write(exec_line)
        else:
            print( 'Script type not supported, yet' )

    @classmethod
    def from_dict( cls, options ):
        """Alternate constructor from a dict of newline-separated strings.

        Missing keys fall back to the defaults of ``__init__``.
        NOTE(review): ``filter(len, ...)[0]`` assumes Python 2 (where
        ``filter`` returns a list); under Python 3 this raises TypeError.
        """
        try:
            options["preamble"]
        except KeyError:
            preamble = None
        else:
            preamble = filter(len, options["preamble"].split("\n"))
        try:
            options["modules"]
        except KeyError:
            modules = None
        else:
            modules = filter(len, options["modules"].split("\n"))
        try:
            options["extras"]
        except KeyError:
            extras = None
        else:
            extras = filter(len, options["extras"].split("\n"))
        try:
            options["exports"]
        except KeyError:
            exports = None
        else:
            exports = filter(len, options["exports"].split("\n"))
        try:
            options["mpiexec"]
        except KeyError:
            mpiexec = None
        else:
            mpiexec = filter(len, options["mpiexec"].split("\n"))[0]
        try:
            options["executable"]
        except KeyError:
            executable = None
        else:
            executable = options["executable"]
        try:
            options["input_file"]
        except KeyError:
            input_file = None
        else:
            input_file = options["input_file"]
        try:
            options["use_ssh"]
        except KeyError:
            use_ssh = ''
        else:
            # NOTE(review): reads options["extras"] rather than
            # options["use_ssh"] -- looks like a copy/paste slip; confirm.
            use_ssh = filter(len, options["extras"].split("\n"))
        return cls( preamble = preamble, modules = modules, exports = exports, extras = extras, mpiexec = mpiexec,
                    executable = executable, input_file = input_file, use_ssh = use_ssh )

    def check_queue( self ):
        """Capture the full ``llq`` queue listing in self.q_out/self.q_err."""
        if self.script_type == 'll':
            p = Popen( ['llq'], stdout = PIPE, stderr = PIPE )
            self.q_out, self.q_err = p.communicate()
        else:
            print( 'Script type not supported, yet' )
        return

    def submit_job( self ):
        """Submit ``job.sh`` via ``llsubmit``; return the numeric job id,
        or the captured submission output on failure to parse one.
        """
        if self.script_type == 'll':
            p = Popen( [ 'llsubmit', 'job.sh'], stdout = PIPE, stderr = STDOUT )
            id_holder, self.job_id_err = p.communicate()
            # The job id is taken as the first integer in llsubmit's output.
            try:
                self.job_id = int( re.findall(r'\b\d+\b', id_holder )[0] )
            # NOTE(review): bare except swallows everything (incl.
            # KeyboardInterrupt) and leaves self.job_id unset, which later
            # raises AttributeError below -- confirm intended behaviour.
            except:
                print( 'Unable to retreive job ID' )
        else:
            print( 'Script type not supported, yet' )
        if self.job_id is not None:
            return self.job_id
        else:
            return self.job_id_err

    def is_running( self ):
        """Return True when llq reports the job in state R (running).

        NOTE(review): returns None (falsy) when submitted but not type
        'll'; callers appear to rely only on truthiness.
        """
        if self.is_submitted():
            if self.script_type == 'll':
                pattern = re.compile( r'\bR\b' )
                if pattern.search( self.status ) is not None:
                    return True
                else:
                    return False
        else:
            return False

    def is_submitted( self ):
        """Return True when our job id appears in the llq status output."""
        self.job_status()
        if self.status is not None:
            sub_holder = [ int(n) for n in re.findall(r'\b\d+\b', self.status )]
        else:
            sys.exit('Unable to establish job status')
        return self.job_id in sub_holder

    def job_status( self ):
        """Refresh self.status with ``llq -j <job_id>`` output, or exit if
        no job id is known."""
        if self.job_id is not None:
            if self.script_type == 'll':
                p = Popen( [ 'llq','-j', str(self.job_id) ], stdout = PIPE, stderr = STDOUT )
                self.status, self.status_err = p.communicate()
            else:
                print( 'Script type not supported, yet' )
        else:
            sys.exit('Unable to establish job status')

    def cancel( self ):
        """Cancel the job via ``llcancel`` if it is still in the queue."""
        if self.is_submitted():
            if self.script_type == 'll':
                p = Popen( [ 'llcancel',str(self.job_id) ], stdout = PIPE, stderr = STDOUT )
                self.cancelled, self.cancelled_err = p.communicate()
            else:
                print( 'Script type not supported, yet' )
print( 'Script type not supported, yet' )
| burbanom/batch_management | batch_management.py | Python | gpl-3.0 | 8,506 | [
"CP2K"
] | 42a705d78b07c8a67353cc6ac616bfd8adce08056eca0975e20c3336e3cdade1 |
# Created by Blake Cornell, CTO, Integris Security LLC
# Integris Security Carbonator - Beta Version - v1.2
# Released under GPL Version 2 license.
#
# See the INSTALL file for installation instructions.
#
# For more information contact us at carbonator at integrissecurity dot com
# Or visit us at https://www.integrissecurity.com/
from burp import IBurpExtender
from burp import IHttpListener
from burp import IScannerListener
from java.net import URL
from java.io import File
import time
class BurpExtender(IBurpExtender, IHttpListener, IScannerListener):
def registerExtenderCallbacks(self, callbacks):
self._callbacks = callbacks
self._callbacks.setExtensionName("Carbonator")
self._helpers = self._callbacks.getHelpers()
self.clivars = None
self.spider_results=[]
self.scanner_results=[]
self.packet_timeout=5
self.last_packet_seen= int(time.time()) #initialize the start of the spider/scan
if not self.processCLI():
return None
else:
self.clivars = True
print "Initiating Carbonator Against: ", str(self.url)
#add to scope if not already in there.
if self._callbacks.isInScope(self.url) == 0:
self._callbacks.includeInScope(self.url)
#added to ensure that the root directory is scanned
base_request = str.encode(str("GET "+self.path+" HTTP/1.1\nHost: "+self.fqdn+"\n\n"))
if(self.scheme == 'HTTPS'):
print self._callbacks.doActiveScan(self.fqdn,self.port,1,base_request)
else:
print self._callbacks.doActiveScan(self.fqdn,self.port,0,base_request)
self._callbacks.sendToSpider(self.url)
self._callbacks.registerHttpListener(self)
self._callbacks.registerScannerListener(self)
while int(time.time())-self.last_packet_seen <= self.packet_timeout:
time.sleep(1)
print "No packets seen in the last", self.packet_timeout, "seconds."
print "Removing Listeners"
self._callbacks.removeHttpListener(self)
self._callbacks.removeScannerListener(self)
self._callbacks.excludeFromScope(self.url)
print "Generating Report"
self.generateReport(self.rtype)
print "Report Generated"
print "Closing Burp in", self.packet_timeout, "seconds."
time.sleep(self.packet_timeout)
if self.clivars:
self._callbacks.exitSuite(False)
return
def processHttpMessage(self, tool_flag, isRequest, current):
self.last_packet_seen = int(time.time())
if tool_flag == self._callbacks.TOOL_SPIDER and isRequest: #if is a spider request then send to scanner
self.spider_results.append(current)
print "Sending new URL to Vulnerability Scanner: URL #",len(self.spider_results)
if self.scheme == 'https':
self._callbacks.doActiveScan(self.fqdn,self.port,1,current.getRequest()) #returns scan queue, push to array
else:
self._callbacks.doActiveScan(self.fqdn,self.port,0,current.getRequest()) #returns scan queue, push to array
return
def newScanIssue(self, issue):
self.scanner_results.append(issue)
print "New issue identified: Issue #",len(self.scanner_results);
return
def generateReport(self, format):
if format != 'XML':
format = 'HTML'
file_name = self.output
self._callbacks.generateScanReport(format,self.scanner_results,File(file_name))
time.sleep(5)
return
def processCLI(self):
cli = self._callbacks.getCommandLineArguments()
if len(cli) < 0:
print "Incomplete target information provided."
return False
elif not cli:
print "Integris Security Carbonator is now loaded."
print "If Carbonator was loaded through the BApp store then you can run in headless mode simply adding the `-Djava.awt.headless=true` flag from within your shell. Note: If burp doesn't close at the conclusion of a scan then disable Automatic Backup on Exit."
print "For questions or feature requests contact us at carbonator at integris security dot com."
print "Visit carbonator at https://www.integrissecurity.com/Carbonator"
return False
else:
self.url = URL(cli[0])
self.rtype = cli[1]
self.output = cli[2]
self.scheme = self.url.getProtocol()
self.fqdn = self.url.getHost()
self.port1 = self.url.getPort()
if self.port1 == -1 and self.scheme == 'http':
self.port = 80
elif self.port1 == -1 and self.scheme == 'https':
self.port = 443
else:
self.port = self.port1
self.path = self.url.getFile()
print "self.url: " + str(self.url) + "\n"
print "Scheme: " + self.scheme + "\n"
print "FQDN: " + self.fqdn + "\n"
print "Port: " + str(self.port1) + "\n"
print "Path: " + self.path + "\n"
return True
| RB4-Solutions/cscan | plugin/carbonator/carbonator.py | Python | gpl-3.0 | 4,421 | [
"VisIt"
] | 5fdfc1895396263044ac10b41d8e18f0f4b6bd1008dd1442722cfd2f46aefd08 |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
class AppleseedCameraDoF(bpy.types.Panel):
    """Camera-data panel exposing appleseed's depth-of-field settings."""
    bl_label = "Depth of Field"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_context = "data"
    @classmethod
    def poll(cls, context):
        # Only visible for the appleseed engine with a camera object active.
        if context.scene.render.engine != 'APPLESEED_RENDER':
            return False
        return context.active_object.type == 'CAMERA'
    def draw(self, context):
        layout = self.layout
        cam_props = context.scene.camera.data.appleseed
        model_row = layout.row()
        model_row.prop(cam_props, "camera_type", text='Model')
        if cam_props.camera_type == "thinlens":
            # Thin-lens model: expose f-stop, focus and diaphragm controls.
            layout.prop(cam_props, "camera_dof", text="F-Stop")
            layout.prop(context.active_object.data, "dof_distance", text="Focal Distance")
            layout.active = context.active_object.data.dof_object is None
            layout.prop(context.active_object.data, "dof_object", text='Autofocus')
            layout.prop(cam_props, "diaphragm_blades")
            layout.prop(cam_props, "diaphragm_angle")
            layout.prop(cam_props, "diaphragm_map")
def register():
    """Make Blender's stock camera panels engine-compatible and add our panel."""
    stock_panels = (
        bpy.types.DATA_PT_camera,
        bpy.types.DATA_PT_camera_display,
        bpy.types.CAMERA_MT_presets,
        bpy.types.DATA_PT_lens,
        bpy.types.DATA_PT_custom_props_camera,
        bpy.types.DATA_PT_context_camera,
    )
    for panel in stock_panels:
        panel.COMPAT_ENGINES.add('APPLESEED_RENDER')
    bpy.utils.register_class(AppleseedCameraDoF)
def unregister():
    # Counterpart of register(); removes the DoF panel when the add-on unloads.
    bpy.utils.unregister_class(AppleseedCameraDoF)
| jasperges/blenderseed | ui/camera.py | Python | mit | 3,039 | [
"VisIt"
] | 5d93299250f1cd6e6dda95fd452f33dca3ff96a3b313181193713f5c53cb270c |
import pytest
import webtest
from datetime import date
from urllib.parse import urlencode
from ..types.lab import Lab
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def remc_lab(testapp):
    """Lab item for the REMC lab, posted with status 'current'."""
    payload = {
        'name': 'remc-lab',
        'title': 'REMC lab',
        'status': 'current',
    }
    response = testapp.post_json('/lab', payload)
    return response.json['@graph'][0]
@pytest.fixture
def somelab_w_shared_award(testapp, award):
item = {
'name': 'some-lab',
'title': 'SOME lab',
'status': 'current',
'awards': [award['@id']]
}
return testapp.post_json('/lab', item).json['@graph'][0]
@pytest.fixture
def remc_award(testapp):
item = {
'name': 'remc-award',
'description': 'REMC test award',
'viewing_group': 'Not 4DN',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def nofic_award(testapp):
item = {
'name': 'NOFIC-award',
'description': 'NOFIC test award',
'viewing_group': 'NOFIC',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def wrangler(testapp):
    """Admin-group ('wrangler') user; used wherever elevated permissions are tested."""
    item = {
        'first_name': 'Wrangler',
        'last_name': 'Admin',
        'email': 'wrangler@example.org',
        'groups': ['admin'],
    }
    # User @@object view has keys omitted, so re-fetch the full item by location.
    res = testapp.post_json('/user', item)
    return testapp.get(res.location).json
@pytest.fixture
def lab_viewer(testapp, lab, award):
item = {
'first_name': 'ENCODE',
'last_name': 'lab viewer',
'email': 'encode_viewer@example.org',
'lab': lab['name'],
'status': 'current',
'viewing_groups': [award['viewing_group']]
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def award_viewer(testapp, somelab_w_shared_award):
item = {
'first_name': 'SOME',
'last_name': 'award viewer',
'email': 'awardee@example.org',
'lab': somelab_w_shared_award['@id'],
'status': 'current',
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
# this user has the 4DN viewing group
@pytest.fixture
def viewing_group_member(testapp, award):
item = {
'first_name': 'Viewing',
'last_name': 'Group',
'email': 'viewing_group_member@example.org',
'viewing_groups': [award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
# this user has the NOFIC viewing group
@pytest.fixture
def nofic_group_member(testapp, nofic_award):
item = {
'first_name': 'NOFIC',
'last_name': 'Group',
'email': 'viewing_group_member@example.org',
'viewing_groups': [nofic_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def multi_viewing_group_member(testapp, award, nofic_award):
item = {
'first_name': 'Viewing',
'last_name': 'Group',
'email': 'viewing_group_member@example.org',
'viewing_groups': [award['viewing_group'], nofic_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def remc_submitter(testapp, remc_lab, remc_award):
item = {
'first_name': 'REMC',
'last_name': 'Submitter',
'email': 'remc_submitter@example.org',
'submits_for': [remc_lab['@id']],
'viewing_groups': [remc_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
def remote_user_testapp(app, remote_user):
    """Return a TestApp whose every request carries the given REMOTE_USER identity."""
    extra_environ = dict(
        HTTP_ACCEPT='application/json',
        REMOTE_USER=str(remote_user),
    )
    return webtest.TestApp(app, extra_environ)
@pytest.fixture
def revoked_user(testapp, lab, award):
item = {
'first_name': 'ENCODE',
'last_name': 'Submitter',
'email': 'no_login_submitter@example.org',
'submits_for': [lab['@id']],
'status': 'revoked',
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def other_lab(testapp):
item = {
'title': 'Other lab',
'name': 'other-lab',
}
return testapp.post_json('/lab', item, status=201).json['@graph'][0]
@pytest.fixture
def simple_file(testapp, lab, award, file_formats):
item = {
'uuid': '3413218c-3d86-498b-a0a2-9a406638e777',
'file_format': file_formats.get('fastq').get('@id'),
'paired_end': '1',
'lab': lab['@id'],
'award': award['@id'],
'status': 'uploaded', # avoid s3 upload codepath
}
return testapp.post_json('/file_fastq', item).json['@graph'][0]
@pytest.fixture
def step_run(testapp, lab, award):
software = {
'name': 'do-thing',
'description': 'It does the thing',
'title': 'THING_DOER',
'version': '1.0',
'software_type': "normalizer",
'award': award['@id'],
'lab': lab['@id']
}
sw = testapp.post_json('/software', software, status=201).json['@graph'][0]
analysis_step = {
'name': 'do-thing-step',
'version': 1,
'software_used': sw['@id']
}
return testapp.post_json('/analysis-steps', analysis_step, status=201).json['@graph'][0]
@pytest.fixture
def expt_w_cont_lab_item(lab, remc_lab, award, human_biosample, exp_types):
    """Micro-C experiment payload whose contributing_labs includes the REMC lab."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        biosample=human_biosample['@id'],
        experiment_type=exp_types['microc']['@id'],
        contributing_labs=[remc_lab['@id']],
    )
@pytest.fixture
def wrangler_testapp(wrangler, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, wrangler['uuid'])
@pytest.fixture
def remc_member_testapp(remc_submitter, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, remc_submitter['uuid'])
@pytest.fixture
def submitter_testapp(submitter, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, submitter['uuid'])
@pytest.fixture
def lab_viewer_testapp(lab_viewer, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, lab_viewer['uuid'])
@pytest.fixture
def award_viewer_testapp(award_viewer, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, award_viewer['uuid'])
@pytest.fixture
def viewing_group_member_testapp(viewing_group_member, app, external_tx, zsa_savepoints):
# app for 4DN viewing group member
return remote_user_testapp(app, viewing_group_member['uuid'])
@pytest.fixture
def multi_viewing_group_member_testapp(multi_viewing_group_member, app, external_tx, zsa_savepoints):
# app with both 4DN and NOFIC viewing group
return remote_user_testapp(app, multi_viewing_group_member['uuid'])
@pytest.fixture
def nofic_group_member_testapp(nofic_group_member, app, external_tx, zsa_savepoints):
# app for 4DN viewing group member
return remote_user_testapp(app, nofic_group_member['uuid'])
@pytest.fixture
def indexer_testapp(app, external_tx, zsa_savepoints):
return remote_user_testapp(app, 'INDEXER')
@pytest.fixture
def iwg_member(testapp):
item = {
'first_name': 'IWG',
'last_name': 'Member',
'email': 'iwgmember@example.org',
'viewing_groups': ['IWG'],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def arbitrary_group_member_testapp(iwg_member, app, external_tx, zsa_savepoints):
# app for arbitrary viewing_group member
return remote_user_testapp(app, iwg_member['uuid'])
@pytest.fixture
def bs_item(lab, award):
    """Minimal biosource payload in 'submission in progress' status."""
    return dict(
        biosource_type='primary cell',
        lab=lab['@id'],
        award=award['@id'],
        status='submission in progress',
    )
vg_test_stati = ['planned', 'submission in progress', 'pre-release']
@pytest.mark.parametrize('status', vg_test_stati)
def test_arbitrary_viewing_group_can_view_item_w_viewable_by(
testapp, arbitrary_group_member_testapp, bs_item, iwg_member, status):
# post the item - the award has the 4DN viewing group and nothing related to IWG
bsres = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
# the vg testapp should not be able to get this item
arbitrary_group_member_testapp.get(bsres['@id'], status=403)
# now add viewable by property to the item
vgres = testapp.patch_json(bsres['@id'], {'viewable_by': ['IWG'], "status": status}, status=200)
# now should be able to get for each of the statuses
arbitrary_group_member_testapp.get(vgres.json['@graph'][0]['@id'], status=200)
@pytest.mark.parametrize('status', vg_test_stati)
def test_user_w_vg_cannot_view_item_w_vg_from_award(
testapp, remc_member_testapp, remc_award, bs_item, status):
""" For stati - planned, submission in progress, and pre-release - test that an item
does not have viewing_group prinicipal added via the award so the item cannot be
viewed - this tests for an arbitrary viewing_group, there are other tests for the
special handling of NOFIC and JA items, this test is not for those special cases
"""
bs_item['award'] = remc_award['@id'] # iwg award has 'not 4DN' vg as does the remc_submitter in the remc app
res = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
remc_member_testapp.get(res['@id'], status=403)
def test_wrangler_post_non_lab_collection(wrangler_testapp):
    """Admin (wrangler) users may POST to collections that have no lab, e.g. /organism."""
    item = {
        'name': 'human',
        'scientific_name': 'Homo sapiens',
        'taxon_id': '9606',
    }
    # Fix: do not `return` from a test function -- pytest emits the
    # PytestReturnNotNone warning and the value is never used; the
    # status=201 expectation is the actual assertion.
    wrangler_testapp.post_json('/organism', item, status=201)
def test_submitter_cant_post_non_lab_collection(submitter_testapp):
    """Plain submitters are forbidden (403) from POSTing to non-lab collections."""
    item = {
        'name': 'human',
        'scientific_name': 'Homo sapiens',
        'taxon_id': '9606',
    }
    # Fix: do not `return` from a test function -- pytest emits the
    # PytestReturnNotNone warning and the value is never used; the
    # status=403 expectation is the actual assertion.
    submitter_testapp.post_json('/organism', item, status=403)
def test_submitter_post_update_experiment(submitter_testapp, lab, award, human_biosample, exp_types):
experiment = {'lab': lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201)
location = res.location
res = submitter_testapp.get(location + '@@testing-allowed?permission=edit', status=200)
assert res.json['has_permission'] is True
assert 'submits_for.%s' % lab['uuid'] in res.json['principals_allowed_by_permission']
submitter_testapp.patch_json(location, {'description': 'My experiment'}, status=200)
def test_submitter_cant_post_other_lab(submitter_testapp, other_lab, award, exp_types):
experiment = {'lab': other_lab['@id'], 'award': award['@id'], 'experiment_type': exp_types['microc']['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=422)
assert res.json['errors'][0]['name'] == 'Schema: lab'
assert "not in user submits_for" in res.json['errors'][0]['description']
def test_wrangler_post_other_lab(wrangler_testapp, other_lab, award, human_biosample, exp_types):
experiment = {'lab': other_lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
wrangler_testapp.post_json('/experiments-hi-c', experiment, status=201)
def test_submitter_view_experiement(submitter_testapp, submitter, lab, award, human_biosample, exp_types):
experiment = {'lab': lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201)
submitter_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_user_view_details_admin(submitter, access_key, testapp):
res = testapp.get(submitter['@id'])
assert 'email' in res.json
def test_users_view_details_self(submitter, access_key, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
assert 'email' in res.json
def test_users_patch_self(submitter, access_key, submitter_testapp):
submitter_testapp.patch_json(submitter['@id'], {})
def test_users_post_disallowed(submitter, access_key, submitter_testapp):
item = {
'first_name': 'ENCODE',
'last_name': 'Submitter2',
'email': 'encode_submitter2@example.org',
}
submitter_testapp.post_json('/user', item, status=403)
def test_users_cannot_view_other_users_info_with_basic_authenticated(submitter, authenticated_testapp):
authenticated_testapp.get(submitter['@id'], status=403)
def test_users_can_see_their_own_user_info(submitter, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
assert 'title' in res.json
assert 'email' in res.json
def test_users_view_basic_anon(submitter, anontestapp):
anontestapp.get(submitter['@id'], status=403)
def test_users_view_basic_indexer(submitter, indexer_testapp):
res = indexer_testapp.get(submitter['@id'])
assert 'title' in res.json
assert 'email' not in res.json
assert 'access_keys' not in res.json
def test_viewing_group_member_view(viewing_group_member_testapp, experiment_project_release):
    """Viewing-group members can GET items released to the project."""
    # Fix: dropped the `return` (pytest PytestReturnNotNone warning); the
    # status=200 expectation is the assertion.
    viewing_group_member_testapp.get(experiment_project_release['@id'], status=200)
def test_lab_viewer_view(lab_viewer_testapp, experiment):
lab_viewer_testapp.get(experiment['@id'], status=200)
def test_award_viewer_view(award_viewer_testapp, experiment):
award_viewer_testapp.get(experiment['@id'], status=200)
def test_submitter_patch_lab_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
lab = {'lab': other_lab['@id']}
submitter_testapp.patch_json(res.json['@id'], lab, status=422) # is that the right status?
def test_wrangler_patch_lab_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
lab = {'lab': other_lab['@id']}
wrangler_testapp.patch_json(res.json['@id'], lab, status=200)
def test_submitter_patch_submits_for_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]}
submitter_testapp.patch_json(res.json['@id'], submits_for, status=422)
def test_wrangler_patch_submits_for_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]}
wrangler_testapp.patch_json(res.json['@id'], submits_for, status=200)
def test_submitter_patch_groups_disallowed(submitter, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
groups = {'groups': res.json.get('groups', []) + ['admin']}
submitter_testapp.patch_json(res.json['@id'], groups, status=422)
def test_wrangler_patch_groups_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
groups = {'groups': res.json.get('groups', []) + ['admin']}
wrangler_testapp.patch_json(res.json['@id'], groups, status=200)
def test_submitter_patch_viewing_groups_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
vgroups = {'viewing_groups': res.json['viewing_groups'] + ['GGR']}
submitter_testapp.patch_json(res.json['@id'], vgroups, status=422)
def test_wrangler_patch_viewing_groups_allowed(submitter, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
vgroups = {'viewing_groups': res.json['viewing_groups'] + ['Not 4DN']}
wrangler_testapp.patch_json(res.json['@id'], vgroups, status=200)
def test_revoked_user_denied_authenticated(authenticated_testapp, revoked_user):
authenticated_testapp.get(revoked_user['@id'], status=403)
def test_revoked_user_denied_submitter(submitter_testapp, revoked_user):
submitter_testapp.get(revoked_user['@id'], status=403)
def test_revoked_user_wrangler(wrangler_testapp, revoked_user):
wrangler_testapp.get(revoked_user['@id'], status=200)
def test_labs_view_wrangler(wrangler_testapp, other_lab):
    """The wrangler can list the lab collection; exactly one lab exists here."""
    response = wrangler_testapp.get('/labs/', status=200)
    graph = response.json['@graph']
    assert len(graph) == 1
##############################################
# Permission tests based on different statuses
# Submitter created item and wants to view
@pytest.fixture
def ind_human_item(human, award, lab):
    """Minimal human-individual payload owned by the submitter's lab/award."""
    return dict(
        award=award['@id'],
        lab=lab['@id'],
        organism=human['@id'],
    )
@pytest.fixture
def file_item(award, lab, file_formats):
return {
'award': award['@id'],
'lab': lab['@id'],
'file_format': file_formats.get('fastq').get('@id'),
'paired_end': '1'
}
@pytest.fixture
def lab_item(lab):
return {
'name': 'test-lab',
'title': 'test lab',
}
def test_submitter_cannot_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp):
    """A submitter loses access (403) to their own item once it is deleted."""
    posted = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    item_id = posted.json['@graph'][0]['@id']
    for hidden_status in ['deleted']:
        wrangler_testapp.patch_json(item_id, {"status": hidden_status}, status=200)
        submitter_testapp.get(item_id, status=403)
def test_contributing_lab_member_can_view_item(expt_w_cont_lab_item, submitter_testapp,
remc_member_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
# Submitter created item and lab member wants to patch
def test_contributing_lab_member_cannot_patch(expt_w_cont_lab_item, submitter_testapp,
remc_member_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project',
'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=422)
def test_submitter_can_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_submitter_cannot_view_ownitem_replaced_using_accession(ind_human_item, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_accession = '/' + res.json['@graph'][0]['accession']
submitter_testapp.get(my_accession, status=404)
def test_submitter_can_view_ownitem_replaced_using_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
    """Replaced items remain reachable by uuid (unlike by accession)."""
    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
    my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
    # Fix: dropped the unused `rep_res` local; status=200 is the assertion.
    submitter_testapp.get(my_uuid, status=200)
def test_submitter_can_view_ownitem_replaced_using_alias(ind_human_item, submitter_testapp, wrangler_testapp):
# alias will redirect to uuid
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
res_p = wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced", "aliases": ['test:human']}, status=200)
my_alias = '/' + res_p.json['@graph'][0]['aliases'][0]
rep_res = submitter_testapp.get(my_alias, status=301)
# get the landing url, which is /object_type/uuid in this case
landing = rep_res.headers['Location'].replace('http://localhost', '')
submitter_testapp.get(landing, status=200)
def test_submitter_replaced_item_redirects_to_new_one_with_accession(ind_human_item, submitter_testapp, wrangler_testapp):
    """Post two individuals, mark the first 'replaced', and register its
    accession as an alternate_accession of the second; requests for the old
    @id must then 301-redirect to the new item."""
    # item that will be replaced (old item)
    old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    # item that will replace (new item)
    new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    # patch the old one with replaced status
    wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
    # patch the new one with the old item's accession as an alternate accession
    wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=200)
    # visit old item and assert that it lands on new item
    rep_res = submitter_testapp.get(old.json['@graph'][0]['@id'], status=301)
    # the redirect Location carries a 'redirected_from' query param naming the old @id
    redir_param = '?' + urlencode({ 'redirected_from' : old.json['@graph'][0]['@id'] })
    landing = rep_res.headers['Location'].replace('http://localhost', '')
    assert landing == new.json['@graph'][0]['@id'] + redir_param
    submitter_testapp.get(landing, status=200)
def test_submitter_replaced_item_doesnot_redirect_to_new_one_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
# posting 2 individual, changing 1 to replaced, and giving its accession to alternate accession field of the
# second one. This should result in redirect when the old accession is used
# Old item should still be accessible with its uuid
old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
old_uuid = '/individuals-human/' + old.json['@graph'][0]['uuid'] + '/'
print(old_uuid)
# item that will replace (new item)
new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
# patch old one wih status
wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
# patch new one with alternate accession
patch_data = {"alternate_accessions": [old.json['@graph'][0]['accession']]}
wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], patch_data, status=200)
# visit old uuid and assert that it lands on old item
submitter_testapp.get(old_uuid, status=200)
def test_submitter_can_not_add_to_alternate_accession_if_not_replaced(ind_human_item, submitter_testapp, wrangler_testapp):
# an accession that's status is not replaced, can not be added to alternate_accessions
old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
# item that will replace (new item)
new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
# patch old one wih status
statuses = ['current', 'released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
for status in statuses:
wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": status}, status=200)
# try adding the accession to alternate accessions
# should result in conflict (409)
wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=409)
# Submitter created item and wants to patch
def test_submitter_cannot_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'current', 'released', 'revoked', 'archived', 'archived to project', 'released to project']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
def test_submitter_can_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=200)
def test_submitter_can_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp):
statuses = ['uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=200)
def test_submitter_cannot_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'deleted', 'released to project', 'archived to project', 'archived']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=403)
def test_submitter_cannot_patch_replaced(ind_human_item, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
# replaced items are not accessible by accession
my_uuid = '/' + res.json['@graph'][0]['uuid']
submitter_testapp.patch_json(my_uuid, {'sex': 'female'}, status=403)
# Submitter created item and lab member wants to view
def test_labmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['deleted']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_labmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'released to project', 'in review by lab',
'archived', 'archived to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_labmember_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded', 'upload failed',
'archived', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_labmember_cannot_view_submitter_item_replaced_accession(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404)
def test_labmember_can_view_submitter_item_replaced_uuid(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
lab_viewer_testapp.get(my_uuid, status=200)
# Submitter created item and lab member wants to patch
def test_labmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
# Submitter created item and lab member wants to patch
def test_labmember_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded',
'upload failed', 'archived', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403)
# person with shared award tests
def test_awardmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['deleted']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403)
# people who share the same award should be able to view items that have yet to be released generally
def test_awardmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'in review by lab', 'pre-release',
'released to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_awardmember_cannot_view_submitter_item_replaced(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404)
# Submitter created item and lab member wants to patch
def test_awardmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab',
'submission in progress', 'planned', 'archived to project', 'pre-release']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
# Submitter created item and project member wants to view
def test_viewing_group_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['deleted', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
# Submitter created item and project member wants to view
def test_viewing_group_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['deleted', 'uploading', 'uploaded', 'upload failed', 'pre-release']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_viewing_group_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['current', 'released', 'revoked', 'released to project',
'archived', 'archived to project']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_viewing_group_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['released', 'revoked', 'released to project', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_viewing_group_member_can_view_submitter_item_replaced_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
viewing_group_member_testapp.get(my_uuid, status=200)
def test_viewing_group_member_cannot_view_submitter_item_replaced_with_accession(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_accession = '/' + res.json['@graph'][0]['accession']
viewing_group_member_testapp.get(my_accession, status=404)
# Submitter created item and viewing group member wants to patch
def test_viewing_group_member_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab',
'archived to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
def test_viewing_group_member_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project',
'uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403)
def test_non_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['current', 'released', 'revoked', 'archived']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_non_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released', 'revoked', 'archived']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_non_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released to project', 'archived to project', 'submission in progress',
'in review by lab', 'deleted', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_non_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released to project', 'archived to project', 'uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_everyone_can_view_lab_item(lab_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['current', 'revoked', 'inactive']
apps = [submitter_testapp, wrangler_testapp, remc_member_testapp]
res = wrangler_testapp.post_json('/lab', lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
for app in apps:
app.get(res.json['@graph'][0]['@id'], status=200)
def test_noone_can_view_deleted_lab_item(lab_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
lab_item['status'] = 'deleted'
viewing_apps = [submitter_testapp, remc_member_testapp]
res = wrangler_testapp.post_json('/lab', lab_item, status=201)
for app in viewing_apps:
app.get(res.json['@graph'][0]['@id'], status=403)
def test_lab_submitter_can_edit_lab(lab, submitter_testapp, wrangler_testapp):
res = submitter_testapp.get(lab['@id'])
wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=200)
def test_statuses_that_lab_submitter_cannot_edit_lab(lab, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'revoked', 'inactive']
res = submitter_testapp.get(lab['@id'])
for status in statuses:
wrangler_testapp.patch_json(res.json['@id'], {'status': status}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=403)
def test_lab_submitter_cannot_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp):
res = submitter_testapp.get(lab['@id'])
wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'title': 'Test Lab, HMS'}, status=422)
submitter_testapp.patch_json(res.json['@id'], {'name': 'test-lab'}, status=422)
def test_wrangler_can_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'revoked', 'inactive', 'current']
new_name = 'test-lab'
new_id = '/labs/test-lab/'
res = submitter_testapp.get(lab['@id'])
original_id = res.json['@id']
original_name = res.json['name']
for status in statuses:
wrangler_testapp.patch_json(original_id, {'status': status}, status=200)
wrangler_testapp.patch_json(original_id, {'title': 'Test Lab, HMS'}, status=200)
wrangler_testapp.patch_json(original_id, {'name': new_name}, status=200)
wrangler_testapp.patch_json(new_id, {'name': original_name}, status=200)
def test_ac_local_roles_for_lab(registry):
    """A lab item grants both the submitter and member roles for its own lab."""
    props = {
        'status': 'in review by lab',
        'award': 'b0b9c607-bbbb-4f02-93f4-9895baa1334b',
        'uuid': '828cd4fe-aaaa-4b36-a94a-d2e3a36aa989'
    }
    lab_obj = Lab.create(registry, None, props)
    granted_roles = lab_obj.__ac_local_roles__().values()
    assert 'role.lab_submitter' in granted_roles
    assert 'role.lab_member' in granted_roles
def test_last_modified_works_correctly(ind_human_item, submitter, wrangler, submitter_testapp, wrangler_testapp):
    """last_modified must track the most recent editing user and timestamp."""
    created = submitter_testapp.post_json('/individual_human', ind_human_item, status=201).json['@graph'][0]
    assert created['last_modified']['modified_by'] == submitter['@id']
    # a different user now patches the same item
    patched = wrangler_testapp.patch_json(created['@id'], {"status": "current"}, status=200).json['@graph'][0]
    assert patched['last_modified']['modified_by'] == wrangler['@id']
    assert patched['last_modified']['date_modified'] > created['last_modified']['date_modified']
@pytest.fixture
def individual_human(human, remc_lab, nofic_award, wrangler_testapp):
    """Human individual owned by the REMC lab under the NOFIC award."""
    payload = {'lab': remc_lab['@id'], 'award': nofic_award['@id'], 'organism': human['@id']}
    return wrangler_testapp.post_json('/individual_human', payload, status=201).json['@graph'][0]
def test_multi_viewing_group_viewer_can_view_nofic_when_submission_in_progress(
        wrangler_testapp, multi_viewing_group_member_testapp, individual_human):
    """A member of multiple viewing groups (incl. NOFIC) can view an item
    while its submission is still in progress.

    Fix: removed a commented-out ``pdb.set_trace()`` debugging line and the
    unused ``res`` binding from the sanity-check GET.
    """
    wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200)
    # sanity check: the admin app can still fetch the item after the patch
    wrangler_testapp.get(individual_human['@id'], status=200)
    multi_viewing_group_member_testapp.get(individual_human['@id'], status=200)
def test_viewing_group_viewer_cannot_view_nofic_when_submission_in_progress(
wrangler_testapp, viewing_group_member_testapp, individual_human):
wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200)
viewing_group_member_testapp.get(individual_human['@id'], status=403)
### These aren't strictly permissions tests but putting them here so we don't need to
### move around wrangler and submitter testapps and associated fixtures
@pytest.fixture
def planned_experiment_set_data(lab, award):
    """Minimal custom experiment set payload owned by the main lab/award."""
    data = {'description': 'test experiment set', 'experimentset_type': 'custom'}
    data['lab'] = lab['@id']
    data['award'] = award['@id']
    return data
@pytest.fixture
def status2date():
    """Map each release-type status to the date field that it stamps."""
    pairs = [('released', 'public_release'),
             ('released to project', 'project_release')]
    return dict(pairs)
def test_planned_item_status_can_be_updated_by_admin(
        submitter_testapp, wrangler_testapp, planned_experiment_set_data):
    """Only an admin (wrangler) can move a new experiment set to 'planned'."""
    # submitter cannot change status, so the wrangler performs the patch
    created = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
    assert created['status'] == 'in review by lab'
    updated = wrangler_testapp.patch_json(created['@id'], {'status': 'planned'}).json['@graph'][0]
    assert updated['status'] == 'planned'
def test_planned_item_status_is_not_changed_on_admin_patch(
submitter_testapp, wrangler_testapp, planned_experiment_set_data):
desc = 'updated description'
res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200)
res2 = wrangler_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0]
assert res2['description'] == desc
assert res2['status'] == 'planned'
def test_planned_item_status_is_changed_on_submitter_patch(
submitter_testapp, wrangler_testapp, planned_experiment_set_data):
desc = 'updated description'
res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200)
res2 = submitter_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0]
assert res2['description'] == desc
assert res2['status'] == 'submission in progress'
# these tests are for the item _update function as above so sticking them here
def test_unreleased_item_does_not_get_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
for datefield in status2date.values():
assert datefield not in res1
def test_insert_of_released_item_does_get_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
for status, datefield in status2date.items():
planned_experiment_set_data['status'] = status
res = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res['status'] == status
assert res[datefield] == date.today().isoformat()
if status in ['released', 'current']:
assert res['project_release'] == res['public_release']
def test_update_of_item_to_released_status_adds_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
for status, datefield in status2date.items():
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
assert datefield not in res1
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
assert res2[datefield] == date.today().isoformat()
if status == 'released to project':
assert 'public_release' not in res2
if status in ['released', 'current']:
assert res2['project_release'] == res2['public_release']
def test_update_of_item_to_non_released_status_does_not_add_release_date(
wrangler_testapp, planned_experiment_set_data):
statuses = ["planned", "revoked", "deleted", "obsolete", "replaced", "in review by lab", "submission in progress"]
datefields = ['public_release', 'project_release']
for status in statuses:
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
for datefield in datefields:
assert datefield not in res1
assert datefield not in res2
def test_update_of_item_that_has_release_date_does_not_change_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
test_date = '2001-01-01'
for status, datefield in status2date.items():
planned_experiment_set_data[datefield] = test_date
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
assert res1[datefield] == test_date
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
assert res2[datefield] == test_date
def test_update_of_item_without_release_dates_mixin(wrangler_testapp, award):
    """Items lacking the release-dates mixin never carry release date fields."""
    assert award['status'] == 'current'
    for datefield in ('public_release', 'project_release'):
        assert datefield not in award
# tests for bogus nofic specific __ac_local_roles__
def test_4dn_can_view_nofic_released_to_project(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
eset_item['status'] = 'released to project'
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=200)
def test_4dn_cannot_view_nofic_not_joint_analysis_planned_and_in_progress(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
statuses = ['planned', 'submission in progress']
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
for status in statuses:
eset_item['status'] = status
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=403)
def test_4dn_can_view_nofic_joint_analysis_planned_and_in_progress(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
statuses = ['planned', 'submission in progress']
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
eset_item['tags'] = ['Joint Analysis']
for status in statuses:
eset_item['status'] = status
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=200)
@pytest.fixture
def replicate_experiment_set_data(lab, award):
    """Minimal replicate experiment set payload."""
    payload = {'description': 'test replicate experiment set',
               'experimentset_type': 'replicate'}
    payload['lab'] = lab['@id']
    payload['award'] = award['@id']
    return payload
def test_ready_to_process_set_status_admin_can_edit(
        submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
    """Admins can still edit a replicate set once it is pre-release."""
    created = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
    assert created['status'] == 'in review by lab'
    prereleased = wrangler_testapp.patch_json(created['@id'], {'status': 'pre-release'}).json['@graph'][0]
    assert prereleased['status'] == 'pre-release'
    # admin can Edit
    edited = wrangler_testapp.patch_json(created['@id'], {'description': 'admin edit'}, status=200).json['@graph'][0]
    assert edited['description'] == 'admin edit'
def test_ready_to_process_set_status_submitter_can_view(
submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# submitter can view
res3 = submitter_testapp.get(res1['@id'], status=200).json
assert res3['description'] == 'test replicate experiment set'
def test_ready_to_process_set_status_submitter_can_not_edit(
submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# submitter can not edit
submitter_testapp.patch_json(res1['@id'], {'description': 'submitter edit'}, status=403)
def test_ready_to_process_set_status_others_can_not_view(
submitter_testapp, wrangler_testapp, viewing_group_member_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# others can not view
viewing_group_member_testapp.get(res1['@id'], status=403)
@pytest.fixture
def static_section_item():
    """Bare-bones static section payload."""
    section = {'name': 'static-section.test_ss'}
    section['title'] = 'Test Static Section'
    section['body'] = 'This is a test section'
    return section
def test_static_section_with_lab_view_by_lab_member(
wrangler_testapp, lab_viewer_testapp, lab, static_section_item):
static_section_item['lab'] = lab['@id']
static_section_item['status'] = 'released to lab'
res = wrangler_testapp.post_json('/static_section', static_section_item).json['@graph'][0]
lab_viewer_testapp.get(res['@id'], status=200)
def test_permissions_validate_false(award, lab, file_formats, submitter_testapp, wrangler_testapp):
    """
    Only admin can use validate=false with POST/PUT/PATCH
    """
    file_item_body = {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'file_format': file_formats.get('fastq').get('uuid'),
        'paired_end': '1'
    }
    # does it matter that the wrangler posts this? I don't think so for this test - Will 03/23/2021
    res = submitter_testapp.post_json('/file_fastq', file_item_body, status=201)
    # no permissions: a submitter may not bypass validation on any verb
    submitter_testapp.post_json('/file_fastq/?validate=false', file_item_body, status=403)
    submitter_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false',
                                 {'paired_end': '1'}, status=403)
    submitter_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false',
                               file_item_body, status=403)
    # okay permissions: the admin (wrangler) app is allowed through.
    # POST/PUT may raise TypeError downstream (from open_data_url) AFTER the
    # permission check has already passed, so that exception is tolerated.
    try:
        wrangler_testapp.post_json('/file_fastq/?validate=false&upgrade=False', file_item_body, status=201)
    except TypeError:  # thrown from open_data_url, but should make it there
        pass  # we are ok, any other exception should be thrown
    wrangler_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false',
                                {'paired_end': '1'}, status=200)
    try:
        wrangler_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false',
                                  file_item_body, status=200)
    except TypeError:  # thrown from open_data_url, but should make it there
        pass  # we are ok, any other exception should be thrown
| hms-dbmi/fourfront | src/encoded/tests/test_permissions.py | Python | mit | 56,605 | [
"VisIt"
] | 2eeaa01a86bf0c0caec6b94af3b8dd985c20937faecd84ba5de6a3e80a0d6e98 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Functionality common to TableViews and FormViews"""
from PyQt4 import QtGui
from PyQt4 import QtCore
from camelot.view.model_thread import post
from camelot.view.model_thread import gui_function
from camelot.view.model_thread import model_function
class AbstractView(QtGui.QWidget):
    """Functionality common to TableViews and FormViews.

    .. attribute:: title_format

        A string used to format the title of the view ::

            title_format = 'Movie rental overview'

    .. attribute:: header_widget

        The widget class to be used as a header in the table view::

            header_widget = None
    """

    title_format = ''
    header_widget = None

    # emitted whenever the view's title changes
    title_changed_signal = QtCore.pyqtSignal(QtCore.QString)

    @QtCore.pyqtSlot()
    def refresh(self):
        """Refresh the data in the current view"""
        pass

    @QtCore.pyqtSlot(str)
    @gui_function
    def change_title(self, new_title):
        """Will emit the title_changed_signal"""
        # guard against emitting on a widget whose C++ side is already deleted
        import sip
        if not sip.isdeleted(self):
            self.title_changed_signal.emit( unicode(new_title) )

    @model_function
    def to_html(self):
        # to be provided by the concrete view; returns an HTML representation
        pass

    @model_function
    def export_to_word(self):
        """Render the view to HTML and open it in Microsoft Word."""
        from camelot.view.export.word import open_html_in_word
        html = self.to_html()
        open_html_in_word(html)

    @model_function
    def export_to_excel(self):
        """Open the view's title, columns and data in Excel.

        NOTE(review): relies on getTitle/getColumns/getData which are not
        defined on AbstractView -- presumably supplied by subclasses; confirm.
        """
        from camelot.view.export.excel import open_data_with_excel
        title = self.getTitle()
        columns = self.getColumns()
        data = [d for d in self.getData()]
        open_data_with_excel(title, columns, data)

    @model_function
    def export_to_mail(self):
        """Render the view to HTML and open it in a new Outlook message."""
        from camelot.view.export.outlook import open_html_in_outlook
        html = self.to_html()
        open_html_in_outlook(html)
class TabView(AbstractView):
    """Class to combine multiple views in Tabs and let them behave as one view.

    This class can be used when defining custom create_table_view methods on an
    ObjectAdmin class to group multiple table views together in one view.

    Fixes: the ``views`` parameter previously used a mutable default
    argument (``[]``); the header widget was added to the layout even when
    it was ``None``.
    """

    def __init__(self, parent, views=None, admin=None):
        """:param views: a list of the views to combine"""
        AbstractView.__init__(self, parent)
        views = views or []
        layout = QtGui.QVBoxLayout()
        if self.header_widget:
            self.header = self.header_widget(self, admin)
            layout.addWidget(self.header)
        else:
            # no header widget class configured; nothing to add to the layout
            self.header = None
        self._tab_widget = QtGui.QTabWidget(self)
        layout.addWidget(self._tab_widget)
        self.setLayout(layout)

        def get_views_and_titles():
            # runs in the model thread: collect each view with its title
            return [(view, view.get_title()) for view in views]

        post(get_views_and_titles, self.set_views_and_titles)
        post(lambda: self.title_format, self.change_title)

    @QtCore.pyqtSlot()
    def refresh(self):
        """Refresh the data in the current view"""
        for i in range(self._tab_widget.count()):
            view = self._tab_widget.widget(i)
            view.refresh()

    def set_views_and_titles(self, views_and_titles):
        """Add each (view, title) pair as a tab."""
        for view, title in views_and_titles:
            self._tab_widget.addTab(view, title)

    def export_to_excel(self):
        # delegate to whichever tab is currently visible
        return self._tab_widget.currentWidget().export_to_excel()

    def export_to_word(self):
        return self._tab_widget.currentWidget().export_to_word()

    def export_to_mail(self):
        return self._tab_widget.currentWidget().export_to_mail()

    def to_html(self):
        return self._tab_widget.currentWidget().to_html()
| kurtraschke/camelot | camelot/view/controls/view.py | Python | gpl-2.0 | 4,493 | [
"VisIt"
] | 31194a10e32e6b7ee63faf8ed2e86cea485c8d69c62bae3fbaf3523686fd7e79 |
# wxp - general framework classes for wxPython
#
# Copyright 2007 Peter Jang <http://avisynth.nl/users/qwerpoi>
# 2010-2013 the AvsPmod authors <https://github.com/avspmod/avspmod>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
# http://www.gnu.org/copyleft/gpl.html .
# Dependencies:
# Python (tested on v2.6 and 2.7)
# wxPython (tested on v2.8 Unicode and 2.9)
# Scripts:
# icon.py (icons embedded in a Python script)
import wx
import wx.lib.buttons as wxButtons
import wx.lib.mixins.listctrl as listmix
import wx.lib.filebrowsebutton as filebrowse
import wx.lib.colourselect as colourselect
from wx.lib.agw.floatspin import FloatSpin
from wx.lib.agw.hyperlink import HyperLinkCtrl
from wx import stc
import string
import keyword
import os
import os.path
import sys
import copy
import time
import wx.lib.newevent
import socket
import thread
import StringIO
import cPickle
from icons import checked_icon, unchecked_icon
# Identifiers for the kinds of option elements a settings dialog can contain.
OPT_ELEM_CHECK = 0
# NOTE(review): INT, FLOAT and SPIN deliberately share one id (all numeric
# entry fields), and FILE aliases FILE_OPEN -- confirm this is intentional.
OPT_ELEM_INT = 1
OPT_ELEM_FLOAT = 1
OPT_ELEM_SPIN = 1
OPT_ELEM_STRING = 2
OPT_ELEM_FILE = 3
OPT_ELEM_FILE_OPEN = 3
OPT_ELEM_FILE_SAVE = 4
OPT_ELEM_FILE_URL = 5
OPT_ELEM_DIR = 6
OPT_ELEM_DIR_URL = 7
OPT_ELEM_RADIO = 8
OPT_ELEM_LIST = 9
OPT_ELEM_SLIDER = 10
OPT_ELEM_COLOR = 11
OPT_ELEM_FONT = 12
OPT_ELEM_BUTTON = 13
OPT_ELEM_SEP = 14

# Human-readable names of every key that may appear in a keyboard shortcut.
keyStringList = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
    'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
    'Enter', 'Space', 'Escape', 'Tab', 'Insert', 'Backspace', 'Delete',
    'Home', 'End', 'PgUp', 'PgDn', 'Up', 'Down', 'Left', 'Right', 'NumLock',
    'Numpad 0', 'Numpad 1', 'Numpad 2', 'Numpad 3', 'Numpad 4', 'Numpad 5', 'Numpad 6', 'Numpad 7', 'Numpad 8', 'Numpad 9',
    'Numpad +', 'Numpad -', 'Numpad *', 'Numpad /', 'Numpad .', 'Numpad Enter',
    '`', '-', '=', '\\', '[', ']', ';', "'", ',', '.', '/',
    '~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', '|', '{', '}', ':', '"', '<', '>', '?',
    ]

# Maps the numpad/numlock names above to their wx key codes.
numpadDict = {
    'NumLock' : wx.WXK_NUMLOCK,
    'Numpad 0': wx.WXK_NUMPAD0,
    'Numpad 1': wx.WXK_NUMPAD1,
    'Numpad 2': wx.WXK_NUMPAD2,
    'Numpad 3': wx.WXK_NUMPAD3,
    'Numpad 4': wx.WXK_NUMPAD4,
    'Numpad 5': wx.WXK_NUMPAD5,
    'Numpad 6': wx.WXK_NUMPAD6,
    'Numpad 7': wx.WXK_NUMPAD7,
    'Numpad 8': wx.WXK_NUMPAD8,
    'Numpad 9': wx.WXK_NUMPAD9,
    'Numpad +': wx.WXK_NUMPAD_ADD,
    'Numpad -': wx.WXK_NUMPAD_SUBTRACT,
    'Numpad *': wx.WXK_NUMPAD_MULTIPLY,
    'Numpad /': wx.WXK_NUMPAD_DIVIDE,
    'Numpad .': wx.WXK_NUMPAD_DECIMAL,
    'Numpad Enter': wx.WXK_NUMPAD_ENTER,
    }
# Custom wx event type used to post argument payloads between threads.
(PostArgsEvent, EVT_POST_ARGS) = wx.lib.newevent.NewEvent()

# Fall back to an identity translation function when gettext's _ is absent.
try: _
except NameError:
    def _(s): return s
def MakeWindowTransparent(window, amount, intangible=False):
    """Make a wx top-level window translucent using the Win32 layered-window API.

    amount: opacity 0-255 (255 = fully opaque).
    intangible: if True, mouse events fall through to whatever is underneath.
    Windows-only (goes straight to user32 via ctypes).
    """
    import ctypes
    user32 = ctypes.windll.user32
    hwnd = window.GetHandle()
    # 0xffffffec is GWL_EXSTYLE (-20) expressed as an unsigned long
    style = user32.GetWindowLongA(hwnd, 0xffffffecL)
    style |= 0x00080000  # WS_EX_LAYERED: enables per-window alpha
    if intangible:
        style |= 0x00000020L  # WS_EX_TRANSPARENT: clicks pass through
    window.SetWindowStyleFlag(window.GetWindowStyleFlag()|wx.STAY_ON_TOP)
    user32.SetWindowLongA(hwnd, 0xffffffecL, style)
    # Last arg 2 == LWA_ALPHA: interpret 'amount' as an alpha value
    user32.SetLayeredWindowAttributes(hwnd, 0, amount, 2)
def GetTranslatedShortcut(shortcut):
    """Return the shortcut string with its modifier names passed through gettext."""
    for modifier in ('Ctrl', 'Shift', 'Alt'):
        shortcut = shortcut.replace(modifier, _(modifier))
    return shortcut
class CharValidator(wx.PyValidator):
    """Validator restricting keyboard input to letters ('alpha') or digits ('digit')."""
    def __init__(self, flag):
        wx.PyValidator.__init__(self)
        self.flag = flag
        self.Bind(wx.EVT_CHAR, self.OnChar)
    def Clone(self):
        # Validators are copied by wx, so Clone must be provided
        return CharValidator(self.flag)
    def Validate(self, win):
        return True
    def TransferToWindow(self):
        return True
    def TransferFromWindow(self):
        return True
    def OnChar(self, event):
        keycode = event.GetKeyCode()
        # Control keys and anything outside Latin-1 are always passed through
        if keycode < wx.WXK_SPACE or keycode == wx.WXK_DELETE or keycode > 255:
            event.Skip()
        elif self.flag == 'alpha':
            if chr(keycode) in string.letters:
                event.Skip()
        elif self.flag == 'digit':
            if chr(keycode) in string.digits:
                event.Skip()
        # anything not skipped above is swallowed (not inserted)
class ListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
    """wx.ListCtrl with an auto-sized last column plus selection helpers."""
    def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
        listmix.ListCtrlAutoWidthMixin.__init__(self)
        self.parent = parent
    def SelectItem(self, item):
        # Select + focus the row, scroll it into view, grab keyboard focus
        state = wx.LIST_STATE_SELECTED | wx.LIST_STATE_FOCUSED
        self.SetItemState(item, state, state)
        self.EnsureVisible(item)
        self.SetFocus()
    def SelectLabel(self, label):
        self.SelectItem(self.FindItem(-1, label))
    def GetSelectedItem(self):
        # -1 when nothing is selected
        return self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
class MenuItemInfo(object):
    """Simple record describing one menu item (or submenu)."""
    def __init__(self, label=None, handler=None, status=None, submenu=None, id=wx.ID_ANY):
        self.label = label      # menu item text
        self.handler = handler  # callable bound to wx.EVT_MENU, or None
        # Bug fix: 'status' (status bar help string) was accepted but
        # silently dropped by the original __init__
        self.status = status
        self.submenu = submenu  # nested menu description, if any
        self.id = id            # wx id for the item
class StdoutStderrWindow:
    """
    A class that can be used for redirecting Python's stdout and
    stderr streams.  It will do nothing until something is written to
    the stream at which point it will create a Frame with a text area
    and write the text there.  Everything written is also appended to
    'error_log.txt' next to the executable (or in the cwd when the
    program is not frozen).
    """
    def __init__(self, title=None):
        if title is None:
            title = _('Error Window')
        self.frame = None
        self.title = title
        self.pos = wx.DefaultPosition
        self.size = (550, 300)
        self.parent = None
        logname = 'error_log.txt'
        # Frozen (py2exe-style) builds log next to the executable,
        # normal runs log in the current working directory
        if hasattr(sys, 'frozen'):
            self.logfilename = os.path.join(os.path.dirname(sys.executable), logname)
        else:
            self.logfilename = os.path.join(os.getcwdu(), logname)
        self.firstTime = True
    def SetParent(self, parent):
        """Set the window to be used as the popup Frame's parent."""
        self.parent = parent
    def CreateOutputWindow(self, st):
        """Create the popup frame lazily and show the initial text."""
        self.frame = wx.Frame(self.parent, -1, self.title, self.pos, self.size,
                              style=wx.DEFAULT_FRAME_STYLE)
        self.text = TextCtrl(self.frame, -1, "",
                             style=wx.TE_MULTILINE|wx.TE_READONLY)
        self.text.AppendText(st)
        self.frame.Show(True)
        wx.EVT_CLOSE(self.frame, self.OnCloseWindow)
    def OnCloseWindow(self, event):
        if self.frame is not None:
            self.frame.Destroy()
        self.frame = None
        self.text = None
    # These methods provide the file-like output behaviour.
    def write(self, text):
        """
        Create the output window if needed and write the string to it.
        If not called in the context of the gui thread then uses
        CallAfter to do the work there.
        """
        if self.frame is None:
            if not wx.Thread_IsMain():
                wx.CallAfter(self.CreateOutputWindow, text)
            else:
                self.CreateOutputWindow(text)
        else:
            if not wx.Thread_IsMain():
                wx.CallAfter(self.text.AppendText, text)
            else:
                self.text.AppendText(text)
        # Also append to the log file; 'with' guarantees the handle is
        # closed even if a write raises (original open/close leaked it then)
        with open(self.logfilename, 'a') as f:
            if self.firstTime:
                # Stamp the session start the first time anything is logged
                f.write('\n[%s]\n' % time.asctime())
                self.firstTime = False
            f.write(text)
    def close(self):
        if self.frame is not None:
            wx.CallAfter(self.frame.Close)
    def flush(self):
        pass
class App(wx.App):
    # Presumably consulted by wx.App's stdio redirection in place of the
    # default output window -- confirm against wx.App(redirect=...) usage
    outputWindowClass = StdoutStderrWindow
class SingleInstanceApp(wx.App):
    """wx.App enforcing a single running instance per user.

    A secondary instance forwards its command-line arguments to the first
    one over a localhost socket; callers are expected to check
    self.IsFirstInstance (typically in OnInit) and bail out if False.
    """
    outputWindowClass = StdoutStderrWindow
    port = 50009  # localhost port used for inter-instance messages
    name = 'SingleInstanceApp'  # base name for the single-instance lock
    IsFirstInstance = True
    boolSingleInstance = True  # set False to allow multiple instances
    def __init__(self, *args, **kwargs):
        # Get extra keyword arguments
        if kwargs.has_key('name'):
            self.name = kwargs.pop('name')
        if kwargs.has_key('port'):
            self.port = kwargs.pop('port')
        # Determine if program is already running or not
        # (kept on self so the checker's lock lives as long as the app)
        self.instance = wx.SingleInstanceChecker(self.name+wx.GetUserId())
        if self.instance.IsAnotherRunning():
            self.IsFirstInstance = False
            if self.boolSingleInstance:
                # Send data to the main instance via socket
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect(('localhost', self.port))
                pickledstring = StringIO.StringIO()
                cPickle.dump(sys.argv[1:],pickledstring)
                sock.sendall(pickledstring.getvalue())
                # Wait for the echo from ArgsPosterThread before continuing
                response = sock.recv(8192)
            # Start the wx.App (typically check self.IsFirstInstance flag and return False)
            wx.App.__init__(self, *args, **kwargs)
        else:
            self.IsFirstInstance = True
            wx.App.__init__(self, *args, **kwargs)
            # Start socket server (in a separate thread) to receive arguments from other instances
            self.argsPosterThread = ArgsPosterThread(self)
            self.argsPosterThread.Start()
    def OnExit(self):
        if self.IsFirstInstance:
            wx.Yield()
            self.argsPosterThread.Stop()
            # Busy-wait (0.1s polls) until the server thread has shut down
            running = 1
            while running:
                running = 0
                running = running + self.argsPosterThread.IsRunning()
                time.sleep(0.1)
class ArgsPosterThread:
    """Background socket server for SingleInstanceApp.

    Listens on localhost:port, receives pickled sys.argv lists from
    secondary instances, and re-posts them to the app as PostArgsEvent.
    Stop() wakes the blocking accept() with a dummy connection.
    """
    def __init__(self, app):
        self.app = app
    def Start(self):
        self.keepGoing = self.running = True
        thread.start_new_thread(self.Run, ())
    def Stop(self):
        # Clear the flag, then poke the listening socket so that the
        # blocking accept() in Run() returns and the loop can exit
        self.keepGoing = False
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', self.app.port))
        sock.close()
    def IsRunning(self):
        return self.running
    def Run(self):
        # Prevent open sockets from being inherited by child processes
        # see http://bugs.python.org/issue3006
        # code taken from CherryPy
        #
        # Copyright (c) 2004-2011, CherryPy Team (team@cherrypy.org)
        # All rights reserved.
        #
        # Redistribution and use in source and binary forms, with or without modification,
        # are permitted provided that the following conditions are met:
        #
        #     * Redistributions of source code must retain the above copyright notice,
        #       this list of conditions and the following disclaimer.
        #     * Redistributions in binary form must reproduce the above copyright notice,
        #       this list of conditions and the following disclaimer in the documentation
        #       and/or other materials provided with the distribution.
        #     * Neither the name of the CherryPy Team nor the names of its contributors
        #       may be used to endorse or promote products derived from this software
        #       without specific prior written permission.
        #
        # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
        # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
        # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
        # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
        # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
        # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
        # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
        # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
        # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
        # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
        try:
            import fcntl
        except ImportError:
            try:
                from ctypes import windll, WinError
            except ImportError:
                def prevent_socket_inheritance(sock):
                    """Dummy function, since neither fcntl nor ctypes are available."""
                    pass
            else:
                def prevent_socket_inheritance(sock):
                    """Mark the given socket fd as non-inheritable (Windows)."""
                    if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                        raise WinError()
        else:
            def prevent_socket_inheritance(sock):
                """Mark the given socket fd as non-inheritable (POSIX)."""
                fd = sock.fileno()
                old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        prevent_socket_inheritance(sock)
        sock.bind(('localhost',self.app.port))
        sock.listen(5)
        try:
            while self.keepGoing:
                newSocket, address = sock.accept()
                prevent_socket_inheritance(newSocket)
                while True:
                    receivedData = newSocket.recv(8192)
                    if not receivedData: break
                    # Post a wxPython event with the unpickled data
                    # NOTE(review): unpickling socket data is only tolerable
                    # because the listener is bound to localhost -- any local
                    # process can still send arbitrary pickles; confirm this
                    # risk is acceptable
                    pickledstring = StringIO.StringIO(receivedData)
                    unpickled = cPickle.load(pickledstring)
                    evt = PostArgsEvent(data=unpickled)
                    wx.PostEvent(self.app, evt)
                    # Echo the payload back so the sender's recv() unblocks
                    newSocket.sendall(receivedData)
                newSocket.close()
                # Bring the (application-specific) main window to the front
                if self.app.IsIconized():
                    self.app.Iconize(False)
                else:
                    self.app.Raise()
                # NOTE(review): separatevideowindow/videoDialog are attributes
                # of the concrete application, not defined here -- confirm
                if self.app.separatevideowindow and self.app.videoDialog.IsShown():
                    if self.app.videoDialog.IsIconized():
                        self.app.videoDialog.Iconize(False)
                    else:
                        self.app.videoDialog.Raise()
        finally:
            sock.close()
            self.running = False
class Frame(wx.Frame):
    """wx.Frame with helpers for building menu bars/menus with
    user-configurable keyboard shortcuts, and toolbar-style buttons."""
    def createMenuBar(self, menuBarInfo, shortcutList, oldShortcuts, menuBackups=[]):
        '''
        General utility function to create a menu bar of menus
        Input is a list of label/menuInfo tuples
        The function utilizes the createMenu function (defined below)
        '''
        # NOTE(review): menuBackups is a mutable default used as an in/out
        # parameter: indices come in, rebuilt backup menus go out -- confirm
        # all callers pass their own list
        buckups = menuBackups[:]
        menuBackups[:] = []
        index = 0
        # Maps a window (or None for self) to the ids of the menu items
        # whose accelerators should be bound to it
        self._shortcutBindWindowDict = {}
        menuBar = wx.MenuBar()
        for eachMenuBarInfo in menuBarInfo:
            menuLabel = eachMenuBarInfo[0]
            menuInfo = eachMenuBarInfo[1:]
            menu = self.createMenu(menuInfo, menuLabel, shortcutList, oldShortcuts)
            menuBar.Append(menu, menuLabel)
            if index in buckups:
                # Build a detached copy of this menu for later restoration
                menuBackups.append(self.createMenu(menuInfo, menuLabel, shortcutList, oldShortcuts, True))
            index += 1
        return menuBar
    def createMenu(self, menuInfo, name='', shortcutList = None, oldShortcuts=None, backup=False):
        '''Build a single wx.Menu from a list of item tuples.

        Each tuple is (label, shortcut, handler, status, attr, state,
        bindwindow), with trailing entries optional; '' or a 1-tuple means
        a separator, handler None means a submenu description, handler -1
        means a prebuilt submenu.  Discovered shortcuts are appended to
        shortcutList; oldShortcuts overrides the default shortcuts.
        '''
        menu = wx.Menu()
        if shortcutList is None:
            shortcutList = []
        if oldShortcuts is None:
            oldShortcutNames = []
            oldShortcuts = []
        else:
            try:
                oldShortcutNames, oldShortcutInfos = oldShortcuts
            except ValueError:
                oldShortcutNames = []
                oldShortcuts = []
        for eachMenuInfo in menuInfo:
            # Get the info, fill in missing info with defaults
            nItems = len(eachMenuInfo)
            # Special case: separator
            if eachMenuInfo == '' or nItems == 1:
                menu.AppendSeparator()
                menu.Remove(menu.Append(wx.ID_ANY, '0',).GetId()) # wxGTK fix
                continue
            if nItems > 7:
                raise
            defaults = ('', '', None, '', wx.ITEM_NORMAL, None, self)
            label, shortcut, handler, status, attr, state, bindwindow = eachMenuInfo + defaults[nItems:]
            # Special case: submenu
            if handler is None: #not isinstance(handler, FunctionType):
                submenu = self.createMenu(shortcut, '%s -> %s'% (name, label), shortcutList, oldShortcuts, backup)
                menu.AppendMenu(wx.ID_ANY, label, submenu, status)
                continue
            elif handler == -1:
                # 'shortcut' slot carries an already-built wx.Menu here
                submenu = shortcut #self.createMenu(shortcut, '%s -> %s'% (name, label), shortcutList, oldShortcuts, bindwindow)
                menu.AppendMenu(wx.ID_ANY, label, submenu, status)
                continue
            # Get the id and type (normal, checkbox, radio)
            if attr in (wx.ITEM_CHECK, wx.ITEM_RADIO):
                kind = attr
                id = wx.ID_ANY
            elif attr == wx.ITEM_NORMAL:
                kind = attr
                id = wx.ID_ANY
            elif type(attr) is tuple:
                # attr may bundle (kind, initial state, explicit id)
                kind, state, id = attr
            else:
                # attr is a bare explicit id
                kind = wx.ITEM_NORMAL
                id = attr
            # Get the shortcut string
            itemName = '%s -> %s'% (name, label)
            itemName = itemName.replace('&', '')
            try:
                # User-customized shortcut overrides the default one
                index = oldShortcutNames.index(itemName)
                shortcut = oldShortcutInfos[index][1]
            except ValueError:
                pass
            if shortcut != '' and shortcut not in [item[1] for item in shortcutList]:
                # Trailing no-break space presumably keeps wx from parsing
                # the label text as a real accelerator -- TODO confirm
                shortcutString = u'\t%s\u00a0' % GetTranslatedShortcut(shortcut)
            else:
                shortcutString = ''
            # Append the menu item
            if os.name != 'nt' and wx.version() >= '2.9': # XXX
                shortcutString = shortcutString[:-1]
            menuItem = menu.Append(id, '%s%s' % (label, shortcutString), status, kind)
            id = menuItem.GetId()
            self.Bind(wx.EVT_MENU, handler, menuItem)
            # Add the accelerator
            if shortcut is not None:
                if not backup and (shortcut == '' or not shortcut[-1].isspace()):
                    shortcutList.append([itemName, shortcut, id])
                try:
                    # _shortcutBindWindowDict only exists when called via
                    # createMenuBar; standalone calls fall through here
                    bindShortcutIdList = self._shortcutBindWindowDict.setdefault(bindwindow, [])
                    bindShortcutIdList.append(id)
                except AttributeError:
                    pass
            # Extra properties (enable/disable, check)
            if state is not None:
                if kind == wx.ITEM_NORMAL:
                    menuItem.Enable(state)
                else:
                    menuItem.Check(state)
        return menu
    def BindShortcutsToWindows(self, shortcutInfo, forcewindow=None):
        '''Build accelerator tables from shortcutInfo triples and attach
        them to the windows recorded in _shortcutBindWindowDict (or all
        to forcewindow if given).'''
        id_dict = dict([(id, shortcut) for itemName, shortcut, id in shortcutInfo])
        idDict = id_dict
        forceAccelList = []
        for window, idList in self._shortcutBindWindowDict.items():
            accelList = []
            #~ for label, data in value.items():
                #~ accelString, id = data
                #~ accel = wx.GetAccelFromString('\t'+accelString)
                #~ accelList.append((accel.GetFlags(), accel.GetKeyCode(), id))
            for id in idList:
                try:
                    accelString = idDict[id]
                except KeyError:
                    continue
                #~ index = [z for x,y,z in shortcutInfo].index(id)
                accel = wx.GetAccelFromString('\t'+accelString)
                if accel is not None and accel.IsOk():
                    accelList.append((accel.GetFlags(), accel.GetKeyCode(), id))
                else:
                    # Numpad keys are not parsed by GetAccelFromString:
                    # parse the modifiers with a 'Space' stand-in, then use
                    # the raw keycode from numpadDict
                    for key in numpadDict:
                        if accelString.endswith(key):
                            break
                    accelString = accelString.replace(key, 'Space')
                    accel = wx.GetAccelFromString('\t'+accelString)
                    accelList.append((accel.GetFlags(), numpadDict[key], id))
            if forcewindow is None:
                accelTable = wx.AcceleratorTable(accelList)
                window.SetAcceleratorTable(accelTable)
            else:
                forceAccelList += accelList
        if forcewindow is not None:
            accelTable = wx.AcceleratorTable(forceAccelList)
            forcewindow.SetAcceleratorTable(accelTable)
    def accelListFromMenu(self, menu, accelList):
        '''Recursively collect (flags, keycode, id) accelerator entries
        from a menu's item labels into accelList.'''
        for menuItem in menu.GetMenuItems():
            submenu = menuItem.GetSubMenu()
            if submenu is not None:
                self.accelListFromMenu(submenu, accelList)
            else:
                id = menuItem.GetId()
                text = menuItem.GetText()
                accel = wx.GetAccelFromString(text)
                if accel is not None and accel.IsOk():
                    accelList.append((accel.GetFlags(), accel.GetKeyCode(), id))
    def createButton(self, parent, label='', id=wx.ID_ANY, handler=None, pos=(0,0)):
        '''Create a wx.Button, optionally bound to handler.'''
        button = wx.Button(parent, id, label, pos)
        if handler:
            #~ self.Bind(wx.EVT_BUTTON, handler, button)
            button.Bind(wx.EVT_BUTTON, handler)
        return button
    def createToolbarButton(self, parent, label, handler, pos=(0, 0), size=wx.DefaultSize, style=wx.NO_BORDER, toolTipTxt=None, statusTxt=None):
        '''Create a flat toolbar-style button (bitmap or text label) with
        hover bevel feedback and optional tooltip/status bar text.
        An empty string label yields a vertical separator line instead.'''
        # Return a static line if empty
        if type(label) == type('') and label == '':
            return wx.StaticLine(parent, style=wx.LI_VERTICAL)
        # Create the button
        try: # label is a bitmap
            w,h = label.GetSize()
            button = wxButtons.GenBitmapButton(parent, wx.ID_ANY, label, pos, size, style)
            button.SetBestSize((w+7, h+7))
        except AttributeError: # label is a string
            button = wxButtons.GenButton(parent, wx.ID_ANY, label, pos, size, style)
        # Bind the button to the given handler
        #~ self.Bind(wx.EVT_BUTTON, handler, button)
        button.Bind(wx.EVT_BUTTON, handler)
        # Set the tool tip string if given
        if toolTipTxt:
            button.SetToolTipString(toolTipTxt)
        # Define mouse event functions (change status bar text and button bevel width)
        def OnMouseMove(event):
            if statusTxt:
                self.SetStatusText(statusTxt)
        def OnMouseOver(event):
            if statusTxt:
                self.SetStatusText(statusTxt)
            # Deepen the bevel to highlight the hovered button
            b = event.GetEventObject()
            b.SetBezelWidth(b.GetBezelWidth()+1)
            b.Refresh()
        def OnMouseLeave(event):
            if statusTxt:
                try:
                    self.ResetStatusText()
                except AttributeError:
                    self.SetStatusText('')
            b = event.GetEventObject()
            b.SetBezelWidth(b.GetBezelWidth()-1)
            b.Refresh()
        button.Bind(wx.EVT_ENTER_WINDOW, OnMouseOver)
        button.Bind(wx.EVT_MOTION, OnMouseMove)
        button.Bind(wx.EVT_LEAVE_WINDOW, OnMouseLeave)
        return button
class Notebook(wx.Notebook):
    """wx.Notebook, changing selected tab on mouse scroll"""
    def __init__(self, *args, **kwargs):
        self.invert_mouse_wheel_rotation = kwargs.pop('invert_scroll', False)
        wx.Notebook.__init__(self, *args, **kwargs)
        # Accumulated wheel rotation since the last tab change
        self.mouse_wheel_rotation = 0
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheelNotebook)
    def OnMouseWheelNotebook(self, event):
        """Rotate between tabs"""
        rotation = event.GetWheelRotation()
        if self.mouse_wheel_rotation * rotation < 0:
            # Direction reversed: restart accumulation from this event
            self.mouse_wheel_rotation = rotation
        else:
            self.mouse_wheel_rotation += rotation
        if abs(self.mouse_wheel_rotation) >= event.GetWheelDelta():
            # One full wheel notch accumulated -> move one tab
            inc = -1 if self.mouse_wheel_rotation > 0 else 1
            if self.invert_mouse_wheel_rotation: inc = -inc
            self.SelectTab(inc=inc)
            self.mouse_wheel_rotation = 0
    def SelectTab(self, index=None, inc=0):
        """Change to another tab
        index: go the specified tab
        inc: increment, with wrap-around"""
        nTabs = self.GetPageCount()
        if nTabs == 1:
            self.SetSelection(0)
            return True
        if index is None:
            index = inc + self.GetSelection()
        # Allow for wraparound with user-specified inc
        if index < 0:
            index = nTabs - abs(index) % nTabs
        if index == nTabs:
            index = 0
        if index > nTabs - 1:
            index = index % nTabs
        # Limit index if specified directly by user
        # NOTE(review): the normalization above already maps every index
        # into [0, nTabs-1], so these two checks look unreachable -- if an
        # out-of-range user index was meant to fail, the wrap block should
        # be conditional on 'index is None'; confirm intended behavior
        if index < 0:
            return False
        if index > nTabs - 1:
            return False
        self.SetSelection(index)
        return True
class QuickFindDialog(wx.Dialog):
    ''' Simple find dialog for a wx.StyledTextCtrl, using FindReplaceDialog'''
    def __init__(self, parent, text=''):
        wx.Dialog.__init__(self, parent, wx.ID_ANY, _('Quick find'), style=0)
        self.app = parent.app
        # Prepare a toolbar-like dialog
        find_bitmap = wx.StaticBitmap(self, wx.ID_ANY, wx.ArtProvider.GetBitmap(wx.ART_FIND))
        self.find_text_ctrl = wx.TextCtrl(self, wx.ID_ANY, size=(200, -1),
                                          style=wx.TE_PROCESS_ENTER, value=text)
        # wx.ID_CLOSE exists as a stock close id only from wx 2.9 on
        id = wx.ID_CLOSE if wx.version() >= '2.9' else wx.ID_OK
        self.close = wx.BitmapButton(self, id, bitmap=wx.ArtProvider.GetBitmap(wx.ART_CROSS_MARK))
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(find_bitmap, 0, wx.ALIGN_CENTER|wx.ALL, 5)
        sizer.Add(self.find_text_ctrl, 1, wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, 5)
        sizer.Add(self.close, 0, wx.ALIGN_CENTER|wx.ALL, 5)
        sizer.Fit(self)
        self.SetSizer(sizer)
        sizer.SetSizeHints(self)
        sizer.Layout()
        self.Bind(wx.EVT_BUTTON, self.OnClose, self.close)
        # Typing searches incrementally; Enter jumps to the next match
        self.Bind(wx.EVT_TEXT, self.OnInstantFindNext, self.find_text_ctrl)
        self.Bind(wx.EVT_TEXT_ENTER, self.OnFindNext, self.find_text_ctrl)
        self.find_text_ctrl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.find_text_ctrl.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
        # Auto-hide timer: the dialog hides itself a few seconds after
        # losing keyboard focus (see OnKillFocus)
        class QuickFindTimer(wx.Timer):
            def __init__(self, parent):
                wx.Timer.__init__(self)
                self.parent = parent
            def Notify(self):
                self.parent.Hide()
        self.timer = QuickFindTimer(self)
        # Bind open find/replace dialog and up and down arrows
        up_id = wx.NewId()
        self.Bind(wx.EVT_MENU, self.OnFindPrevious, id=up_id)
        down_id = wx.NewId()
        self.Bind(wx.EVT_MENU, self.OnFindNext, id=down_id)
        accel_list = []
        accel_list.append(wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_UP, up_id))
        accel_list.append(wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_DOWN, down_id))
        # Re-route the app's Find/Replace menu shortcuts while this dialog
        # has focus, so they keep working inside the quick-find box
        find = replace = False
        find_menu = u'{0} -> {1}'.format(_('&Edit'), _('Find...')).replace('&', '')
        replace_menu = u'{0} -> {1}'.format(_('&Edit'), _('Replace...')).replace('&', '')
        for menu_item, shortcut, id in self.app.options['shortcuts']:
            if not find and menu_item.replace('&', '') == find_menu:
                accel = wx.GetAccelFromString('\t' + shortcut)
                if accel is not None and accel.IsOk():
                    accel_list.append(wx.AcceleratorEntry(accel.GetFlags(), accel.GetKeyCode(), id))
                    self.Bind(wx.EVT_MENU, lambda event:self.UpdateText(), id=id)
                    find = True
            if not replace and menu_item.replace('&', '') == replace_menu:
                accel = wx.GetAccelFromString('\t' + shortcut)
                if accel is not None and accel.IsOk():
                    accel_list.append(wx.AcceleratorEntry(accel.GetFlags(), accel.GetKeyCode(), id))
                    self.Bind(wx.EVT_MENU, self.app.OnMenuEditReplace, id=id)
                    replace = True
            if find and replace: break
        self.SetAcceleratorTable(wx.AcceleratorTable(accel_list))
    def SetFocus(self):
        # Focus the text box rather than the dialog itself
        self.find_text_ctrl.SetFocus()
    def OnSetFocus(self, event):
        # Cancel the pending auto-hide and preselect the current text
        self.timer.Stop()
        self.find_text_ctrl.SelectAll()
    def OnKillFocus(self, event):
        # Hide the dialog 3 seconds after focus leaves the text box
        self.timer.Start(3000)
    def GetFindText(self):
        return self.find_text_ctrl.GetValue()
    def SetFindText(self, text):
        # ChangeValue doesn't fire EVT_TEXT, so no instant search here
        self.find_text_ctrl.ChangeValue(text)
        self.find_text_ctrl.SetInsertionPointEnd()
    def UpdateText(self, text=None):
        '''Seed the search box (and the replace dialog) with *text*,
        defaulting to the current editor selection.'''
        if text is None:
            text = self.app.currentScript.GetSelectedText()
        self.SetFindText(text)
        self.app.replaceDialog.SetFindText(text)
    def OnInstantFindNext(self, event):
        # Incremental search: start from the selection start so the match
        # doesn't run away while the user is still typing
        script = self.app.currentScript
        range = (script.GetSelectionStart(),
                 script.GetLineEndPosition(script.GetLineCount() - 1))
        self.app.replaceDialog.SetFindText(self.GetFindText())
        self.app.replaceDialog.OnFindNext(range=range, update_list=False)
    def OnFindNext(self, event):
        self.app.replaceDialog.SetFindText(self.GetFindText())
        self.app.replaceDialog.OnFindNext()
    def OnFindPrevious(self, event):
        self.app.replaceDialog.SetFindText(self.GetFindText())
        self.app.replaceDialog.OnFindPrevious()
    def OnClose(self, event):
        self.Hide()
class FindReplaceDialog(wx.Dialog):
    ''' Find/replace dialog for a wx.StyledTextCtrl'''
    def __init__(self, parent, text=''):
        wx.Dialog.__init__(self, parent, wx.ID_ANY, _('Find/replace text'),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        self.app = parent.app
        # Recent search/replace strings persisted in the app options
        self.find_recent = self.app.options['find_recent']
        self.replace_recent = self.app.options['replace_recent']
        # Set controls
        panel = wx.Panel(self)
        find_text = wx.StaticText(self, wx.ID_ANY, _('Search &for'))
        self.find_text_ctrl = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_DROPDOWN,
                                          size=(200,-1), value=text, choices=self.find_recent)
        replace_text = wx.StaticText(self, wx.ID_ANY, _('R&eplace with'))
        self.replace_text_ctrl = wx.ComboBox(self, wx.ID_ANY, size=(200,-1),
                                             style=wx.CB_DROPDOWN|wx.TE_PROCESS_ENTER,
                                             value='', choices=self.replace_recent)
        self.find_next = wx.Button(self, wx.ID_ANY, label=_('Find &next'))
        self.find_previous = wx.Button(self, wx.ID_ANY, label=_('Find &previous'))
        self.replace_next = wx.Button(self, wx.ID_ANY, label=_('&Replace next'))
        self.replace_all = wx.Button(self, wx.ID_ANY, label=_('Replace &all'))
        # wx.ID_CLOSE exists as a stock close id only from wx 2.9 on
        id = wx.ID_CLOSE if wx.version() >= '2.9' else wx.ID_OK
        self.close = wx.Button(self, id, label=_('Close'))
        self.word_start = wx.CheckBox(self, wx.ID_ANY, label=_('Only on word s&tart'))
        self.whole_word = wx.CheckBox(self, wx.ID_ANY, label=_('Only &whole words'))
        self.only_selection = wx.CheckBox(self, wx.ID_ANY, label=_('Only in &selection'))
        self.dont_wrap = wx.CheckBox(self, wx.ID_ANY, label=_("&Don't wrap-around"))
        self.match_case = wx.CheckBox(self, wx.ID_ANY, label=_('&Case sensitive'))
        self.find_regexp = wx.CheckBox(self, wx.ID_ANY, label=_('Use regular e&xpressions'))
        re_url = HyperLinkCtrl(self, wx.ID_ANY, label='?',
                               URL=r'http://www.yellowbrain.com/stc/regexp.html')
        self.escape_sequences = wx.CheckBox(self, wx.ID_ANY, label=_('&Interpret escape sequences'))
        # Bind events
        def OnChar(event):
            key = event.GetKeyCode()
            if key == wx.WXK_TAB: # wx.TE_PROCESS_ENTER causes wx.EVT_CHAR to also process TAB
                panel.Navigate(flags = 0 if event.ShiftDown() else wx.NavigationKeyEvent.IsForward)
            else:
                event.Skip()
        self.replace_text_ctrl.Bind(wx.EVT_CHAR, OnChar)
        self.Bind(wx.EVT_TEXT_ENTER, self.OnReplace, self.replace_text_ctrl)
        self.Bind(wx.EVT_BUTTON, self.OnFindNext, self.find_next)
        self.Bind(wx.EVT_BUTTON, self.OnFindPrevious, self.find_previous)
        self.Bind(wx.EVT_BUTTON, self.OnReplace, self.replace_next)
        self.Bind(wx.EVT_BUTTON, self.OnReplaceAll, self.replace_all)
        self.Bind(wx.EVT_BUTTON, self.OnClose, self.close)
        # Organize controls
        check1_sizer = wx.BoxSizer(wx.VERTICAL)
        check1_sizer.Add(self.word_start, 0, wx.EXPAND|wx.RIGHT|wx.TOP|wx.BOTTOM, 4)
        check1_sizer.Add(self.whole_word, 0, wx.EXPAND|wx.RIGHT|wx.TOP|wx.BOTTOM, 4)
        check1_sizer.Add(self.only_selection, 0, wx.EXPAND|wx.RIGHT|wx.TOP|wx.BOTTOM, 4)
        check1_sizer.Add(self.dont_wrap, 0, wx.EXPAND|wx.RIGHT|wx.TOP|wx.BOTTOM, 4)
        check2_sizer = wx.BoxSizer(wx.VERTICAL)
        check2_sizer.Add(self.match_case, 0, wx.EXPAND|wx.ALL, 4)
        re_sizer = wx.BoxSizer(wx.HORIZONTAL)
        re_sizer.Add(self.find_regexp, 0)
        # Bug fix: the proportion argument was missing, so wx.LEFT was
        # passed as the proportion and 5 as the flag
        re_sizer.Add(re_url, 0, wx.LEFT, 5)
        check2_sizer.Add(re_sizer, 0, wx.EXPAND|wx.ALL, 4)
        check2_sizer.Add(self.escape_sequences, 0, wx.EXPAND|wx.ALL, 4)
        check_sizer = wx.BoxSizer(wx.HORIZONTAL)
        check_sizer.Add(check1_sizer, 0)
        check_sizer.Add(check2_sizer, 0)
        ctrl_sizer = wx.BoxSizer(wx.VERTICAL)
        ctrl_sizer.Add(find_text, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 3)
        ctrl_sizer.Add(self.find_text_ctrl, 0, wx.EXPAND|wx.ALL, 3)
        ctrl_sizer.Add(replace_text, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 3)
        ctrl_sizer.Add(self.replace_text_ctrl, 0, wx.EXPAND|wx.ALL, 3)
        ctrl_sizer.Add(check_sizer, 0, wx.EXPAND|wx.ALL, 3)
        button_sizer = wx.BoxSizer(wx.VERTICAL)
        button_sizer.Add(self.find_next, 0, wx.EXPAND|wx.ALL, 3)
        button_sizer.Add(self.find_previous, 0, wx.EXPAND|wx.ALL, 3)
        button_sizer.Add(self.replace_next, 0, wx.EXPAND|wx.ALL, 3)
        button_sizer.Add(self.replace_all, 0, wx.EXPAND|wx.ALL, 3)
        button_sizer.Add(self.close, 0, wx.EXPAND|wx.ALL, 3)
        col_sizer = wx.BoxSizer(wx.HORIZONTAL)
        col_sizer.Add(ctrl_sizer, 1, wx.EXPAND|wx.ALIGN_CENTER)
        col_sizer.Add(button_sizer, 0, wx.EXPAND|wx.ALIGN_CENTER|wx.LEFT, 2)
        # Size the elements
        dlgSizer = wx.BoxSizer(wx.VERTICAL)
        dlgSizer.Add(col_sizer, 0, wx.EXPAND|wx.ALL, 5)
        dlgSizer.Fit(self)
        self.SetSizer(dlgSizer)
        dlgSizer.SetSizeHints(self)
        dlgSizer.Layout()
        self.find_next.SetDefault()
        self.find_text_ctrl.SetFocus()
    def _RememberRecent(self, text, recent, ctrl):
        '''Move *text* to the front of the recent-entries list and the
        matching combo box (no-op if already present). Factored out of
        the four handlers that previously duplicated this.'''
        if text not in recent:
            recent[11:] = []  # cap the history length before inserting
            recent.insert(0, text)
            ctrl.Insert(text, 0)
    def GetFindText(self):
        return self.find_text_ctrl.GetValue()
    def GetReplaceText(self):
        return self.replace_text_ctrl.GetValue()
    def SetFindText(self, text):
        self.find_text_ctrl.SetValue(text)
        if self.IsShown():
            self.find_text_ctrl.SetFocus()
            self.find_text_ctrl.SetInsertionPointEnd()
    def SetReplaceText(self, text):
        self.replace_text_ctrl.SetValue(text)
        if self.IsShown():
            self.replace_text_ctrl.SetFocus()
            self.replace_text_ctrl.SetInsertionPointEnd()
    def UpdateText(self, text=None, ctrl='find'):
        '''Seed the find or replace box with *text*, defaulting to the
        current editor selection.'''
        if text is None:
            text = self.app.currentScript.GetSelectedText()
        if ctrl == 'find':
            self.SetFindText(text)
        else:
            self.SetReplaceText(text)
    def OnFindNext(self, event=None, range=None, update_list=True):
        text = self.GetFindText()
        if not text: return
        if update_list:
            self._RememberRecent(text, self.find_recent, self.find_text_ctrl)
        if self.escape_sequences.IsChecked():
            text = self.Unescape(text)
        # Bug fix: compare against -1 -- a match at position 0 is falsy
        # and used to skip EnsureCaretVisible
        if self.Find(text, True, range) != -1:
            script = self.app.currentScript
            script.EnsureCaretVisible()
    def OnFindPrevious(self, event=None):
        text = self.GetFindText()
        if not text: return
        self._RememberRecent(text, self.find_recent, self.find_text_ctrl)
        if self.escape_sequences.IsChecked():
            text = self.Unescape(text)
        if self.Find(text, False) != -1:  # -1 check: position 0 is a valid match
            script = self.app.currentScript
            script.EnsureCaretVisible()
    def Find(self, text, top2bottom=True, range=None, wrap=None):
        '''Search the current script for *text* and select the match.

        Returns the match position, or -1 if not found.  *range* limits
        the search; *wrap* overrides the "don't wrap-around" checkbox.
        '''
        script = self.app.currentScript
        # Translate the checkboxes into Scintilla search flags
        stcflags = 0
        if self.match_case.IsChecked():
            stcflags = stcflags | stc.STC_FIND_MATCHCASE
        if self.word_start.IsChecked():
            stcflags = stcflags | stc.STC_FIND_WORDSTART
        if self.whole_word.IsChecked():
            stcflags = stcflags | stc.STC_FIND_WHOLEWORD
        if self.find_regexp.IsChecked():
            stcflags = stcflags | stc.STC_FIND_REGEXP
        if self.only_selection.IsChecked() and not range:
            range = script.GetSelection()
        if wrap is None:
            wrap = not self.dont_wrap.IsChecked()
        if not range:
            if top2bottom:
                # Search from the end of the current selection to EOF
                minPos, maxPos = (script.GetSelectionEnd(),
                                  script.GetLineEndPosition(script.GetLineCount() - 1))
            else:
                minPos, maxPos = script.GetSelectionStart(), 0
        elif top2bottom:
            minPos, maxPos = range
        else:
            minPos, maxPos = reversed(range)
        findpos = script.FindText(minPos, maxPos, text, stcflags)
        if findpos == -1 and wrap:
            # Retry from the document start (or end, searching backwards)
            minPos = 0 if top2bottom else script.GetLineEndPosition(script.GetLineCount() - 1)
            findpos = script.FindText(minPos, maxPos, text, stcflags)
        if findpos == -1:
            script.app.GetStatusBar().SetStatusText(_('Cannot find "%(text)s"') % locals())
        else:
            script.app.GetStatusBar().SetStatusText('')
            # Scintilla positions are byte offsets, hence the utf-8 length
            script.SetAnchor(findpos)
            script.SetCurrentPos(findpos + len(text.encode('utf-8')))
        return findpos
    def OnReplace(self, event=None):
        find_text = self.GetFindText()
        replace_text = self.GetReplaceText()
        if not find_text or find_text == replace_text: return
        self._RememberRecent(find_text, self.find_recent, self.find_text_ctrl)
        self._RememberRecent(replace_text, self.replace_recent, self.replace_text_ctrl)
        if self.escape_sequences.IsChecked():
            find_text = self.Unescape(find_text)
            replace_text = self.Unescape(replace_text)
        script = self.app.currentScript
        # Rewind to the selection start so the current match is replaced,
        # not skipped
        script.GotoPos(script.GetSelectionStart())
        if self.Replace(find_text, replace_text):
            script.EnsureCaretVisible()
    def Replace(self, find_text, replace_text, range=None, wrap=True):
        '''Replace the next occurrence; return True if one was replaced.'''
        script = self.app.currentScript
        if self.Find(find_text, True, range, wrap) != -1:
            script.ReplaceSelection(replace_text)
            return True
        return False
    def OnReplaceAll(self, event):
        find_text = self.GetFindText()
        replace_text = self.GetReplaceText()
        if not find_text or find_text == replace_text: return
        self._RememberRecent(find_text, self.find_recent, self.find_text_ctrl)
        self._RememberRecent(replace_text, self.replace_recent, self.replace_text_ctrl)
        if self.escape_sequences.IsChecked():
            find_text = self.Unescape(find_text)
            replace_text = self.Unescape(replace_text)
        # Each replacement shifts subsequent byte positions by this much
        offset = len(replace_text.encode('utf8')) - len(find_text.encode('utf8'))
        script = self.app.currentScript
        if self.only_selection.IsChecked():
            start, end = script.GetSelection()
        else:
            start, end = 0, script.GetLineEndPosition(script.GetLineCount() - 1)
        pos = script.GetCurrentPos()
        count = pos_count = 0
        script.BeginUndoAction()  # make "replace all" a single undo step
        while True:
            if not self.Replace(find_text, replace_text, (start, end), False):
                break
            start = script.GetSelectionEnd()
            end += offset
            count += 1
            # Track replacements before the original caret to restore it
            if script.GetSelectionEnd() < pos:
                pos_count += 1
        script.EndUndoAction()
        script.GotoPos(pos + offset * pos_count)
        self.app.GetStatusBar().SetStatusText(_('Replaced %(count)i times') % locals())
    def OnClose(self, event):
        self.Hide()
    @staticmethod
    def Unescape(text):
        """Unescape backslashes on a Unicode string"""
        return text.encode('utf8').decode('string-escape').decode('utf8')
class TextCtrl(wx.TextCtrl):
    """wx.TextCtrl with Ctrl-A also on multiline"""
    def __init__(self, *args, **kwargs):
        wx.TextCtrl.__init__(self, *args, **kwargs)
        # Single-line controls already handle Ctrl+A natively
        if self.IsMultiLine():
            self.Bind(wx.EVT_CHAR, self.OnChar)
    def OnChar(self, event):
        # Ctrl+A arrives as keycode 1 (named wx.WXK_CONTROL_A in wxPython 2.9)
        if event.GetKeyCode() == 1:
            self.SelectAll()
        else:
            event.Skip()
class FloatSpin2(FloatSpin):
    """FloatSpin without some annoyances
    - Select all on TAB or Ctrl+A
    - Process RETURN normally
    wx.TE_NOHIDESEL effect still present though
    """
    def __init__(self, *args, **kwargs):
        FloatSpin.__init__(self, *args, **kwargs)
        # _validkeycode/_textctrl are FloatSpin internals -- fragile across
        # agw versions; TODO confirm they still exist on upgrade
        self._validkeycode.append(1) # available on wxPython 2.9 as wx.WXK_CONTROL_A
    def OnFocus(self, event):
        FloatSpin.OnFocus(self, event)
        if self._textctrl:
            self._textctrl.SelectAll()
    def OnTextEnter(self, event): # bypass wx.TE_PROCESS_ENTER
        self.SyncSpinToText() # wx.EVT_TEXT_ENTER action without event.Skip()
        # Forward RETURN to the dialog's default button, as if the spin
        # control had not swallowed it
        top_level = self.GetTopLevelParent()
        default_item = top_level.GetDefaultItem()
        if default_item is not None:
            default_event = wx.PyCommandEvent(wx.EVT_BUTTON.typeId, default_item.GetId())
            wx.PostEvent(top_level, default_event)
class ColourSelect(colourselect.ColourSelect):
    """Subclass of ColourSelect accepting a ColourData instance
    This allows using and changing custom colours
    All in all is still better than wx.ColourPickerCtrl
    """
    def __init__(self, *args, **kwargs):
        # Optional shared wx.ColourData carrying the custom-colour palette
        self.colour_data = kwargs.pop('colour_data', None)
        colourselect.ColourSelect.__init__(self, *args, **kwargs)
    def OnClick(self, event):
        data = self.colour_data or wx.ColourData()
        data.SetChooseFull(True)
        data.SetColour(self.colour)
        dlg = wx.ColourDialog(wx.GetTopLevelParent(self), data)
        changed = dlg.ShowModal() == wx.ID_OK
        if changed:
            data = dlg.GetColourData()
            self.SetColour(data.GetColour())
            if self.colour_data is not None:
                # Copy the dialog's custom colours back into the shared data
                for i in range(self.colour_data.NUM_CUSTOM):
                    self.colour_data.SetCustomColour(i, data.GetCustomColour(i))
        dlg.Destroy()
        # moved after dlg.Destroy, since who knows what the callback will do...
        if changed:
            self.OnChange()
if hasattr(wx.ColourData, 'FromString'):
    ColourData = wx.ColourData
else:
    class ColourData(wx.ColourData):
        """Backport of ToString and FromString methods"""
        NUM_CUSTOM = 16
        def ToString(self):
            """Serialize the choose-full flag and the 16 custom colours
            as 'flag,#rrggbb,...' (empty field for unset colours)."""
            colour_data_str = str(int(self.GetChooseFull()))
            for i in range(self.NUM_CUSTOM):
                colour_data_str += ','
                colour = self.GetCustomColour(i)
                if colour.IsOk():
                    colour_data_str += colour.GetAsString(wx.C2S_HTML_SYNTAX)
            return colour_data_str
        def FromString(self, colour_data_str):
            """Restore state from a ToString() string; return True on
            success, False on malformed input (matching wx's native
            FromString, which returns a bool)."""
            colour_data = colour_data_str.split(',')
            if colour_data[0] not in ('0', '1'):
                return False
            self.SetChooseFull(colour_data[0] == '1')
            for i, colour in enumerate(colour_data[1:self.NUM_CUSTOM + 1]):
                try:
                    self.SetCustomColour(i, colour or wx.Colour())
                except Exception:  # narrowed from a bare except
                    return False
            # Bug fix: the original fell through and returned None (falsy),
            # so even successful parses looked like failures to callers
            # testing the result
            return True
class OptionsDialog(wx.Dialog):
def __init__(self, parent, dlgInfo, options, title=None, startPageIndex=0,
             starText=True, invert_scroll=False):
    '''Init the OptionsDialog window

    Create a wx.Notebook from the tabs specified in 'dlgInfo' and the
    current/default values in 'options'. If there's only one tab, create
    a simple wx.Panel.

    'starText': show a message next to the window's standard buttons if
    some condition is satisfied. 'startext' == True imposes a min window
    width.

    Populates self.controls as key -> (ctrl, flag, tab index) for later
    read-back in UpdateDict, and self.starList with the tabs containing
    starred ('*') options.
    '''
    if title is None:
        title = _('Program Settings')
    wx.Dialog.__init__(self, parent, wx.ID_ANY, title,
                       style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
    # Work on a copy; the original dict is kept for OPT_ELEM_BUTTON entries.
    self.options = options.copy()
    self.optionsOriginal = options
    # Create the options tabs
    self.controls = {}
    self.starList = []
    notebook = len(dlgInfo) > 1
    if notebook:
        nb = self.nb = Notebook(self, wx.ID_ANY, style=wx.NO_BORDER,
                                invert_scroll=invert_scroll)
    for tabInfo in dlgInfo:
        if notebook:
            tabPanel = wx.Panel(nb, wx.ID_ANY)
            nb.AddPage(tabPanel, tabInfo[0], select=True)
        else:
            tabPanel = wx.Panel(self, wx.ID_ANY)
        tabSizer = wx.BoxSizer(wx.VERTICAL)
        tabSizer.Add((-1,5), 0)
        boolStar = False
        # Each tab is a list of rows; each row a list of
        # (label, flag, key, tip, misc) element descriptions.
        for line in tabInfo[1:]:
            colSizer = wx.BoxSizer(wx.HORIZONTAL)
            for label, flag, key, tip, misc in line:
                try:
                    optionsValue = self.options[key]
                    if optionsValue is None:
                        optionsValue = ''
                except KeyError:
                    optionsValue = ''
                # Set the controls
                # possible values for 'label_position' and 'orientation' parameters: wx.HORIZONTAL, wx.VERTICAL
                if flag is None:
                    # horizontal blank space separator
                    # misc: {height}
                    height = misc['height'] if 'height' in misc else 10
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    itemSizer.Add((-1,height), 0)
                elif flag == OPT_ELEM_SEP:
                    # horizontal separator formed by a text line and a horizontal line
                    # misc: {width, adjust_width, expand}
                    width = misc['width'] if 'width' in misc else -1
                    adjust_width = misc['adjust_width'] if 'adjust_width' in misc else False
                    if width != -1 or adjust_width:
                        expand = 0
                    else:
                        expand = (wx.EXPAND if misc['expand'] else 0) if 'expand' in misc else wx.EXPAND
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    if label:
                        staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                        if tip:
                            staticText.SetToolTipString(tip)
                        if adjust_width:
                            width = staticText.GetTextExtent(label)[0] + 4
                        itemSizer.Add(staticText, 0, wx.EXPAND|wx.ALL, 2)
                    else:
                        itemSizer.AddSpacer((-1, 3))
                    staticLine = wx.StaticLine(tabPanel, wx.ID_ANY, size=(width, -1))
                    margin = 0 if not width and not expand else 2
                    itemSizer.Add(staticLine, 0, expand|wx.TOP|wx.BOTTOM, margin)
                elif flag == OPT_ELEM_CHECK:
                    # simple check box, with the label on the right
                    # misc: {width, ident}
                    width = misc['width'] if 'width' in misc else -1
                    ctrl = wx.CheckBox(tabPanel, wx.ID_ANY, label, size=(width,-1))
                    ctrl.SetMinSize(ctrl.GetBestSize())
                    ctrl.SetValue(bool(optionsValue))
                    if tip:
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    if 'ident' in misc:
                        # indent the check box by 'ident' pixels
                        identSizer = wx.BoxSizer(wx.HORIZONTAL)
                        identSizer.Add((misc['ident'], -1), 0)
                        identSizer.Add(ctrl, 1, wx.TOP|wx.BOTTOM, 1)
                        itemSizer.AddStretchSpacer()
                        itemSizer.Add(identSizer, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL)
                    else:
                        itemSizer.Add((-1,2), 1, wx.EXPAND)
                        itemSizer.Add(ctrl, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                elif flag in (OPT_ELEM_SPIN, OPT_ELEM_INT, OPT_ELEM_FLOAT):
                    # numeric field, with arrows to increment and decrement the value
                    # misc: (width, expand, label_position, min_val, max_val, digits, increment)
                    width = misc['width'] if 'width' in misc else 50
                    expand = misc['expand'] if 'expand' in misc else False
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    min_val = misc['min_val'] if 'min_val' in misc else None
                    max_val = misc['max_val'] if 'max_val' in misc else None
                    digits = misc['digits'] if 'digits' in misc else 0
                    increment = misc['increment'] if 'increment' in misc else 1
                    ctrl = FloatSpin2(tabPanel, wx.ID_ANY, size=(width, -1),
                                      min_val=min_val, max_val=max_val,
                                      value=optionsValue, digits=digits, increment=increment)
                    itemSizer = wx.BoxSizer(label_position)
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl._textctrl.SetToolTipString(tip)
                    if label_position == wx.HORIZONTAL:
                        itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 6)
                        expand_flags = (1, 0) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                    else:
                        itemSizer.AddStretchSpacer()
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        expand_flags = (0, wx.EXPAND) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                        itemSizer.AddStretchSpacer()
                elif flag == OPT_ELEM_SLIDER:
                    # select a number with a draggable handle
                    # misc: (width, expand, label_position, orientation, minValue, maxValue, TickFreq)
                    width = misc['width'] if 'width' in misc else 200
                    expand = misc['expand'] if 'expand' in misc else False
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    orientation = misc['orientation'] if 'orientation' in misc else wx.HORIZONTAL
                    minValue = misc['minValue'] if 'minValue' in misc else 0
                    maxValue = misc['maxValue'] if 'maxValue' in misc else 100
                    TickFreq = misc['TickFreq'] if 'TickFreq' in misc else 50
                    size = (width, -1) if orientation == wx.HORIZONTAL else (-1, width)
                    style = wx.SL_LABELS | orientation
                    if TickFreq: style |= wx.SL_AUTOTICKS
                    ctrl = wx.Slider(tabPanel, wx.ID_ANY, size=size,
                                     minValue=minValue, maxValue=maxValue,
                                     value=optionsValue, style=style)
                    ctrl.SetTickFreq(TickFreq)
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(label_position)
                    if label_position == wx.HORIZONTAL:
                        itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5)
                        expand_flags = (1, 0) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL)
                    else:
                        itemSizer.AddStretchSpacer()
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        expand_flags = (0, wx.EXPAND) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL)
                        itemSizer.AddStretchSpacer()
                elif flag in (OPT_ELEM_FILE, OPT_ELEM_FILE_OPEN, OPT_ELEM_FILE_SAVE, OPT_ELEM_FILE_URL):
                    # text field with additional browse for file button
                    # misc: {width, expand, label_position, fileMask, startDirectory, buttonText, buttonWidth}
                    width = misc['width'] if 'width' in misc else 400
                    expand = misc['expand'] if 'expand' in misc else True
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    fileMode = wx.SAVE|wx.OVERWRITE_PROMPT if flag == OPT_ELEM_FILE_SAVE else wx.OPEN|wx.FILE_MUST_EXIST
                    fileMask = misc['fileMask'] if 'fileMask' in misc else '*.*'
                    startDirectory = (self.GetParent().ExpandVars(misc['startDirectory']) if misc.get('startDirectory')
                                      else os.path.dirname(self.GetParent().ExpandVars(optionsValue)))
                    buttonText = misc['buttonText'] if 'buttonText' in misc else _('Browse')
                    buttonWidth = misc['buttonWidth'] if 'buttonWidth' in misc else -1
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    itemSizer.AddStretchSpacer()
                    # Keep the full label for UpdateDict's error messages,
                    # even when it's drawn as a separate static text.
                    Label = label
                    if label and label_position == wx.VERTICAL:
                        staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                        staticText.SetToolTipString(tip)
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        label = ''
                    ctrl = filebrowse.FileBrowseButton(tabPanel, wx.ID_ANY, size=(width,-1),
                                                      labelText=label,
                                                      toolTip=tip,
                                                      fileMode=fileMode,
                                                      fileMask=fileMask,
                                                      startDirectory=startDirectory,
                                                      buttonText=buttonText,
                                                      #dialogTitle = ''
                                                      )
                    ctrl.SetValue(optionsValue)
                    # Tweak the internal sizer borders / button size of the
                    # composite FileBrowseButton widget.
                    ctrl.Sizer.Children[0].SetBorder(0)
                    if not label:
                        ctrl.Sizer.Children[0].Sizer.Children[1].SetBorder(0)
                    elif tip:
                        ctrl.label.SetToolTipString(tip)
                    ctrl.Sizer.Children[0].Sizer.Children[2].SetInitSize(buttonWidth, -1)
                    ctrl.Label = Label
                    itemSizer.Add(ctrl, 0, (wx.EXPAND if expand else 0)|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                    itemSizer.AddStretchSpacer()
                elif flag in (OPT_ELEM_DIR, OPT_ELEM_DIR_URL):
                    # text field with additional browse for directory button
                    # misc: (width, expand, label_position, startDirectory, buttonText, buttonWidth)
                    width = misc['width'] if 'width' in misc else 400
                    expand = misc['expand'] if 'expand' in misc else True
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    startDirectory = (self.GetParent().ExpandVars(misc['startDirectory']) if misc.get('startDirectory')
                                      else self.GetParent().ExpandVars(optionsValue))
                    buttonText = misc['buttonText'] if 'buttonText' in misc else _('Browse')
                    buttonWidth = misc['buttonWidth'] if 'buttonWidth' in misc else -1
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    itemSizer.AddStretchSpacer()
                    Label = label
                    if label and label_position == wx.VERTICAL:
                        staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                        staticText.SetToolTipString(tip)
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        label = ''
                    ctrl = filebrowse.DirBrowseButton(tabPanel, wx.ID_ANY, size=(width,-1),
                                                     labelText=label,
                                                     toolTip=tip,
                                                     startDirectory=startDirectory,
                                                     newDirectory=True,
                                                     buttonText=buttonText,
                                                     #dialogTitle = ''
                                                     )
                    ctrl.SetValue(optionsValue)
                    ctrl.Sizer.Children[0].SetBorder(0)
                    if not label:
                        ctrl.Sizer.Children[0].Sizer.Children[1].SetBorder(0)
                    elif tip:
                        ctrl.label.SetToolTipString(tip)
                    ctrl.Sizer.Children[0].Sizer.Children[2].SetInitSize(buttonWidth, -1)
                    ctrl.Label = Label
                    itemSizer.Add(ctrl, 0, (wx.EXPAND if expand else 0)|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                    itemSizer.AddStretchSpacer()
                elif flag == OPT_ELEM_RADIO:
                    # select an option from the displayed ones
                    # misc: {width, expand, orientation, dimensions, choices}
                    # 'choices' is a list of (display string, value) pairs.
                    width = misc['width'] if 'width' in misc else -1
                    expand = 1 if 'expand' in misc and misc['expand'] else 0
                    orientation = (wx.RA_SPECIFY_COLS if 'orientation' in misc and
                                   misc['orientation'] == wx.VERTICAL else wx.RA_SPECIFY_ROWS)
                    dimensions = misc['dimensions'] if 'dimensions' in misc else 1
                    choices = [s for s,v in misc['choices']]
                    ctrl = wx.RadioBox(tabPanel, wx.ID_ANY, size=(width,-1), label=label,
                                       choices=choices, style=orientation, majorDimension=dimensions)
                    ctrl.items = misc['choices']
                    ctrl.SetSelection(0)
                    for s, v in misc['choices']:
                        if v == optionsValue:
                            ctrl.SetStringSelection(s)
                            break
                    if tip:
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(wx.HORIZONTAL)
                    itemSizer.Add(ctrl, expand, wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                elif flag == OPT_ELEM_LIST:
                    # select an option from a drop-down list
                    # misc: {width, expand, label_position, choices, writable}
                    width = misc['width'] if 'width' in misc else -1
                    expand = misc['expand'] if 'expand' in misc else False
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    list_type = wx.CB_DROPDOWN if 'writable' in misc and misc['writable'] else wx.CB_READONLY
                    if misc['choices'] and not isinstance(misc['choices'][0], basestring):
                        # (display string, client data) pairs
                        ctrl = wx.ComboBox(tabPanel, wx.ID_ANY, size=(width,-1), style=wx.CB_READONLY)
                        ctrl.client_data = True # not ctrl.HasClientData() in wxWidgets 2.8
                        for display_string, client_data in misc['choices']:
                            ctrl.Append(display_string, client_data)
                            if client_data == optionsValue:
                                ctrl.SetValue(display_string)
                    else:
                        ctrl = wx.ComboBox(tabPanel, wx.ID_ANY, size=(width,-1), choices=misc['choices'],
                                           value=optionsValue, style=list_type)
                        ctrl.client_data = False
                    itemSizer = wx.BoxSizer(label_position)
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl.SetToolTipString(tip)
                    if label_position == wx.HORIZONTAL:
                        itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5)
                        expand_flags = (1, 0) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                    else:
                        itemSizer.AddStretchSpacer()
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        expand_flags = (0, wx.EXPAND) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                        itemSizer.AddStretchSpacer()
                elif flag == OPT_ELEM_BUTTON:
                    # button with an associated handler
                    # misc: {width, handler}
                    width = misc['width'] if 'width' in misc else -1
                    handler = misc['handler']
                    ctrl = wx.Button(tabPanel, wx.ID_ANY, size=(width,-1), label=label)
                    self.Bind(wx.EVT_BUTTON, handler, ctrl)
                    if tip:
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(wx.VERTICAL)
                    #~ staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    #~ ctrl = wxButtons.GenButton(tabPanel, wx.ID_ANY, label=label)
                    #~ ctrl.SetUseFocusIndicator(False)
                    #~ itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5)
                    itemSizer.Add(ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                elif flag == OPT_ELEM_COLOR:
                    # button for selecting a color
                    # misc: {width, colour_data}
                    width = misc['width'] if 'width' in misc else -1
                    colour_data = misc.get('colour_data')
                    # optionsValue is expected to be an RGB(A) tuple here
                    colour = wx.Colour(*optionsValue)
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    ctrl = ColourSelect(tabPanel, wx.ID_ANY, colour=colour,
                                        size=(width, -1), colour_data=colour_data)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(wx.HORIZONTAL)
                    itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 6)
                    itemSizer.Add(ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                elif flag == OPT_ELEM_FONT:
                    # button for choosing font
                    # misc: {width}
                    # NOTE(review): optionsValue is expected to be a
                    # (face, size, weight, style, colour) tuple; a missing
                    # option was coerced to '' above, which this branch does
                    # not guard against — TODO confirm callers always supply it.
                    width = misc['width'] if 'width' in misc else -1
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    if optionsValue is not None:
                        (fontFace, fontSize, fontWeight, fontStyle,
                         fontColorTuple) = optionsValue
                        weight = wx.FONTWEIGHT_NORMAL
                        if fontWeight == 'bold':
                            weight = wx.FONTWEIGHT_BOLD
                        style = wx.FONTSTYLE_NORMAL
                        if fontStyle == 'italic':
                            style = wx.FONTSTYLE_ITALIC
                        font = wx.Font(fontSize, wx.FONTFAMILY_DEFAULT,
                                       style, weight, faceName=fontFace)
                    else:
                        font = wx.NullFont
                    ctrl = wx.FontPickerCtrl(tabPanel, wx.ID_ANY, font,
                                             size=(width,-1), name=label,
                                             style=wx.FNTP_FONTDESC_AS_LABEL)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(wx.HORIZONTAL)
                    itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 6)
                    itemSizer.Add(ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.TOP|wx.BOTTOM, 2)
                else: #elif flag == OPT_ELEM_STRING:
                    # regular text field
                    # misc: {width, expand, label_position}
                    width = misc['width'] if 'width' in misc else -1
                    expand = misc['expand'] if 'expand' in misc else True
                    label_position = misc['label_position'] if 'label_position' in misc else wx.HORIZONTAL
                    staticText = wx.StaticText(tabPanel, wx.ID_ANY, label)
                    ctrl = wx.TextCtrl(tabPanel, wx.ID_ANY, size=(width,-1), value=optionsValue)
                    if tip:
                        staticText.SetToolTipString(tip)
                        ctrl.SetToolTipString(tip)
                    itemSizer = wx.BoxSizer(label_position)
                    if label_position == wx.HORIZONTAL:
                        itemSizer.Add(staticText, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5)
                        expand_flags = (1, 0) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                    else:
                        itemSizer.AddStretchSpacer()
                        itemSizer.Add(staticText, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, 2)
                        expand_flags = (0, wx.EXPAND) if expand else (0, 0)
                        itemSizer.Add(ctrl, expand_flags[0], expand_flags[1]|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 2)
                        itemSizer.AddStretchSpacer()
                # A trailing '*' in the label marks a restart-required option.
                #~ if label.startswith('*'):
                if label.rstrip(' :').endswith('*'):
                    boolStar = True
                # NOTE(review): spacer entries (flag is None) also reach this
                # registration line; presumably their key is None/unused —
                # verify against the dlgInfo definitions.
                if flag != OPT_ELEM_SEP: self.controls[key] = (ctrl, flag, nb.GetSelection() if notebook else -1)
                colSizer.Add(itemSizer, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 4)
            tabSizer.Add(colSizer, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 4)
        if boolStar:
            self.starList.append(tabPanel if notebook else 1)
            #~ tabSizer.Add((0,0),1)
            #~ tabSizer.Add(wx.StaticText(tabPanel, wx.ID_ANY, ' '+_('* Requires program restart for full effect')), 0, wx.TOP, 20)
        tabSizerBorder = wx.BoxSizer(wx.VERTICAL)
        tabSizerBorder.Add(tabSizer, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 4)
        tabSizerBorder.Add((-1,4), 0, wx.EXPAND)
        tabPanel.SetSizer(tabSizerBorder)
        tabSizerBorder.Layout()
    if notebook:
        if startPageIndex >=0 and startPageIndex < nb.GetPageCount():
            nb.SetSelection(startPageIndex)
        else:
            nb.SetSelection(0)
    # Standard buttons
    okay = wx.Button(self, wx.ID_OK, _('OK'))
    self.Bind(wx.EVT_BUTTON, self.OnButtonOK, okay)
    cancel = wx.Button(self, wx.ID_CANCEL, _('Cancel'))
    btns = wx.StdDialogButtonSizer()
    if starText:
        self.starText = wx.StaticText(self, wx.ID_ANY, _('* Requires program restart for full effect'))
        btns.Add(self.starText, 0, wx.ALIGN_CENTER_VERTICAL)
    btns.AddButton(okay)
    btns.AddButton(cancel)
    btns.Realize()
    # Size the elements
    dlgSizer = wx.BoxSizer(wx.VERTICAL)
    if notebook:
        dlgSizer.Add(nb, 0, wx.EXPAND|wx.ALL, 5)
    else:
        dlgSizer.Add(tabPanel, 0, wx.EXPAND|wx.ALL, 0)
    dlgSizer.Add(btns, 0, wx.EXPAND|wx.ALL, 10)
    dlgSizer.Fit(self)
    self.Center()
    self.SetSizer(dlgSizer)
    dlgSizer.SetSizeHints(self)
    dlgSizer.Layout()
    # Misc
    okay.SetDefault()
    if starText:
        # Hide the restart note unless the initially shown tab has starred options.
        if (notebook and self.nb.GetPage(0) not in self.starList) or (not notebook and not self.starList):
            self.starText.Hide()
    if notebook: self.nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnNotebookPageChanged)
def OnNotebookPageChanged(self, event):
    """Show the restart note only on tabs that contain starred options."""
    page = self.nb.GetPage(event.GetSelection())
    self.starText.Show(page in self.starList)
    event.Skip()
def OnButtonOK(self, event):
    """Commit the edited values; keep the dialog open on validation failure."""
    if not self.UpdateDict():
        return
    event.Skip()
def OnButtonFont(self, event):
    """Let the user pick a font and then a colour for the pressed button.

    Both choices are applied to the button itself (the button serves as
    the live preview).  The colour dialog is only shown when the font
    dialog is accepted.
    """
    button = event.GetEventObject()
    font = button.GetFont()
    colour = button.GetForegroundColour()
    # Show the font dialog
    data = wx.FontData()
    data.EnableEffects(False)
    #~ data.SetColour(button.GetForegroundColour())
    data.SetInitialFont(font)
    dlg = wx.FontDialog(self, data)
    if dlg.ShowModal() == wx.ID_OK:
        data = dlg.GetFontData()
        font = data.GetChosenFont()
        # Show the color dialog
        data = wx.ColourData()
        data.SetColour(colour)
        dlg2 = wx.ColourDialog(self, data)
        dlg2.GetColourData().SetChooseFull(True)
        if dlg2.ShowModal() == wx.ID_OK:
            data = dlg2.GetColourData()
            colour = data.GetColour()
        # Apply the (possibly unchanged) colour and new font, then resize
        # the dialog to accommodate the button's new best size.
        button.SetFont(font)
        button.SetForegroundColour(colour)
        button.SetBestSize()
        button.Refresh()
        self.GetSizer().Fit(self)
        dlg2.Destroy()
    dlg.Destroy()
def GetDict(self):
    """Return the working copy of the options dict (updated by UpdateDict)."""
    return self.options
def UpdateDict(self):
    """Validate every registered control and copy its value into self.options.

    Returns True on success.  On an invalid directory/file entry the
    offending control is highlighted (ShowWarning) and False is returned;
    options processed so far keep their new values.
    """
    for key, value in self.controls.items():
        ctrl, flag, tabIndex = value
        if flag in (OPT_ELEM_DIR, OPT_ELEM_DIR_URL):
            # Must be empty, an existing directory, or (URL variant) an http URL.
            entry = self.GetParent().ExpandVars(ctrl.GetValue())
            if entry == '' or os.path.isdir(entry):
                newValue = entry
            elif flag == OPT_ELEM_DIR_URL and entry.lstrip().startswith('http://'):
                newValue = entry
            else:
                label = u'\n\n{0}{1}'.format(ctrl.Label.rstrip(':') + ': ' if ctrl.Label else '', entry)
                self.ShowWarning(ctrl, _('Invalid directory!') + label, tabIndex)
                return False
        elif flag in (OPT_ELEM_FILE, OPT_ELEM_FILE_OPEN, OPT_ELEM_FILE_SAVE, OPT_ELEM_FILE_URL):
            # Save targets need not exist yet; URL variant accepts http URLs.
            entry = self.GetParent().ExpandVars(ctrl.GetValue())
            if entry == '' or os.path.isfile(entry) or flag == OPT_ELEM_FILE_SAVE or (
                    flag == OPT_ELEM_FILE_URL and entry.lstrip().startswith('http://')):
                newValue = entry
            else:
                label = u'\n\n{0}{1}'.format(ctrl.Label.rstrip(':') + ': ' if ctrl.Label else '', entry)
                self.ShowWarning(ctrl, _('Invalid filename!') + label, tabIndex)
                return False
        elif flag == OPT_ELEM_COLOR:
            #~ newValue = ctrl.GetBackgroundColour().Get()
            newValue = ctrl.GetColour().Get()
        elif flag == OPT_ELEM_FONT:
            # Stored as a (face, size, weight, style, colour tuple) tuple,
            # mirroring what __init__ unpacks for OPT_ELEM_FONT.
            font = ctrl.GetSelectedFont() # ctrl.GetFont()
            bold = ''
            if font.GetWeight() == wx.FONTWEIGHT_BOLD:
                bold = 'bold'
            italic = ''
            if font.GetStyle() == wx.FONTSTYLE_ITALIC:
                italic = 'italic'
            color = ctrl.GetChildren()[0].GetForegroundColour() # ctrl.GetForegroundColour()
            newValue = (font.GetFaceName(), font.GetPointSize(), bold, italic, color.Get())
        elif flag == OPT_ELEM_CHECK:
            newValue = ctrl.GetValue()
        elif flag in (OPT_ELEM_INT, OPT_ELEM_FLOAT, OPT_ELEM_SPIN):
            # Zero digits means an integer field: coerce the float back to int.
            newValue = ctrl.GetValue() if ctrl.GetDigits() else int(ctrl.GetValue())
        elif flag == OPT_ELEM_SLIDER:
            newValue = ctrl.GetValue()
        elif flag == OPT_ELEM_RADIO:
            # Map the selected display string back to its associated value.
            index = ctrl.GetSelection()
            newValue = ctrl.items[index][1]
        elif flag == OPT_ELEM_LIST:
            if ctrl.client_data: # ctrl.HasClientData() in wxWidgets 2.9+
                newValue = ctrl.GetClientData(ctrl.GetSelection())
            else:
                newValue = ctrl.GetValue()
        elif flag == OPT_ELEM_BUTTON:
            # Buttons don't carry a value; keep the original option untouched.
            newValue = self.optionsOriginal[key]
        else: # flag == OPT_ELEM_STRING:
            newValue = ctrl.GetValue()
        self.options[key] = newValue
    return True
def ShowWarning(self, ctrl, message, tabIndex):
    """Flag a browse control as invalid: flash its text field pink, pop an
    error box, then restore the colour and focus the control."""
    if tabIndex != -1:
        self.nb.SetSelection(tabIndex)
    saved_colour = ctrl.textControl.GetBackgroundColour()
    ctrl.textControl.SetBackgroundColour('pink')
    ctrl.Refresh()
    wx.MessageBox(message, 'Error')
    ctrl.textControl.SetBackgroundColour(saved_colour)
    ctrl.Refresh()
    ctrl.SetFocus()
class ShortcutsDialog(wx.Dialog):
def __init__(self, parent, shortcutList, title=None, exceptionIds=None, submessage=None):
    """Build the shortcut editor dialog.

    'shortcutList' is a list of [label, shortcut, id] entries (deep-copied;
    the caller's list is not modified until GetShortcutList is used).
    'exceptionIds' is either a list of ids to mark with '*', or a 4-tuple
    (exceptionShortcuts, advancedShortcuts, reservedShortcuts, advancedInfo)
    which additionally enables the 'Advanced' button.
    """
    if title is None:
        title = _('Edit shortcuts')
    wx.Dialog.__init__(self, parent, wx.ID_ANY, title, style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
    self.parent = parent
    self.shortcutList = copy.deepcopy(shortcutList)#shortcutList[:]
    if exceptionIds is None:
        exceptionIds = []
    if type(exceptionIds) is tuple:
        # Advanced mode: unpack the tuple and add the 'Advanced' button.
        exceptionShortcuts = exceptionIds[0]
        self.advancedShortcuts = advancedShortcuts = exceptionIds[1]
        self.reservedShortcuts = reservedShortcuts = exceptionIds[2][:]
        self.advancedInfo = exceptionIds[3]
        advanced = wx.Button(self, wx.ID_ANY, _('Advanced'))
        advanced.Bind(wx.EVT_BUTTON, self.OnAdvancedButton)
    else:
        advanced = None
    # Define the shortcut editing modal dialog (used later)
    #~ self.dlgEdit = self.defineShortcutEditDialog()
    # Define the virtual list control
    # (closes over 'advanced' and the exception data to decorate labels
    # with '*' / '~' markers on the fly)
    class VListCtrl(ListCtrl):
        def OnGetItemText(self, item, column):
            label, shortcut, id = self.parent.shortcutList[item]
            if column == 0:
                if advanced:
                    if shortcut in exceptionShortcuts:
                        label = '* %s' % label
                    elif shortcut in reservedShortcuts:
                        if (label, shortcut) in advancedShortcuts[-1]:
                            label = '~ %s' % label
                        else:
                            label = '* %s' % label
                elif id in exceptionIds:
                    label = '* %s' % label
                return label
            elif column == 1:
                return GetTranslatedShortcut(shortcut)
            #~ return self.parent.shortcutList[item][column]
    listCtrl = VListCtrl(self, wx.ID_ANY, style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES)
    listCtrl.InsertColumn(0, _('Menu label'))
    listCtrl.InsertColumn(1, _('Keyboard shortcut'))
    nItems = len(self.shortcutList)
    listCtrl.SetItemCount(nItems)
    listCtrl.setResizeColumn(1)
    listCtrl.SetColumnWidth(1, wx.LIST_AUTOSIZE_USEHEADER)
    listCtrl.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnListCtrlActivated)
    self.listCtrl = listCtrl
    # Standard buttons
    okay = wx.Button(self, wx.ID_OK, _('OK'))
    #~ self.Bind(wx.EVT_BUTTON, self.OnButtonClick, okay)
    cancel = wx.Button(self, wx.ID_CANCEL, _('Cancel'))
    #~ self.Bind(wx.EVT_BUTTON, self.OnButtonClick, cancel)
    btns = wx.StdDialogButtonSizer()
    if advanced:
        btns.Add(advanced)
    btns.AddButton(okay)
    btns.AddButton(cancel)
    btns.Realize()
    # Size the elements
    dlgSizer = wx.BoxSizer(wx.VERTICAL)
    dlgSizer.Add(listCtrl, 1, wx.EXPAND|wx.ALL, 5)
    if submessage is not None:
        dlgSizer.Add(wx.StaticText(self, wx.ID_ANY, submessage), 0, wx.ALIGN_LEFT|wx.LEFT|wx.BOTTOM, 10)
    message = _('Double-click or hit enter on an item in the list to edit the shortcut.')
    dlgSizer.Add(wx.StaticText(self, wx.ID_ANY, message), 0, wx.ALIGN_CENTER|wx.ALL, 5)
    dlgSizer.Add(btns, 0, wx.EXPAND|wx.ALL, 10)
    self.SetSizerAndFit(dlgSizer)
    width, height = self.GetSize()
    self.SetSize((width, height*2))
    self.sizer = dlgSizer
    # Misc
    #okay.SetDefault()
def OnAdvancedButton(self, event):
    """Show a check-list of the advanced shortcuts and let the user choose
    which of them are reserved.

    On OK the self.reservedShortcuts list is emptied and refilled IN PLACE
    (the VListCtrl closure in __init__ holds a reference to the same list),
    then the virtual list is refreshed to update the '*'/'~' markers.
    """
    dlg = wx.Dialog(self, wx.ID_ANY, _('Advanced'), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
    class CheckListCtrl(wx.ListCtrl, listmix.CheckListCtrlMixin):
        def __init__(self, parent):
            wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT)
            listmix.CheckListCtrlMixin.__init__(self, checked_icon.GetBitmap(), unchecked_icon.GetBitmap())
    checklist = CheckListCtrl(dlg)
    checklist.InsertColumn(0, _('Shortcut'))
    checklist.InsertColumn(1, _('Action'))
    # The last element of advancedShortcuts is special (see __init__'s
    # VListCtrl), hence the -1 bound.
    for index in range(0, len(self.advancedShortcuts)-1):
        shortcut, action = self.advancedShortcuts[index]
        checklist.InsertStringItem(index, shortcut)
        checklist.SetStringItem(index, 1, action)
        if index % 2:
            # alternate row shading
            checklist.SetItemBackgroundColour(index, '#E8E8FF')
        if shortcut in self.reservedShortcuts:
            checklist.CheckItem(index)
    checklist.SetColumnWidth(0, wx.LIST_AUTOSIZE)
    checklist.SetColumnWidth(1, wx.LIST_AUTOSIZE)
    # Standard buttons
    okay = wx.Button(dlg, wx.ID_OK, _('OK'))
    cancel = wx.Button(dlg, wx.ID_CANCEL, _('Cancel'))
    btns = wx.StdDialogButtonSizer()
    btns.AddButton(okay)
    btns.AddButton(cancel)
    btns.Realize()
    # Dialog layout
    dlgSizer = wx.BoxSizer(wx.VERTICAL)
    dlgSizer.Add(checklist, 1, wx.EXPAND|wx.ALL, 5)
    if self.advancedInfo:
        dlgSizer.Add(wx.StaticText(dlg, wx.ID_ANY, self.advancedInfo), 0, wx.LEFT|wx.BOTTOM, 10)
    dlgSizer.Add(btns, 0, wx.EXPAND|wx.ALL, 10)
    dlg.SetSizerAndFit(dlgSizer)
    width = checklist.GetColumnWidth(0) + checklist.GetColumnWidth(1) + 40
    dlg.SetSize((width, width*3/4))
    if wx.ID_OK == dlg.ShowModal():
        # Mutate the existing list in place to keep shared references valid.
        while self.reservedShortcuts:
            self.reservedShortcuts.pop()
        for index in range(0, len(self.advancedShortcuts)-1):
            if checklist.IsChecked(index):
                self.reservedShortcuts.append(self.advancedShortcuts[index][0])
        self.listCtrl.RefreshItems(0, len(self.shortcutList)-1)
    dlg.Destroy()
def GetShortcutList(self):
    """Return the edited shortcut list and the reserved-shortcut selection.

    Robustness fix: self.reservedShortcuts is only created when the dialog
    was opened with advanced (tuple) exception data; in the simple mode
    return an empty list instead of raising AttributeError.
    """
    return self.shortcutList, getattr(self, 'reservedShortcuts', [])
def defineShortcutEditDialog(self):
    """Create and return the modal dialog used to edit a single shortcut.

    The editing widgets are attached as attributes (menuLabel,
    checkBoxCtrl/Alt/Shift, listBoxKey) so the caller can pre-fill and
    read them back.
    """
    dlg = wx.Dialog(self, wx.ID_ANY, _('Edit the keyboard shortcut'))
    # Label naming the menu entry being edited.
    dlg.menuLabel = wx.StaticText(dlg, wx.ID_ANY, '')
    # Modifier check boxes plus the key selector.
    dlg.checkBoxCtrl = wx.CheckBox(dlg, wx.ID_ANY, _('Ctrl'))
    dlg.checkBoxAlt = wx.CheckBox(dlg, wx.ID_ANY, _('Alt'))
    dlg.checkBoxShift = wx.CheckBox(dlg, wx.ID_ANY, _('Shift'))
    dlg.listBoxKey = wx.Choice(dlg, wx.ID_ANY, choices=keyStringList)
    controlsSizer = wx.BoxSizer(wx.HORIZONTAL)
    for checkBox in (dlg.checkBoxCtrl, dlg.checkBoxAlt, dlg.checkBoxShift):
        controlsSizer.Add(checkBox, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 15)
    controlsSizer.Add(wx.StaticText(dlg, wx.ID_ANY, _('Key:')), 0,
                      wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 5)
    controlsSizer.Add(dlg.listBoxKey, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 15)
    # Standard buttons; OK is handled by the caller after ShowModal,
    # Clear is wired up here.
    okay = wx.Button(dlg, wx.ID_OK, _('OK'))
    clear = wx.Button(dlg, wx.ID_NO, _('Clear'))
    cancel = wx.Button(dlg, wx.ID_CANCEL, _('Cancel'))
    dlg.Bind(wx.EVT_BUTTON, self.OnEditButtonClear, clear)
    buttonSizer = wx.StdDialogButtonSizer()
    buttonSizer.AddButton(okay)
    buttonSizer.AddButton(cancel)
    buttonSizer.AddButton(clear)
    buttonSizer.Realize()
    # Overall layout.
    dlgSizer = wx.BoxSizer(wx.VERTICAL)
    dlgSizer.Add(dlg.menuLabel, 0, wx.TOP|wx.LEFT, 10)
    dlgSizer.Add(controlsSizer, 0, wx.EXPAND|wx.ALL, 15)
    dlgSizer.Add(buttonSizer, 0, wx.EXPAND|wx.ALL, 10)
    dlg.SetSizer(dlgSizer)
    dlgSizer.Fit(dlg)
    okay.SetDefault()
    return dlg
def OnListCtrlActivated(self, event):
    """Open the shortcut edit dialog pre-filled with the activated item.

    Parses the stored shortcut string ('Ctrl+Alt+X' style) back into the
    modifier check boxes and the key choice; on OK delegates to
    OnEditButtonOK to validate and store the new shortcut.
    """
    dlg = self.defineShortcutEditDialog()
    index = event.GetIndex()
    dlg.listIndex = index
    label, shortcut, id = self.shortcutList[index]
    dlg.menuLabel.SetLabel(label)
    if shortcut == '':
        dlg.checkBoxCtrl.SetValue(False)
        dlg.checkBoxAlt.SetValue(False)
        dlg.checkBoxShift.SetValue(False)
        dlg.listBoxKey.SetSelection(wx.NOT_FOUND)
    else:
        items = [s.upper() for s in shortcut.split('+')]
        # A trailing empty element means the key itself is '+'
        # (e.g. 'Ctrl++' splits into ['CTRL', '', '']).
        if not items[-1]:
            del items[-1]
            items[-1] += '+'
        boolCtrl = False
        boolAlt = False
        boolShift = False
        if 'CTRL' in items:
            boolCtrl = True
        if 'ALT' in items:
            boolAlt = True
        if 'SHIFT' in items:
            boolShift = True
        if len(items) == 1:
            keyString = items[0]
        else:
            keyString = items[-1]
        dlg.checkBoxCtrl.SetValue(boolCtrl)
        dlg.checkBoxAlt.SetValue(boolAlt)
        dlg.checkBoxShift.SetValue(boolShift)
        if not dlg.listBoxKey.SetStringSelection(keyString):
            print>>sys.stderr, _('%(keyString)s not found in key string list') % locals()
    ID = dlg.ShowModal()
    # Set the data
    if ID == wx.ID_OK:
        self.OnEditButtonOK(dlg)
        #~ self.options = dlg.GetDict()
    dlg.Destroy()
def OnEditButtonOK(self, dlg):
    """Store the shortcut assembled from the edit dialog's widgets.

    Called with the still-open edit dialog ('dlg') after it returned
    wx.ID_OK.  If the new shortcut collides with another entry, the user
    is asked whether to steal it (the other entry's shortcut is cleared
    in place); answering Cancel aborts without changing anything.
    """
    # Get the values from the dialog
    boolCtrl = dlg.checkBoxCtrl.GetValue()
    boolAlt = dlg.checkBoxAlt.GetValue()
    boolShift = dlg.checkBoxShift.GetValue()
    keyString = dlg.listBoxKey.GetStringSelection()
    # Check basic invalid cases
    #~ if keyString == '':
        #~ wx.MessageBox(_('You must specify a key!'), _('Error'), style=wx.ICON_ERROR)
        #~ return
    #~ if (len(keyString) == 1 or keyString in ('Home', 'End', 'PgUp', 'PgDn', 'Up', 'Down', 'Left', 'Right')) and (not boolCtrl and not boolAlt and not boolShift):
    #~ if (len(keyString) == 1) and (not boolCtrl and not boolAlt and not boolShift):
        #~ wx.MessageBox(_('You must check at least one modifier!'), _('Error'), style=wx.ICON_ERROR)
        #~ return
    # Build the shortcut string
    shortcut = ''
    if boolCtrl:
        shortcut += 'Ctrl+'
    if boolAlt:
        shortcut += 'Alt+'
    if boolShift:
        shortcut += 'Shift+'
    if keyString:
        shortcut += keyString
    # Check if keyboard shortcut already exists
    oldShortcut = self.shortcutList[dlg.listIndex][1]
    shortcutUpper = shortcut.upper()
    if shortcutUpper != oldShortcut.upper():
        #~ if shortcutUpper in [info[1].upper() for info in self.shortcutList]:
        for info in self.shortcutList:
            if shortcutUpper == info[1].upper():
                line1 = _('This shortcut is being used by:')
                line2 = info[0]
                line3 = _('Do you wish to continue?')
                ret = wx.MessageBox('%s\n\n%s\n\n%s' % (line1, line2 , line3), _('Warning'),
                                    wx.OK|wx.CANCEL|wx.ICON_EXCLAMATION, dlg)
                if ret == wx.OK:
                    # Steal the shortcut: clear it on the other entry.
                    info[1] = ''
                    #~ self.updateMenuLabel(info[2], '')
                else:
                    return
                break
    self.shortcutList[dlg.listIndex][1] = shortcut
    #~ self.updateMenuLabel(self.shortcutList[dlg.listIndex][2], shortcut)
    self.listCtrl.Refresh()
    #~ event.Skip()
def OnEditButtonClear(self, event):
    """Reset all modifier check boxes and the key choice in the edit dialog.

    The dialog stays open; the cleared shortcut is only committed when the
    user then presses OK.
    """
    dlg = event.GetEventObject().GetParent()
    for checkBox in (dlg.checkBoxCtrl, dlg.checkBoxAlt, dlg.checkBoxShift):
        checkBox.SetValue(False)
    dlg.listBoxKey.SetSelection(wx.NOT_FOUND)
def _x_updateMenuLabel(self, id, shortcut):
    """(Disabled helper) Update a menu item's accelerator text by item id."""
    menuItem = self.parent.GetMenuBar().FindItemById(id)
    menuItem.SetText('%s\t%s' % (menuItem.GetLabel(), shortcut))
class EditStringDictDialog(wx.Dialog):
    """Generic key/value editor dialog.

    Shows the keys of *infoDict* in a list control on the left and the
    value of the selected key in a multi-line text control on the right.
    Optional features:
      editable    - keys can be renamed in place (label editing)
      insertable  - Insert/Delete buttons are shown
      keyChecker / valueChecker - validators returning an error message
                    string, or None when the input is acceptable
      nag         - ask for confirmation on rename / warn on empty values

    The dialog works on a copy of *infoDict*; callers retrieve the edited
    mapping with GetDict() after the dialog closes with wx.ID_OK.
    """

    def __init__(self, parent, infoDict, title='Edit', keyTitle='Key',
                 valueTitle='Value', editable=False, insertable=False,
                 about='', keyChecker=None, valueChecker=None, nag=True):
        wx.Dialog.__init__(self, parent, wx.ID_ANY, title, size=(500, 300), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        # Work on a copy so Cancel leaves the caller's dict untouched.
        self.infoDict = infoDict.copy()
        self.keyTitle = keyTitle
        self.valueTitle = valueTitle
        self.keyChecker = keyChecker
        self.valueChecker = valueChecker
        self.nag = nag
        self.previousKey = None      # key whose value is currently shown
        self.editName = ''           # key name captured at label-edit start
        self.textChanged = False     # True once the value text was modified
        # Create the key and value static text labels
        keyLabel = wx.StaticText(self, wx.ID_ANY, keyTitle)
        valueLabel = wx.StaticText(self, wx.ID_ANY, valueTitle)
        # Create the list control using the dictionary
        style = wx.LC_REPORT|wx.LC_NO_HEADER|wx.LC_SORT_ASCENDING|wx.LC_SINGLE_SEL
        if editable:
            style |= wx.LC_EDIT_LABELS
        self.listCtrl = ListCtrl(self, wx.ID_ANY, style=style)
        self.listCtrl.InsertColumn(0, 'Column 0')
        for row, key in enumerate(self.infoDict.keys()):
            self.listCtrl.InsertStringItem(row, key)
        self.listCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        self.listCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnListItemSelected)
        self.listCtrl.Bind(wx.EVT_LIST_BEGIN_LABEL_EDIT, self.OnListItemEdit)
        self.listCtrl.Bind(wx.EVT_LIST_END_LABEL_EDIT, self.OnListItemEdited)
        # Create the text control
        self.textCtrl = TextCtrl(self, wx.ID_ANY, style=wx.TE_MULTILINE|wx.HSCROLL)
        self.textCtrl.Bind(wx.EVT_TEXT, self.OnValueTextChanged)
        # Create the insert/delete buttons
        if insertable:
            insertButton = wx.Button(self, wx.ID_ANY, _('Insert'))
            self.Bind(wx.EVT_BUTTON, self.OnButtonInsert, insertButton)
            deleteButton = wx.Button(self, wx.ID_ANY, _('Delete'))
            self.Bind(wx.EVT_BUTTON, self.OnButtonDelete, deleteButton)
            insSizer = wx.BoxSizer(wx.HORIZONTAL)
            insSizer.Add(insertButton, 1, wx.ALIGN_CENTER|wx.ALL, 5)
            insSizer.Add(deleteButton, 1, wx.ALIGN_CENTER|wx.ALL, 5)
        # Standard buttons
        okay = wx.Button(self, wx.ID_OK, _('OK'))
        self.Bind(wx.EVT_BUTTON, self.OnButtonOK, okay)
        cancel = wx.Button(self, wx.ID_CANCEL, _('Cancel'))
        btns = wx.StdDialogButtonSizer()
        btns.AddButton(okay)
        btns.AddButton(cancel)
        btns.Realize()
        # Size the elements
        gridSizer = wx.FlexGridSizer(cols=2, hgap=10, vgap=5)
        gridSizer.AddGrowableCol(1, 1)
        gridSizer.AddGrowableRow(1)
        gridSizer.Add(keyLabel, 0)
        gridSizer.Add(valueLabel, 0, wx.ALIGN_CENTER)
        gridSizer.Add(self.listCtrl, 2, wx.EXPAND|wx.RIGHT, 10)
        gridSizer.Add(self.textCtrl, 3, wx.EXPAND)
        minWidth = max(self.listCtrl.GetColumnWidth(0), keyLabel.GetSize()[0])
        gridSizer.SetItemMinSize(self.listCtrl, min(minWidth+20, 250), 20)
        dlgSizer = wx.BoxSizer(wx.VERTICAL)
        dlgSizer.Add(gridSizer, 1, wx.EXPAND|wx.ALL, 10)
        if insertable:
            dlgSizer.Add(insSizer, 0, wx.EXPAND|wx.ALIGN_CENTER|wx.BOTTOM, 15)
        if about:
            dlgSizer.Add(wx.StaticText(self, wx.ID_ANY, about), 0, wx.ALIGN_CENTER|wx.BOTTOM, 10)
        dlgSizer.Add(btns, 0, wx.EXPAND|wx.ALL, 5)
        self.SetSizer(dlgSizer)
        # Misc
        if self.listCtrl.GetItemCount():
            self.listCtrl.SelectItem(0)
        okay.SetDefault()

    def GetDict(self):
        """Return the (possibly edited) copy of the mapping."""
        return self.infoDict

    def UpdateDictEntry(self):
        # Flush the text control back into the dict entry that was being
        # edited, but only if the user actually changed the text.
        if self.infoDict.has_key(self.previousKey) and self.textChanged:
            self.infoDict[self.previousKey] = self.textCtrl.GetValue()

    def OnValueTextChanged(self, event):
        # Any edit in the value box marks the current entry dirty.
        self.textChanged = True

    def OnListItemSelected(self, event):
        # Update the previously selected key
        self.UpdateDictEntry()
        # Display the value associated with the selected key
        key = event.GetText()
        value = self.infoDict.get(key)
        if value is not None:
            self.textCtrl.Replace(0, -1, value)
            self.textCtrl.SetInsertionPoint(0)
            self.textChanged = False
        else:
            print>>sys.stderr, _('Error: key %(key)s does not exist!') % locals()
        self.previousKey = key

    def OnListItemEdit(self, event):
        # Remember the original key name before in-place label editing.
        self.editName = event.GetText()

    def OnListItemEdited(self, event):
        # Validate and commit a key rename performed via label editing.
        if event.IsEditCancelled():
            return
        newName = event.GetLabel()
        if not newName:
            # Empty key names are rejected.
            event.Veto()
            return
        if newName != self.editName:
            if newName in self.infoDict:
                wx.MessageBox(_('Item %(newKey)s already exists!') % {'newKey': newName},
                              _('Error'), style=wx.OK|wx.ICON_ERROR)
                event.Veto()
                return
            if self.keyChecker:
                msg = self.keyChecker(newName)
                if msg is not None:
                    wx.MessageBox(msg, _('Error'), style=wx.OK|wx.ICON_ERROR)
                    event.Veto()
                    return
            if self.nag:
                oldName = self.editName
                dlg = wx.MessageDialog(self, _('Are you sure you want to rename from %(oldName)s to %(newName)s?') % locals(), _('Question'))
                ID = dlg.ShowModal()
                dlg.Destroy()
                if ID != wx.ID_OK:
                    event.Veto()
                    return
            # "Rename" the key in the dictionary
            del self.infoDict[self.editName]
            self.infoDict[newName] = self.textCtrl.GetValue()

    def OnButtonInsert(self, event):
        """Prompt for a new key/value pair and add it to the dict and list."""
        dlg = wx.Dialog(self, wx.ID_ANY, _('Insert a new item'))
        sizer = wx.BoxSizer(wx.VERTICAL)
        keyTextCtrl = wx.TextCtrl(dlg, wx.ID_ANY)
        valueTextCtrl = TextCtrl(dlg, wx.ID_ANY, style=wx.TE_MULTILINE|wx.HSCROLL)
        sizer.Add(wx.StaticText(dlg, wx.ID_ANY, self.keyTitle.strip()), 0, wx.EXPAND)
        sizer.Add(keyTextCtrl, 0, wx.EXPAND|wx.BOTTOM, 10)
        sizer.Add(wx.StaticText(dlg, wx.ID_ANY, self.valueTitle.strip()), 0, wx.EXPAND)
        sizer.Add(valueTextCtrl, 1, wx.EXPAND|wx.BOTTOM, 10)
        # Standard buttons
        okay = wx.Button(dlg, wx.ID_OK, _('OK'))
        cancel = wx.Button(dlg, wx.ID_CANCEL, _('Cancel'))
        btns = wx.StdDialogButtonSizer()
        btns.AddButton(okay)
        btns.AddButton(cancel)
        btns.Realize()
        # Size the elements
        dlgSizer = wx.BoxSizer(wx.VERTICAL)
        dlgSizer.Add(sizer, 1, wx.EXPAND|wx.ALL, 10)
        dlgSizer.Add(btns, 0, wx.EXPAND|wx.ALL, 5)
        dlg.SetSizer(dlgSizer)
        # Show the dialog
        ID = dlg.ShowModal()
        newKey = keyTextCtrl.GetValue().lstrip('.')
        newValue = valueTextCtrl.GetValue()
        dlg.Destroy()
        # Add the new item to the dictionary as well as the listCtrl
        if ID == wx.ID_OK:
            if not newKey:
                wx.MessageBox(_('Must enter a name!'), _('Error'),
                              style=wx.OK|wx.ICON_ERROR)
                return
            if self.infoDict.has_key(newKey):
                wx.MessageBox(_('Item %(newKey)s already exists!') % locals(),
                              _('Error'), style=wx.OK|wx.ICON_ERROR)
                return
            if self.keyChecker:
                msg = self.keyChecker(newKey)
                if msg is not None:
                    wx.MessageBox(msg, _('Error'), style=wx.OK|wx.ICON_ERROR)
                    return
            if self.valueChecker:
                msg = self.valueChecker(newValue)
                if msg is not None:
                    wx.MessageBox(msg, _('Error'), style=wx.OK|wx.ICON_ERROR)
                    return
            self.infoDict[newKey] = newValue
            self.listCtrl.InsertStringItem(0, newKey)
            self.listCtrl.SelectLabel(newKey)
            if newValue == '' and self.nag:
                wx.MessageBox(_('Warning: no value entered for item %(newKey)s!') % locals(), _('Warning'))
                self.textCtrl.SetFocus()

    def OnButtonDelete(self, event):
        """Delete the selected item after confirmation, keeping a sensible
        selection in the list afterwards."""
        index = self.listCtrl.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
        if index == -1:
            wx.MessageBox(_('Select an item to delete first'), _('Message'))
            return
        key = self.listCtrl.GetItemText(index)
        dlg = wx.MessageDialog(self, _('Are you sure you want to delete item %(key)s?') % locals(), _('Question'))
        ID = dlg.ShowModal()
        if ID == wx.ID_OK:
            del self.infoDict[key]
            self.listCtrl.DeleteItem(index)
            if self.listCtrl.GetItemCount():
                if index - 1 < 0:
                    self.listCtrl.SelectItem(0)
                else:
                    self.listCtrl.SelectItem(index-1)
        dlg.Destroy()

    def OnButtonOK(self, event):
        # Update the previously selected key
        self.UpdateDictEntry()
        event.Skip()
class Slider(wx.Slider):
    """wx.Slider whose public value range is expressed in 'user' units.

    The user range [uMinValue, uMaxValue] may be fractional (nDecimal
    decimal places) or stepped (mod); it is mapped linearly onto the
    integer range 0..wxMaxValue actually used by the underlying wx.Slider,
    with the internal resolution capped at wxMaxValueLimit positions.
    GetValue/SetValue/GetMin/GetMax all speak user units.
    """

    def __init__(self, parent, id=wx.ID_ANY,
                 value=0, minValue=0, maxValue=100,
                 point=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.SL_HORIZONTAL, validator=wx.DefaultValidator,
                 name="slider", nDecimal=0, mod=None, onscroll=None):
        # Bookkeeping
        self.parent = parent
        self.onscroll = onscroll          # optional callback fired on every scroll event
        self.wxMaxValueLimit = 10000      # cap on internal integer positions
        # User-level slider values
        self.uValue = value
        self.uMinValue = minValue
        self.uMaxValue = maxValue
        if mod is not None:
            if mod > maxValue - minValue:
                mod = None  # step wider than the whole range: ignore it
            else:
                if type(mod) is int:
                    # Shrink maxValue so the range is a whole number of steps.
                    maxValue = maxValue - (maxValue - minValue) % mod
                    if mod > maxValue - minValue:
                        mod = None
                else:
                    # Non-integer step implies integer display (no decimals).
                    nDecimal = 0
                # NOTE(review): if the shrink above just reset mod to None,
                # the '% mod' below raises TypeError -- presumably callers
                # never pass such a mod/range combination; verify.
                self.uMinValue = minValue
                self.uMaxValue = maxValue
                self.uValue = min(value + (value - minValue) % mod, maxValue)
        self.uSelStart = 0
        self.uSelEnd = 0
        self.nDecimal = nDecimal
        # Determine the internal slider range (0 to wxMaxValue)
        self.wxMaxValue = self._get_wxMaxValue(minValue, maxValue, nDecimal, mod)
        # Create the slider control
        aValue = self._upos2wxpos(value)
        # NOTE(review): 'point=' is kept from the original call; most
        # wxPython builds name this constructor argument 'pos' -- confirm
        # against the wx version bundled with this application.
        wx.Slider.__init__(self, parent, id,
                           aValue, 0, self.wxMaxValue,
                           point=point, size=size, style=style,
                           validator=validator, name=name
                           )
        self.name = self.GetName()
        # Event binding
        self.Bind(wx.EVT_SCROLL, self._OnSliderChanging)

    def _get_wxMaxValue(self, uMinValue, uMaxValue, nDecimal, mod):
        """Return the number of internal integer positions for the given
        user range, derived from the step size (mod or 10**-nDecimal) and
        clamped to wxMaxValueLimit."""
        if mod is None:
            step = 1/float(10**nDecimal)
        else:
            step = mod
        wxMaxValue = (uMaxValue - uMinValue) / step
        wxMaxValue = int(round(wxMaxValue))
        if wxMaxValue >= self.wxMaxValueLimit:
            wxMaxValue = self.wxMaxValueLimit
        return wxMaxValue

    def _upos2wxpos(self, upos):
        '''Converts user pos to actual wxSlider pos'''
        wxpos = self.wxMaxValue * (upos - self.uMinValue) / float(self.uMaxValue - self.uMinValue)
        return int(round(wxpos))

    def _wxpos2upos(self, wxpos):
        '''Converts actual wxSlider pos to user pos'''
        upos = self.uMinValue + (self.uMaxValue - self.uMinValue) * wxpos / float(self.wxMaxValue)
        if self.nDecimal == 0:
            upos = int(round(upos))
        return upos

    def _OnSliderChanging(self, event):
        """Track the user-level value and forward the event to onscroll."""
        self.uValue = self._wxpos2upos(super(Slider, self).GetValue())
        if self.onscroll:
            self.onscroll(event)
        event.Skip()

    def GetValue(self):
        """Return the current position in user units."""
        return self.uValue

    def GetValueAsString(self):
        """Return the current position formatted with nDecimal decimals."""
        strTemplate = '%.'+str(self.nDecimal)+'f'
        return strTemplate % self.uValue

    def GetMin(self):
        return self.uMinValue

    def GetMax(self):
        return self.uMaxValue

    def _GetSelStart(self):
        return self.uSelStart

    def _GetSelEnd(self):
        return self.uSelEnd

    def _GetLineSize(self):
        pass

    def _GetPageSize(self):
        pass

    def _GetThumbLength(self):
        pass

    def _GetTickFreq(self):
        pass

    def SetValue(self, value):
        """Set the position in user units (rounded when nDecimal == 0)."""
        if self.nDecimal == 0:
            value = int(round(value))
        self.uValue = value
        super(Slider, self).SetValue(self._upos2wxpos(value))

    def SetRange(self, minValue, maxValue, nDecimal=None, mod=None):
        """Redefine the user range; keeps the current nDecimal unless a new
        one is supplied. Invalid ranges are reported and ignored, except the
        degenerate (0, -1) / (0, 0) range which is bumped to (0, 1)."""
        if minValue >= maxValue:
            if minValue == 0 and (maxValue == -1 or maxValue == 0):
                maxValue = 1
            else:
                print>>sys.stderr, _('Error: minValue must be less than maxValue')
                return
        self.uMinValue = minValue
        self.uMaxValue = maxValue
        if nDecimal is not None:
            self.nDecimal = nDecimal
        self.wxMaxValue = self._get_wxMaxValue(minValue, maxValue, self.nDecimal, mod)
        super(Slider, self).SetRange(0, self.wxMaxValue)

    def SetSelection(self, startPos, endPos):
        """Select the range [startPos, endPos] given in user units.

        Bug fix: the original passed the undefined name 'endFrame' instead
        of 'endPos' to _upos2wxpos, so every call raised NameError.
        """
        self.uSelStart = startPos
        self.uSelEnd = endPos
        super(Slider, self).SetSelection(self._upos2wxpos(startPos), self._upos2wxpos(endPos))

    def Increment(self):
        """Move one internal step up (if possible); return the new user value."""
        wxpos = super(Slider, self).GetValue()
        if wxpos < super(Slider, self).GetMax():
            wxpos += 1
            self.uValue = self._wxpos2upos(wxpos)
            super(Slider, self).SetValue(wxpos)
        return self.uValue

    def Decrement(self):
        """Move one internal step down (if possible); return the new user value."""
        wxpos = super(Slider, self).GetValue()
        if wxpos > super(Slider, self).GetMin():
            wxpos -= 1
            self.uValue = self._wxpos2upos(wxpos)
            super(Slider, self).SetValue(wxpos)
        return self.uValue

    def _SetLineSize(self):
        pass

    def _SetPageSize(self):
        pass

    def _SetThumbLength(self):
        pass

    def _SetTickFreq(self):
        pass

    def SetTick(self, upos):
        super(Slider, self).SetTick(self._upos2wxpos(upos))
| AvsPmod/AvsPmod | wxp.py | Python | gpl-2.0 | 105,336 | [
"VisIt"
] | 2ba413dcbd29e6e11a6d0c2d0608103c7f1bbba1cf67f388aa81a1417ed41dbe |
"""Tests for distutils.dist."""
import os
import io
import sys
import unittest
import warnings
import textwrap
from unittest import mock
from distutils.dist import Distribution, fix_help_options, DistributionMetadata
from distutils.cmd import Command
from test.support import (
TESTFN, captured_stdout, captured_stderr, run_unittest
)
from distutils.tests import support
from distutils import log
class test_dist(Command):
    """Sample distutils extension command."""

    # distutils command classes are conventionally lowercase and named
    # after the command they implement, hence the non-PEP8 class name.
    user_options = [
        ("sample-option=", "S", "help text"),
        ]

    def initialize_options(self):
        # Default value; may be overridden from the command line (-S).
        self.sample_option = None
class TestDistribution(Distribution):
    """Distribution subclasses that avoids the default search for
    configuration files.

    The ._config_files attribute must be set before
    .parse_config_files() is called.
    """

    def find_config_files(self):
        # Return only the explicitly injected files, never the user's
        # real pydistutils.cfg / setup.cfg.
        return self._config_files
class DistributionTestCase(support.LoggingSilencer,
                           support.TempdirManager,
                           support.EnvironGuard,
                           unittest.TestCase):
    """Tests for distutils.dist.Distribution: command-line / config-file
    parsing, command packages and option handling."""

    def setUp(self):
        super(DistributionTestCase, self).setUp()
        # Save sys.argv (object and contents) so tests can mutate it freely.
        self.argv = sys.argv, sys.argv[:]
        del sys.argv[1:]

    def tearDown(self):
        # Restore the original sys.argv object and its contents.
        sys.argv = self.argv[0]
        sys.argv[:] = self.argv[1]
        super(DistributionTestCase, self).tearDown()

    def create_distribution(self, configfiles=()):
        # Build a Distribution that parses only the given config files
        # plus whatever is currently in sys.argv.
        d = TestDistribution()
        d._config_files = configfiles
        d.parse_config_files()
        d.parse_command_line()
        return d

    def test_command_packages_unspecified(self):
        sys.argv.append("build")
        d = self.create_distribution()
        self.assertEqual(d.get_command_packages(), ["distutils.command"])

    def test_command_packages_cmdline(self):
        from distutils.tests.test_dist import test_dist
        sys.argv.extend(["--command-packages",
                         "foo.bar,distutils.tests",
                         "test_dist",
                         "-Ssometext",
                         ])
        d = self.create_distribution()
        # let's actually try to load our test command:
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "foo.bar", "distutils.tests"])
        cmd = d.get_command_obj("test_dist")
        self.assertIsInstance(cmd, test_dist)
        self.assertEqual(cmd.sample_option, "sometext")

    def test_venv_install_options(self):
        # Install-location options from a config file must be ignored when
        # running inside a virtual environment (sys.prefix != base_prefix).
        sys.argv.append("install")
        self.addCleanup(os.unlink, TESTFN)

        fakepath = '/somedir'

        with open(TESTFN, "w") as f:
            print(("[install]\n"
                   "install-base = {0}\n"
                   "install-platbase = {0}\n"
                   "install-lib = {0}\n"
                   "install-platlib = {0}\n"
                   "install-purelib = {0}\n"
                   "install-headers = {0}\n"
                   "install-scripts = {0}\n"
                   "install-data = {0}\n"
                   "prefix = {0}\n"
                   "exec-prefix = {0}\n"
                   "home = {0}\n"
                   "user = {0}\n"
                   "root = {0}").format(fakepath), file=f)

        # Base case: Not in a Virtual Environment
        with mock.patch.multiple(sys, prefix='/a', base_prefix='/a') as values:
            d = self.create_distribution([TESTFN])

        option_tuple = (TESTFN, fakepath)

        result_dict = {
            'install_base': option_tuple,
            'install_platbase': option_tuple,
            'install_lib': option_tuple,
            'install_platlib': option_tuple,
            'install_purelib': option_tuple,
            'install_headers': option_tuple,
            'install_scripts': option_tuple,
            'install_data': option_tuple,
            'prefix': option_tuple,
            'exec_prefix': option_tuple,
            'home': option_tuple,
            'user': option_tuple,
            'root': option_tuple,
        }

        self.assertEqual(
            sorted(d.command_options.get('install').keys()),
            sorted(result_dict.keys()))

        for (key, value) in d.command_options.get('install').items():
            self.assertEqual(value, result_dict[key])

        # Test case: In a Virtual Environment
        with mock.patch.multiple(sys, prefix='/a', base_prefix='/b') as values:
            d = self.create_distribution([TESTFN])

        for key in result_dict.keys():
            self.assertNotIn(key, d.command_options.get('install', {}))

    def test_command_packages_configfile(self):
        sys.argv.append("build")
        self.addCleanup(os.unlink, TESTFN)
        f = open(TESTFN, "w")
        try:
            print("[global]", file=f)
            print("command_packages = foo.bar, splat", file=f)
        finally:
            f.close()

        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "foo.bar", "splat"])

        # ensure command line overrides config:
        sys.argv[1:] = ["--command-packages", "spork", "build"]
        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "spork"])

        # Setting --command-packages to '' should cause the default to
        # be used even if a config file specified something else:
        sys.argv[1:] = ["--command-packages", "", "build"]
        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(), ["distutils.command"])

    def test_empty_options(self):
        # an empty options dictionary should not stay in the
        # list of attributes

        # catching warnings
        warns = []

        def _warn(msg):
            warns.append(msg)

        self.addCleanup(setattr, warnings, 'warn', warnings.warn)
        warnings.warn = _warn
        dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx',
                                   'version': 'xxx', 'url': 'xxxx',
                                   'options': {}})

        self.assertEqual(len(warns), 0)
        self.assertNotIn('options', dir(dist))

    def test_finalize_options(self):
        attrs = {'keywords': 'one,two',
                 'platforms': 'one,two'}

        dist = Distribution(attrs=attrs)
        dist.finalize_options()

        # finalize_option splits platforms and keywords
        self.assertEqual(dist.metadata.platforms, ['one', 'two'])
        self.assertEqual(dist.metadata.keywords, ['one', 'two'])

        attrs = {'keywords': 'foo bar',
                 'platforms': 'foo bar'}
        dist = Distribution(attrs=attrs)
        dist.finalize_options()
        self.assertEqual(dist.metadata.platforms, ['foo bar'])
        self.assertEqual(dist.metadata.keywords, ['foo bar'])

    def test_get_command_packages(self):
        dist = Distribution()
        self.assertEqual(dist.command_packages, None)
        cmds = dist.get_command_packages()
        self.assertEqual(cmds, ['distutils.command'])
        self.assertEqual(dist.command_packages,
                         ['distutils.command'])

        dist.command_packages = 'one,two'
        cmds = dist.get_command_packages()
        self.assertEqual(cmds, ['distutils.command', 'one', 'two'])

    def test_announce(self):
        # make sure the level is known
        dist = Distribution()
        args = ('ok',)
        kwargs = {'level': 'ok2'}
        self.assertRaises(ValueError, dist.announce, args, kwargs)

    def test_find_config_files_disable(self):
        # Ticket #1180: Allow user to disable their home config file.
        temp_home = self.mkdtemp()
        if os.name == 'posix':
            user_filename = os.path.join(temp_home, ".pydistutils.cfg")
        else:
            user_filename = os.path.join(temp_home, "pydistutils.cfg")

        with open(user_filename, 'w') as f:
            f.write('[distutils]\n')

        def _expander(path):
            return temp_home

        old_expander = os.path.expanduser
        os.path.expanduser = _expander
        try:
            d = Distribution()
            all_files = d.find_config_files()

            d = Distribution(attrs={'script_args': ['--no-user-cfg']})
            files = d.find_config_files()
        finally:
            os.path.expanduser = old_expander

        # make sure --no-user-cfg disables the user cfg file
        self.assertEqual(len(all_files)-1, len(files))
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
                       unittest.TestCase):
    """Tests for DistributionMetadata: PKG-INFO generation/round-tripping,
    metadata-version selection, field validation and config discovery."""

    def setUp(self):
        super(MetadataTestCase, self).setUp()
        # Save sys.argv (object and contents) so tests can mutate it freely.
        self.argv = sys.argv, sys.argv[:]

    def tearDown(self):
        sys.argv = self.argv[0]
        sys.argv[:] = self.argv[1]
        super(MetadataTestCase, self).tearDown()

    def format_metadata(self, dist):
        # Render the distribution's metadata as a PKG-INFO string.
        sio = io.StringIO()
        dist.metadata.write_pkg_file(sio)
        return sio.getvalue()

    def test_simple_metadata(self):
        attrs = {"name": "package",
                 "version": "1.0"}
        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        self.assertIn("Metadata-Version: 1.0", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertNotIn("requires:", meta.lower())
        self.assertNotIn("obsoletes:", meta.lower())

    def test_provides(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "provides": ["package", "package.sub"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_provides(),
                         ["package", "package.sub"])
        self.assertEqual(dist.get_provides(),
                         ["package", "package.sub"])
        meta = self.format_metadata(dist)
        # 'provides' bumps the metadata version to 1.1.
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("requires:", meta.lower())
        self.assertNotIn("obsoletes:", meta.lower())

    def test_provides_illegal(self):
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "provides": ["my.pkg (splat)"]})

    def test_requires(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "requires": ["other", "another (==1.0)"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_requires(),
                         ["other", "another (==1.0)"])
        self.assertEqual(dist.get_requires(),
                         ["other", "another (==1.0)"])
        meta = self.format_metadata(dist)
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertIn("Requires: other", meta)
        self.assertIn("Requires: another (==1.0)", meta)
        self.assertNotIn("obsoletes:", meta.lower())

    def test_requires_illegal(self):
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "requires": ["my.pkg (splat)"]})

    def test_requires_to_list(self):
        # An iterable must be coerced to a list.
        attrs = {"name": "package",
                 "requires": iter(["other"])}
        dist = Distribution(attrs)
        self.assertIsInstance(dist.metadata.requires, list)

    def test_obsoletes(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "obsoletes": ["other", "another (<1.0)"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_obsoletes(),
                         ["other", "another (<1.0)"])
        self.assertEqual(dist.get_obsoletes(),
                         ["other", "another (<1.0)"])
        meta = self.format_metadata(dist)
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertNotIn("requires:", meta.lower())
        self.assertIn("Obsoletes: other", meta)
        self.assertIn("Obsoletes: another (<1.0)", meta)

    def test_obsoletes_illegal(self):
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "obsoletes": ["my.pkg (splat)"]})

    def test_obsoletes_to_list(self):
        attrs = {"name": "package",
                 "obsoletes": iter(["other"])}
        dist = Distribution(attrs)
        self.assertIsInstance(dist.metadata.obsoletes, list)

    def test_classifier(self):
        attrs = {'name': 'Boa', 'version': '3.0',
                 'classifiers': ['Programming Language :: Python :: 3']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_classifiers(),
                         ['Programming Language :: Python :: 3'])
        meta = self.format_metadata(dist)
        self.assertIn('Metadata-Version: 1.1', meta)

    def test_classifier_invalid_type(self):
        attrs = {'name': 'Boa', 'version': '3.0',
                 'classifiers': ('Programming Language :: Python :: 3',)}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.classifiers, list)
        self.assertEqual(d.metadata.classifiers,
                         list(attrs['classifiers']))

    def test_keywords(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'keywords': ['spam', 'eggs', 'life of brian']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_keywords(),
                         ['spam', 'eggs', 'life of brian'])

    def test_keywords_invalid_type(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'keywords': ('spam', 'eggs', 'life of brian')}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.keywords, list)
        self.assertEqual(d.metadata.keywords, list(attrs['keywords']))

    def test_platforms(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'platforms': ['GNU/Linux', 'Some Evil Platform']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_platforms(),
                         ['GNU/Linux', 'Some Evil Platform'])

    def test_platforms_invalid_types(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'platforms': ('GNU/Linux', 'Some Evil Platform')}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.platforms, list)
        self.assertEqual(d.metadata.platforms, list(attrs['platforms']))

    def test_download_url(self):
        attrs = {'name': 'Boa', 'version': '3.0',
                 'download_url': 'http://example.org/boa'}
        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        self.assertIn('Metadata-Version: 1.1', meta)

    def test_long_description(self):
        long_desc = textwrap.dedent("""\
        example::
              We start here
            and continue here
          and end here.""")
        attrs = {"name": "package",
                 "version": "1.0",
                 "long_description": long_desc}

        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        meta = meta.replace('\n' + 8 * ' ', '\n')
        self.assertIn(long_desc, meta)

    def test_custom_pydistutils(self):
        # fixes #2166
        # make sure pydistutils.cfg is found
        if os.name == 'posix':
            user_filename = ".pydistutils.cfg"
        else:
            user_filename = "pydistutils.cfg"

        temp_dir = self.mkdtemp()
        user_filename = os.path.join(temp_dir, user_filename)
        f = open(user_filename, 'w')
        try:
            f.write('.')
        finally:
            f.close()

        try:
            dist = Distribution()

            # linux-style
            if sys.platform in ('linux', 'darwin'):
                os.environ['HOME'] = temp_dir
                files = dist.find_config_files()
                self.assertIn(user_filename, files)

            # win32-style
            if sys.platform == 'win32':
                # home drive should be found
                os.environ['HOME'] = temp_dir
                files = dist.find_config_files()
                self.assertIn(user_filename, files,
                              '%r not found in %r' % (user_filename, files))
        finally:
            os.remove(user_filename)

    def test_fix_help_options(self):
        # fix_help_options drops the fourth element of each tuple.
        help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
        fancy_options = fix_help_options(help_tuples)
        self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
        self.assertEqual(fancy_options[1], (1, 2, 3))

    def test_show_help(self):
        # smoke test, just makes sure some help is displayed
        self.addCleanup(log.set_threshold, log._global_log.threshold)
        dist = Distribution()
        sys.argv = []
        dist.help = 1
        dist.script_name = 'setup.py'
        with captured_stdout() as s:
            dist.parse_command_line()

        output = [line for line in s.getvalue().split('\n')
                  if line.strip() != '']
        self.assertTrue(output)

    def test_read_metadata(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "long_description": "desc",
                 "description": "xxx",
                 "download_url": "http://example.com",
                 "keywords": ['one', 'two'],
                 "requires": ['foo']}

        dist = Distribution(attrs)
        metadata = dist.metadata

        # write it then reloads it
        PKG_INFO = io.StringIO()
        metadata.write_pkg_file(PKG_INFO)
        PKG_INFO.seek(0)
        metadata.read_pkg_file(PKG_INFO)

        self.assertEqual(metadata.name, "package")
        self.assertEqual(metadata.version, "1.0")
        self.assertEqual(metadata.description, "xxx")
        self.assertEqual(metadata.download_url, 'http://example.com')
        self.assertEqual(metadata.keywords, ['one', 'two'])
        self.assertEqual(metadata.platforms, ['UNKNOWN'])
        self.assertEqual(metadata.obsoletes, None)
        self.assertEqual(metadata.requires, ['foo'])
def test_suite():
    """Build the test suite for this module.

    Uses TestLoader.loadTestsFromTestCase instead of unittest.makeSuite,
    which is deprecated since Python 3.11 and removed in 3.13; the loaded
    tests are identical.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(DistributionTestCase))
    suite.addTest(loader.loadTestsFromTestCase(MetadataTestCase))
    return suite
if __name__ == "__main__":
run_unittest(test_suite())
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/distutils/tests/test_dist.py | Python | gpl-2.0 | 19,095 | [
"Brian"
] | 49cbb121dcecb5dbdc6095a1675c1fe248901f93d125509911f800840d837d9f |
import ast
from ast import AST
import io
import contextlib
import weakref
import random
TAB_SIZE = 2
TAB = TAB_SIZE * " "
TEMP_VNAME = "X_X"
LUA_True = "true"
LUA_False = "false"
LUA_None = "nil"
class RawRepr(str):
    """str subclass whose repr() is the raw text itself (no quotes), so
    already-rendered code fragments are not re-quoted when interpolated
    with %r / repr() by callers such as unresolve()."""
    def __repr__(self):
        return self
def unresolve(value, raw=False, *, has_ast):
    """Render an AST field value for display.

    AST nodes become ``"*"`` when *has_ast* is true (children are printed
    on their own lines elsewhere), otherwise a one-level ``Name(field=...)``
    rendering.  Lists are rendered element-wise.  Plain values are returned
    as-is when *raw* is true, else via repr().
    """
    if isinstance(value, AST):
        if has_ast:
            # Grandchildren are expanded elsewhere; just mark the slot.
            return RawRepr("*")
        node_name = type(value).__name__
        rendered_fields = []
        for field, child in ast.iter_fields(value):
            if isinstance(child, AST):
                child = unresolve(child, raw=True, has_ast=False)
            rendered_fields.append("%s=%s" % (field, child))
        return RawRepr("%s(%s)" % (node_name, ", ".join(rendered_fields)))
    if isinstance(value, list):
        return [unresolve(item, raw=True, has_ast=has_ast) for item in value]
    if raw:
        return value
    return repr(value)
def repr_node(node, *, has_ast):
    """Render *node* as ``NodeName(field=value, ...)``.

    Field values are formatted by unresolve(); when *has_ast* is true,
    AST-valued fields are collapsed to ``*`` (they are printed as separate
    lines by the tree dumper).
    """
    pieces = ["%s=%s" % (field, unresolve(value, has_ast=has_ast))
              for field, value in ast.iter_fields(node)]
    return "%s(%s)" % (type(node).__name__, ", ".join(pieces))
class ASTChildVisitor(ast.NodeVisitor):
    """Visitor whose generic_visit reports whether a node has nested AST
    structure: True if any list field contains an AST element, or any
    AST-valued field itself has an AST-valued field (a grandchild)."""

    def generic_visit(self, node):
        for _field, child in ast.iter_fields(node):
            if isinstance(child, AST):
                if any(isinstance(grandchild, AST)
                       for _name, grandchild in ast.iter_fields(child)):
                    return True
            elif isinstance(child, list):
                if any(isinstance(item, AST) for item in child):
                    return True
        return False
class ShowTree(ast.NodeVisitor):
    """Dump an AST to stdout, one node per line, indented by depth.

    Nodes without nested AST structure (as judged by ASTChildVisitor) are
    printed with their fields expanded inline; nodes with nested structure
    are printed with '*' placeholders and then recursed into.
    """

    def __init__(self):
        # Current recursion depth; drives the leading indentation.
        self.level = 0

    def generic_visit(self, node):
        self.level += 1
        try:
            child_visitor = ASTChildVisitor()
            has_ast = child_visitor.visit(node)
            # One space of indent per ancestor level.
            print((self.level - 1) * " ", end="")
            print(repr_node(node, has_ast=has_ast))
            if has_ast:
                super().generic_visit(node)
        finally:
            # Always unwind the depth, even if printing raises.
            self.level -= 1
class IsControlFlow(Exception):
    """Internal signal used by the code generator's block helpers: raised
    when a statement is a control-flow construct, caught by hasblock()
    and converted to an error by noblock()."""
    pass
class LuaCodeGenerator(ast.NodeVisitor):
def __init__(self):
self.reset()
def reset(self):
self.indent = 0
self.fp = io.StringIO()
self.blocks = [self.new_blockenv()]
self.lastend = "\n"
self.lineno = 1
def print(self, *args, **kwargs):
fp = self.fp
if self.lastend == "\n":
fp.write(self.indent * TAB)
self.lineno += 1
self.lastend = kwargs.get("end", "\n")
print(*args, file=fp, **kwargs)
def new_blockenv(self):
return {
"global_defined" : set(),
"local_defined" : set(),
"nonlocal_defined" : set(),
"defined" : set(),
}
@property
def current_block(self):
return self.blocks[-1]
@contextlib.contextmanager
def block(self):
self.indent += 1
self.blocks.append(self.new_blockenv())
try:
yield
finally:
self.indent -= 1
self.blocks.pop()
@contextlib.contextmanager
def noblock(self):
try:
yield
except IsControlFlow:
raise ValueError("Control Flow are not excepted.")
@contextlib.contextmanager
def hasblock(self):
try:
yield
except IsControlFlow:
pass
def generic_visit(self, node):
raise TypeError("%r are not supported by py2lua (did you call direct?)" % (type(node),))
def visit_Module(self, node):
self.reset()
print = self.print
print('-- PYTHON ARE REQUIRE FOR EXECUTE --')
print("assert(__py__, 'Require Python API')")
print("__py__._init_module()")
print("local", TEMP_VNAME)
print()
for subnode in node.body:
with self.hasblock():
print(self.visit(subnode))
return self.fp.getvalue()
# -- Literals -- #
def visit_Num(self, node):
with self.noblock():
if isinstance(node.n, int):
return "int(%r)" % (node.n)
elif isinstance(node.n, float):
return "float(%r)" % (node.n)
else:
raise TypeError("%r are not supported by py2lua" % (type(node.n),))
def visit_Str(self, node):
with self.noblock():
return "str(%r)" % node.s
def visit_List(self, node):
with self.noblock():
return "list({%s})" % (", ".join(map(self.visit, node.elts)))
def visit_Tuple(self, node):
with self.noblock():
return "tuple({%s})" % (", ".join(map(self.visit, node.elts)))
def visit_Dict(self, node):
with self.noblock():
has_content = False
result = "dict({"
for key, value in zip(node.keys, node.values):
has_content = True
result += "[%s] = %s, " % (self.visit(key), self.visit(value))
if has_content:
result = result[:-len(", ")]
result += "})"
return result
# -- Variables -- #
def visit_Name(self, node):
with self.noblock():
return node.id
# -- Expressions -- #
def visit_Expr(self, node):
with self.noblock():
return self.visit(node.value)
def visit_UnaryOp(self, node):
with self.noblock():
op = self.visit(node.op)
operand = self.visit(node.operand)
return "%s%s" % (op, operand)
visit_UAdd = lambda self, node: "+"
visit_USub = lambda self, node: "-"
visit_Not = lambda self, node: "not "
def visit_BinOp(self, node):
# TODO: unwarp /var = (...)/ the union
# must ((3 + 2) * 1) => (3 + 2) * 1
with self.noblock():
left = self.visit(node.left)
right = self.visit(node.right)
op = self.visit(node.op)
return "(%s%s%s)" % (left, op, right)
visit_Add = lambda self, node: " + "
visit_Sub = lambda self, node: " - "
visit_Mult = lambda self, node: " * "
visit_Div = lambda self, node: " / "
visit_Pow = lambda self, node: " ^ "
def visit_BoolOp(self, node):
with self.noblock():
op = self.visit(node.op)
return "(%s)" % op.join(map(self.visit, node.values))
visit_And = lambda self, node: " and "
visit_Or = lambda self, node: " or "
def visit_Call(self, node):
with self.noblock():
func = self.visit(node.func)
args = ", ".join(map(self.visit, node.args))
assert not node.keywords
assert node.starargs is None
assert node.kwargs is None
return "%s(%s)" % (func, args)
def visit_arg(self, node):
with self.noblock():
return node.arg
def visit_IfExp(self, node):
with self.noblock():
return "_OP__IFEXP__(%s, %s, %s)" % (
self.visit(node.test),
self.visit(node.body),
self.visit(node.orelse),
)
def visit_Attribute(self, node):
with self.noblock():
name = self.visit(node.value)
#if name.startswith("_R."):
# return name[len("_R."):]
#else:
return "%s.%s" % (name, node.attr)
    def visit_Compare(self, node):
        # Translate a (possibly chained) comparison: a < b < c becomes
        # "a < b and b < c". Identity/membership operators (is, is not, in,
        # not in) are mapped onto runtime helper calls like _OP__Is__(a, b).
        with self.noblock():
            body = [self.visit(node.left)]
            assert node.ops
            assert node.comparators
            for op, value in zip(node.ops, node.comparators):
                value = self.visit(value)
                if isinstance(op, (ast.Is, ast.IsNot, ast.In, ast.NotIn)):
                    op = op.__class__.__name__
                    # Fold the previous operand into a helper call, dropping
                    # the " and " glue that may precede it.
                    last = body.pop()
                    if last.startswith(" and "):
                        last = last[len(" and "):]
                    body.append("_OP__%s__(%s, %s)" % (op, last, value))
                else:
                    op = self.visit(op)
                    body.append("%s%s" % (op, value))
                # Seed the next link of the chain with " and <value>";
                # the final, unused link is popped after the loop.
                body.append(" and %s" % (value,))
            body.pop()
            return "".join(body)
visit_Eq = lambda self, node: " == "
visit_NotEq = lambda self, node: " ~= "
visit_Lt = lambda self, node: " < "
visit_LtE = lambda self, node: " <= "
visit_Gt = lambda self, node: " > "
visit_GtE = lambda self, node: " <= "
# -- Subscripting -- #
# visit_Subscript must be not direct call from visit, it must different
# when assign or see the value??
def visit_Subscript(self, node):
with self.noblock():
name = self.visit(node.value)
if isinstance(node.slice, ast.Index):
return "_OP__subscript__(%s, %s)" % (name, self.visit(node.slice))
else:
return "_OP__subscript__(%s, %s)" % (name, self.visit(node.slice))
def visit_Index(self, node):
with self.noblock():
return self.visit(node.value)
def visit_Slice(self, node):
def visit_SInfo(node):
if node:
return self.visit(node)
return LUA_None
with self.noblock():
sinfo = tuple(map(visit_SInfo, (node.lower, node.upper, node.step)))
return "slice(%s, %s, %s)" % sinfo
def visit_ExtSlice(self, node):
with self.noblock():
# TODO: self.visit(tupleAST)
return "tuple(%s)" % ", ".join(map(self.visit, node.dims))
# -- Comprehensions -- #
# -- Statements -- #
    def visit_AugAssign(self, node):
        # Rewrite "x += y" as the plain assignment "x = x + y" and reuse
        # visit_Assign; the target must already be a defined variable.
        with self.noblock():
            AssignAST = ast.Assign(targets=[node.target],
                                   value=ast.BinOp(node.target, node.op, node.value))
            assert self.visit(node.target) in self.current_block["defined"]
            return self.visit(AssignAST)
    def visit_Assign(self, node):
        # Translate every assignment form (single, chained, attribute,
        # subscript and tuple targets) into Lua statements. Chained targets
        # stash the value in TEMP_VNAME first so it is evaluated only once;
        # call results are pack()ed so multiple returns survive the stash.
        with self.noblock():
            def visit_TestNeedAssignPack(node):
                # Only call results need pack() — they may return multiple values.
                if isinstance(node, ast.Call):
                    return True
                else:
                    return False
            tvalue = TEMP_VNAME
            multi = False
            defined = set()
            def localdefine(rawname, add=True):
                # Register rawname in the enclosing block on first sight and
                # return the "local " prefix; subsequent sights return "".
                if rawname not in self.current_block["defined"]:
                    self.current_block["local_defined"].add(rawname)
                    self.current_block["defined"].add(rawname)
                    if defined is not None:
                        if add:
                            defined.add(rawname)
                    return "local "
                return ""
            if len(node.targets) > 1:
                multi = True
                if visit_TestNeedAssignPack(node.value):
                    result = "%s = pack(%s)" % (tvalue, self.visit(node.value))
                else:
                    result = "%s = %s" % (tvalue, self.visit(node.value))
            else:
                result = ""
                tvalue = self.visit(node.value)
            for target in node.targets:
                if isinstance(target, ast.Name):
                    vname = self.visit(target)
                    result += "; %s%s = %s" % (localdefine(vname, add=False), vname, tvalue)
                elif isinstance(target, ast.Attribute):
                    # TODO: fix t.n.g[2] = 1 problem.
                    vname = self.visit(target.value)
                    self.current_block["global_defined"].add(vname)
                    self.current_block["defined"].add(vname)
                    result += "; %s = %s" % (self.visit(target), tvalue)
                elif isinstance(target, ast.Subscript):
                    pname = self.visit(target.value)
                    index = self.visit(target.slice)
                    vname = pname
                    # Walk down to the root name of a dotted container path.
                    while isinstance(target.value, ast.Attribute):
                        target = target.value
                        vname = target.value
                    vname = self.visit(target.value)
                    self.current_block["global_defined"].add(vname)
                    self.current_block["defined"].add(vname)
                    result += "; _OP__ASSIGN_ITEM__(%s, %s, %s)" % (pname, index, tvalue)
                elif isinstance(target, ast.Tuple):
                    def visit_AssignTargets(node):
                        # Resolve each tuple element (Name or *Starred Name)
                        # to its rendered name and register it locally.
                        if isinstance(node, ast.Name):
                            vname = self.visit(node)
                        elif isinstance(node, ast.Starred):
                            vname = self.visit(node.value)
                        else:
                            raise TypeError("unexcepted %r." % (type(node),))
                        localdefine(vname)
                        return vname
                    var = ", ".join(map(visit_AssignTargets, target.elts))
                    starred = None
                    for no, sub in enumerate(target.elts, 1):
                        if isinstance(sub, ast.Starred):
                            starred = no
                    if starred is None:
                        # TODO: choice one, lua.unpack or unpack use.
                        result += "; %s = lua.unpack(%s)" % (var, tvalue)
                    else:
                        result += "; %s = unpack(%s, %i)" % (var, tvalue, starred)
            if not multi:
                # Single-target case: strip the leading "; " separator.
                assert result.startswith("; ")
                result = result[len("; "):]
            if defined:
                result = "local %s; %s" % (", ".join(defined), result)
            return result
def visit_Assert(self, node):
with self.noblock():
test = self.visit(node.test)
if not node.msg:
return "assert(%s)" % (test,)
else:
return "assert(%s, %s)" % (test, self.visit(node.msg))
def visit_Pass(self, node):
with self.noblock():
return "--pass"
# -- Imports -- #
    def visit_Import(self, node):
        # Translate "import x [as y]" into "local y = import('x')" calls and
        # print them directly (so this statement never returns a string).
        # NOTE(review): the defined-check uses alias.asname even when no
        # asname is given — confirm whether alias.name was intended there.
        result = ""
        with self.noblock():
            # TODO: import.
            for alias in node.names:
                assert isinstance(alias, ast.alias)
                locstr = ""
                if alias.asname not in self.current_block["defined"]:
                    self.current_block["local_defined"].add(alias.asname)
                    self.current_block["defined"].add(alias.asname)
                    locstr = "local "
                result += "; %s%s = import(%r)" % (locstr, alias.asname or alias.name, alias.name)
                if alias.asname:
                    pass
                ## AsNameAst = ast.Assign(targets=[
                ##     ast.Name(id=alias.asname, ctx=ast.Store()),
                ## ], value=ast.Name(id=alias.name, ctx=ast.Load()))
                ##
                ## result += "; %s" % (self.visit(AsNameAst))
        # TODO: make common local define (or global)
        print = self.print
        print(result[len("; "):])
        raise IsControlFlow
# -- Control flow -- #
    def visit_If(self, node):
        # Emit a Lua if/then[/else]/end chain. A single ast.If in the orelse
        # list is Python's elif and is folded into the same Lua "else if".
        print = self.print
        print("if", self.visit(node.test), "then")
        with self.block():
            print("local", TEMP_VNAME)
            for subnode in node.body:
                with self.hasblock():
                    print(self.visit(subnode))
        if len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If):
            # elif: chain into the nested if, which prints its own "end".
            print("else", end="")
            with self.hasblock():
                self.visit_If(node.orelse[0])
            raise IsControlFlow
        elif node.orelse:
            print("else")
            with self.block():
                for subnode in node.orelse:
                    with self.hasblock():
                        print(self.visit(subnode))
        print("end")
        raise IsControlFlow
    def visit_For(self, node):
        # Emit a Lua generic-for over iter(...). continue/break are lowered to
        # goto with randomized labels: the continue label sits at the loop end,
        # and when an orelse exists the break label sits *after* the orelse so
        # breaking skips it — matching Python's for/else semantics.
        # NOTE(review): only direct children of the loop body are scanned, so a
        # continue/break nested inside an if is not rewritten here — confirm.
        print = self.print
        zrand = random.randint(100000, 999999)
        hascont = False
        contname = ""
        hasbreak = False
        breakname = ""
        target = self.visit(node.target)
        if isinstance(node.target, ast.Tuple):
            # TODO: Common Tuple Assign Interface??
            target = target[+1:-1]
        iter = self.visit(node.iter)
        iter = "iter(%s)" % (iter,)
        print("for", target, "in", iter, "do")
        with self.block():
            print("local", TEMP_VNAME)
            for subnode in node.body:
                if isinstance(subnode, ast.Continue):
                    hascont = True
                    contname = "ZCONT_%i" % zrand
                    print("goto", contname, '-- continue')
                elif isinstance(subnode, ast.Break) and node.orelse:
                    hasbreak = True
                    breakname = "ZBREAK_%i" % zrand
                    print("goto", breakname, '-- break')
                else:
                    with self.hasblock():
                        print(self.visit(subnode))
            if hascont:
                print("::", contname, "::", sep="")
        print("end")
        if node.orelse:
            for subnode in node.orelse:
                with self.hasblock():
                    print(self.visit(subnode))
        if hasbreak:
            print("::", breakname, "::", sep="")
        raise IsControlFlow
    def visit_While(self, node):
        # Emit a Lua while/do/end. Same goto-based continue/break lowering as
        # visit_For: continue label at the loop end, break label after the
        # orelse suite so breaking skips it (Python while/else semantics).
        print = self.print
        zrand = random.randint(100000, 999999)
        hascont = False
        contname = ""
        hasbreak = False
        breakname = ""
        print("while", self.visit(node.test), "do")
        with self.block():
            print("local", TEMP_VNAME)
            for subnode in node.body:
                if isinstance(subnode, ast.Continue):
                    hascont = True
                    contname = "ZCONT_%i" % zrand
                    print("goto", contname, '-- continue')
                elif isinstance(subnode, ast.Break) and node.orelse:
                    hasbreak = True
                    breakname = "ZBREAK_%i" % zrand
                    print("goto", breakname, '-- break')
                else:
                    with self.hasblock():
                        print(self.visit(subnode))
            if hascont:
                print("::", contname, "::", sep="")
        print("end")
        if node.orelse:
            for subnode in node.orelse:
                with self.hasblock():
                    print(self.visit(subnode))
        if hasbreak:
            print("::", breakname, "::", sep="")
        raise IsControlFlow
def visit_Try(self, node):
print = self.print
assert not node.handlers
print(TEMP_VNAME, "= {(function()")
with self.block():
print("local", TEMP_VNAME)
for subnode in node.body:
with self.hasblock():
print(self.visit(subnode))
print("end)()}")
for subnode in node.finalbody:
with self.hasblock():
print(self.visit(subnode))
print("if not %s[1] then error(%s[2])" % (TEMP_VNAME, TEMP_VNAME))
raise IsControlFlow
# -- Function and class definitions -- #
    def visit_FunctionDef(self, node):
        # Emit a (possibly local) Lua function. Only plain positional args
        # plus a bare vararg are supported; defaults, keyword-only args and
        # **kwargs are rejected via assertions. Decorators are applied as
        # re-assignments after the definition.
        print = self.print
        fname = node.name
        fargs = ", ".join(map(self.visit, node.args.args))
        # 'arg' would collide with Lua's implicit vararg table.
        assert ", arg, " not in ", %s, " % fargs
        if node.args.varargannotation:
            fargs += ", ..."
        assert not node.args.kwonlyargs
        #assert not node.args.varargannotation
        assert not node.args.kwarg
        assert not node.args.kwargannotation
        assert not node.args.defaults
        assert not node.args.kw_defaults
        assert not node.returns
        if fname not in self.current_block["defined"]:
            self.current_block["local_defined"].add(fname)
            self.current_block["defined"].add(fname)
            print("local", end=" ")
        print("function %s(%s)" % (fname, fargs))
        with self.block():
            print("local", TEMP_VNAME)
            if node.args.varargannotation:
                # Bind Lua's vararg table to the annotated vararg name.
                print("local %s = arg" % (node.args.varargannotation,))
            for subnode in node.body:
                with self.hasblock():
                    print(self.visit(subnode))
        print("end")
        for decorator in node.decorator_list:
            print("%s = %s(%s)" % (fname, self.visit(decorator), fname))
        raise IsControlFlow
    def visit_Call(self, node):
        # NOTE(review): byte-identical duplicate of the visit_Call defined
        # earlier in this class; this later definition is the one that takes
        # effect. One of the two should be removed.
        with self.noblock():
            func = self.visit(node.func)
            args = ", ".join(map(self.visit, node.args))
            assert not node.keywords
            assert node.starargs is None
            assert node.kwargs is None
            return "%s(%s)" % (func, args)
    def visit_ClassDef(self, node):
        # Emit the class body as a closure stored in TEMP_VNAME, then build
        # the class with a __build_class__(body, name, *bases) call — mirroring
        # CPython's own class-creation protocol. Decorators re-assign the name.
        print = self.print
        print(TEMP_VNAME, "= function ()")
        with self.block():
            print("local", TEMP_VNAME)
            for subnode in node.body:
                with self.hasblock():
                    print(self.visit(subnode))
        print("end")
        fname = node.name
        classmakeAST = ast.Call(
            func=ast.Name(id='__build_class__', ctx=ast.Load()),
            args=[ast.Name(id=TEMP_VNAME, ctx=ast.Load()), ast.Str(fname)] + node.bases,
            keywords=node.keywords,
            starargs=node.starargs,
            kwargs=node.kwargs,
        )
        if fname not in self.current_block["defined"]:
            self.current_block["local_defined"].add(fname)
            self.current_block["defined"].add(fname)
            print("local", end=" ")
        print(fname, "=", self.visit(classmakeAST))
        for decorator in node.decorator_list:
            print("%s = %s(%s)" % (fname, self.visit(decorator), fname))
        raise IsControlFlow
def visit_Lambda(self, node):
fargs = ", ".join(map(self.visit, node.args.args))
assert not node.args.kwonlyargs
assert not node.args.varargannotation
assert not node.args.kwarg
assert not node.args.kwargannotation
assert not node.args.defaults
assert not node.args.kw_defaults
result = "(function(%s) " % fargs
with self.noblock():
result += self.visit(node.body)
result += " end)"
return result
def visit_Return(self, node):
with self.noblock():
return "return %s" % (self.visit(node.value),)
def visit_Break(self, node):
with self.noblock():
return "break"
def visit_Global(self, node):
# TODO: add error if already defined by local but try define with global
# and mixed local + global + nonlocal + etc.
innerblock = self.blocks[-1]
global_defined = innerblock["global_defined"]
defined = innerblock["defined"]
for name in node.names:
global_defined.add(name)
defined.add(name)
raise IsControlFlow
def visit_Nonlocal(self, node):
innerblock = self.blocks[-1]
global_defined = innerblock["nonlocal_defined"]
defined = innerblock["defined"]
for name in node.names:
global_defined.add(name)
defined.add(name)
raise IsControlFlow
class LuaCodeGeneratorNotSupported(LuaCodeGenerator):
    """Walk an AST and collect the node types the generator cannot handle."""

    def check(self, node):
        # Return the set of unsupported node class names found under node.
        self.found = set()
        self.visit(node)
        return self.found

    def visit(self, node):
        # Record any node lacking a visit_* handler, then always recurse.
        if not hasattr(self, 'visit_' + node.__class__.__name__):
            self.found.add(node.__class__.__name__)
        self.generic_visit(node)

    def generic_visit(self, node):
        # Visit every child AST node, including those nested inside lists.
        for _field, value in ast.iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
            elif isinstance(value, AST):
                self.visit(value)
# Sample snippet compiled by main() below as a quick smoke test.
code = """\
t = 3
"""
def main():
    # Demo driver: parse the sample snippet, dump its AST, report unsupported
    # node types, then print the generated Lua code.
    codetree = ast.parse(code, mode="single")
    print("===== INPUT ====")
    print(code)
    print("===== TREE =====")
    visitor = ShowTree()
    visitor.visit(codetree)
    print()
    print("===== NON_SUPPORT =====")
    print(LuaCodeGeneratorNotSupported().check(codetree))
    print()
    print("===== OUTPUT =====")
    print(LuaCodeGenerator().visit(codetree))
# Run the demo translation when executed as a script.
if __name__ == '__main__':
    main()
| EcmaXp/PyCraft | pc_old.py | Python | gpl-2.0 | 24,863 | [
"VisIt"
] | d21552065e4369e819dce33302e818d1c2161a325c465690eb04055ca765b57e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mail-Merge for Scribus. This file provides the backend.
#
# For further information (manual, description, etc.) please visit:
# https://github.com/berteh/ScribusGenerator/
#
# v2.9.1 (2021-01-22): update port to Python3 for Scribut 1.5.6+, various DOC update
#
"""
The MIT License
Copyright (c) 2010-2014 Ekkehard Will (www.ekkehardwill.de), 2014-2021 Berteh (https://github.com/berteh/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import csv
import os
import platform
import logging
import logging.config
import sys
import xml.etree.ElementTree as ET
import json
import re
import string
class CONST:
    """Namespace for Scribus Generator's constants and tunable defaults."""
    # Constants for general usage
    TRUE = 1
    FALSE = 0
    EMPTY = ''
    APP_NAME = 'Scribus Generator'
    FORMAT_PDF = 'PDF'
    FORMAT_SLA = 'Scribus'
    FILE_EXTENSION_PDF = 'pdf'
    FILE_EXTENSION_SCRIBUS = 'sla'
    SEP_PATH = '/'  # In any case we use '/' as path separator on any platform
    SEP_EXT = os.extsep
    # CSV entry separator, comma by default; tab: "	" is also common if using Excel.
    CSV_SEP = ","
    # indent the generated SLA code for more readability, aka "XML pretty print". set to 1 if you want to edit generated SLA manually.
    INDENT_SLA = 0
    CONTRIB_TEXT = "\npowered by ScribusGenerator - https://github.com/berteh/ScribusGenerator/"
    STORAGE_NAME = "ScribusGeneratorDefaultSettings"
    # set to 0 to prevent removal of un-subsituted variables, along with their empty containing itext
    CLEAN_UNUSED_EMPTY_VARS = 1
    # set to 0 to keep the separating element before an unused/empty variable, typicaly a linefeed (<para>) or list syntax token (,;-.)
    REMOVE_CLEANED_ELEMENT_PREFIX = 1
    # set to 0 to replace all tabs and linebreaks in csv data by simple spaces.
    KEEP_TAB_LINEBREAK = 1
    SG_VERSION = '2.9.1 python2'
    # set to any word you'd like to use to trigger a jump to the next data record. using a name similar to the variables %VAR_ ... % will ensure it is cleaned after generation, and not show in the final document(s).
    NEXT_RECORD = '%SG_NEXT-RECORD%'
class ScribusGenerator:
# The Generator Module has all the logic and will do all the work
    def __init__(self, dataObject):
        # dataObject carries all generation settings (paths, format, range...).
        self.__dataObject = dataObject
        # Logging is configured from the logging.conf shipped next to this module.
        logging.config.fileConfig(os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'logging.conf'))
        # todo: check if logging works, if not warn user to configure log file path and disable.
        logging.info("ScribusGenerator initialized")
        logging.debug("OS: %s - Python: %s - ScribusGenerator v%s" %
                      (os.name, platform.python_version(), CONST.SG_VERSION))
    def run(self):
        # Read CSV data and replace the variables in the Scribus File with the cooresponding data. Finaly export to the specified format.
        # may throw exceptions if errors are met, use traceback to get all error details
        # Returns 1 on success, -1 when the data file is unusable.
        # log options
        optionsTxt = self.__dataObject.toString()
        logging.debug("active options: %s%s" %
                      (optionsTxt[:1], optionsTxt[172:]))
        # output file name
        if(self.__dataObject.getSingleOutput() and (self.__dataObject.getOutputFileName() is CONST.EMPTY)):
            self.__dataObject.setOutputFileName(os.path.split(os.path.splitext(
                self.__dataObject.getScribusSourceFile())[0])[1] + '__single')
        # source sla
        logging.info("parsing scribus source file %s" %
                     (self.__dataObject.getScribusSourceFile()))
        try:
            tree = ET.parse(self.__dataObject.getScribusSourceFile())
        except IOError as exception:
            logging.error("Scribus file not found: %s" %
                          (self.__dataObject.getScribusSourceFile()))
            raise
        root = tree.getroot()
        version = root.get('Version')
        logging.debug("Scribus SLA template version is %s" % (version))
        # save settings
        if (self.__dataObject.getSaveSettings()):
            serial = self.__dataObject.toString()
            # as: %s"%serial)
            logging.debug(
                "saving current Scribus Generator settings in your source file")
            docElt = root.find('DOCUMENT')
            storageElt = docElt.find('./JAVA[@NAME="'+CONST.STORAGE_NAME+'"]')
            if (storageElt is None):
                # Settings are stored in a JAVA script element right before the
                # first COLOR element of the DOCUMENT.
                colorElt = docElt.find('./COLOR[1]')
                scriptPos = docElt.getchildren().index(colorElt)
                logging.debug(
                    "creating new storage element in SLA template at position %s" % scriptPos)
                storageElt = ET.Element("JAVA", {"NAME": CONST.STORAGE_NAME})
                docElt.insert(scriptPos, storageElt)
            storageElt.set("SCRIPT", serial)
            # todo check if scribus reloads (or overwrites :/ ) when doc is opened, opt use API to add a script if there's an open doc.
            tree.write(self.__dataObject.getScribusSourceFile())
        # data
        logging.info("parsing data source file %s" %
                     (self.__dataObject.getDataSourceFile()))
        try:
            csvData = self.getCsvData(self.__dataObject.getDataSourceFile())
        except IOError as exception:
            logging.error("CSV file not found: %s" %
                          (self.__dataObject.getDataSourceFile()))
            raise
        if(len(csvData) < 1):
            logging.error("Data file %s is empty. At least a header line and a line of data is needed. Halting." % (
                self.__dataObject.getDataSourceFile()))
            return -1
        if(len(csvData) < 2):
            logging.error("Data file %s has only one line. At least a header line and a line of data is needed. Halting." % (
                self.__dataObject.getDataSourceFile()))
            return -1
        # range
        firstElement = 1
        if(self.__dataObject.getFirstRow() != CONST.EMPTY):
            try:
                newFirstElementValue = int(self.__dataObject.getFirstRow())
                # Guard against 0 or negative numbers
                firstElement = max(newFirstElementValue, 1)
            except:
                logging.warning(
                    "Could not parse value of 'first row' as an integer, using default value instead")
        lastElement = len(csvData)
        if(self.__dataObject.getLastRow() != CONST.EMPTY):
            try:
                newLastElementValue = int(self.__dataObject.getLastRow())
                # Guard against numbers higher than the length of csvData
                lastElement = min(newLastElementValue + 1, lastElement)
            except:
                logging.warning(
                    "Could not parse value of 'last row' as an integer, using default value instead")
        if ((firstElement != 1) or (lastElement != len(csvData))):
            csvData = csvData[0:1] + csvData[firstElement: lastElement]
            logging.debug("custom data range is: %s - %s" %
                          (firstElement, lastElement))
        else:
            logging.debug("full data range will be used")
        # generation
        dataC = len(csvData)-1
        fillCount = len(str(dataC))
        # XML-Content/Text-Content of the Source Scribus File (List of Lines)
        template = []
        outputFileNames = []
        index = 0  # current data record
        rootStr = ET.tostring(root, encoding='utf8', method='xml')
        # number of data records appearing in source document
        # NOTE(review): string.count() is the Python 2 module function
        # (SG_VERSION declares python2); Python 3 would need rootStr.count().
        recordsInDocument = 1 + string.count(rootStr, CONST.NEXT_RECORD)
        logging.info("source document consumes %s data record(s) from %s." %
                     (recordsInDocument, dataC))
        dataBuffer = []
        for row in csvData:
            if(index == 0):  # first line is the Header-Row of the CSV-File
                varNamesForFileName = row
                varNamesForReplacingVariables = self.encodeScribusXML([row])[0]
                # overwrite attributes from their /*/ItemAttribute[Parameter=SGAttribute] sibling, when applicable.
                templateElt = self.overwriteAttributesFromSGAttributes(root)
            else:  # index > 0, row is one data entry
                # accumulate row in buffer
                dataBuffer.append(row)
                # buffered data for all document records OR reached last data record
                if (index % recordsInDocument == 0) or index == dataC:
                    # subsitute
                    outContent = self.substituteData(varNamesForReplacingVariables, self.encodeScribusXML(dataBuffer),
                                                     ET.tostring(templateElt, method='xml').split('\n'), keepTabsLF=CONST.KEEP_TAB_LINEBREAK)
                    if (self.__dataObject.getSingleOutput()):
                        # first substitution, update DOCUMENT properties
                        if (index == min(recordsInDocument,dataC)):
                            logging.debug(
                                "generating reference content from dataBuffer #1")
                            outputElt = ET.fromstring(outContent)
                            docElt = outputElt.find('DOCUMENT')
                            pagescount = int(docElt.get('ANZPAGES'))
                            pageheight = float(docElt.get('PAGEHEIGHT'))
                            vgap = float(docElt.get('GapVertical'))
                            groupscount = int(docElt.get('GROUPC'))
                            objscount = len(outputElt.findall('.//PAGEOBJECT'))
                            logging.debug(
                                "current template has #%s pageobjects" % (objscount))
                            # if version.startswith('1.4'):
                            #     docElt.set('GROUPC', str(groupscount*dataC))
                            # todo replace +1 by roundup()
                            docElt.set('ANZPAGES', str(
                                pagescount*dataC//recordsInDocument + 1))
                            docElt.set('DOCCONTRIB', docElt.get(
                                'DOCCONTRIB')+CONST.CONTRIB_TEXT)
                        else:  # not first substitution, append DOCUMENT content
                            logging.debug(
                                "merging content from dataBuffer #%s" % (index))
                            tmpElt = ET.fromstring(outContent).find('DOCUMENT')
                            shiftedElts = self.shiftPagesAndObjects(
                                tmpElt, pagescount, pageheight, vgap, index-1, recordsInDocument, groupscount, objscount, version)
                            docElt.extend(shiftedElts)
                    else:  # write one of multiple sla
                        outputFileName = self.createOutputFileName(
                            index, self.__dataObject.getOutputFileName(), varNamesForFileName, dataBuffer, fillCount)
                        self.writeSLA(ET.fromstring(
                            outContent), outputFileName)
                        outputFileNames.append(outputFileName)
                    dataBuffer = []
            index = index + 1
        # clean & write single sla
        if (self.__dataObject.getSingleOutput()):
            self.writeSLA(outputElt, self.__dataObject.getOutputFileName())
            outputFileNames.append(self.__dataObject.getOutputFileName())
        # Export the generated Scribus Files as PDF
        if(CONST.FORMAT_PDF == self.__dataObject.getOutputFormat()):
            for outputFileName in outputFileNames:
                pdfOutputFilePath = self.createOutputFilePath(
                    self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_PDF)
                scribusOutputFilePath = self.createOutputFilePath(
                    self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
                self.exportPDF(scribusOutputFilePath, pdfOutputFilePath)
                logging.info("pdf file created: %s" % (pdfOutputFilePath))
        # Cleanup the generated Scribus Files
        if(not (CONST.FORMAT_SLA == self.__dataObject.getOutputFormat()) and CONST.FALSE == self.__dataObject.getKeepGeneratedScribusFiles()):
            for outputFileName in outputFileNames:
                scribusOutputFilePath = self.createOutputFilePath(
                    self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
                self.deleteFile(scribusOutputFilePath)
        return 1
def exportPDF(self, scribusFilePath, pdfFilePath):
import scribus
d = os.path.dirname(pdfFilePath)
if not os.path.exists(d):
os.makedirs(d)
# Export to PDF
scribus.openDoc(scribusFilePath)
listOfPages = []
i = 0
while (i < scribus.pageCount()):
i = i + 1
listOfPages.append(i)
pdfExport = scribus.PDFfile()
pdfExport.info = CONST.APP_NAME
pdfExport.file = str(pdfFilePath)
pdfExport.pages = listOfPages
pdfExport.save()
scribus.closeDoc()
    def writeSLA(self, slaET, outFileName, clean=CONST.CLEAN_UNUSED_EMPTY_VARS, indentSLA=CONST.INDENT_SLA):
        # write SLA to filepath computed from given elements, optionnaly cleaning empty ITEXT elements and their empty PAGEOBJECTS
        # Returns the path of the written file. Pretty-printing (indentSLA)
        # goes through minidom; otherwise ElementTree writes directly.
        scribusOutputFilePath = self.createOutputFilePath(
            self.__dataObject.getOutputDirectory(), outFileName, CONST.FILE_EXTENSION_SCRIBUS)
        d = os.path.dirname(scribusOutputFilePath)
        if not os.path.exists(d):
            os.makedirs(d)
        outTree = ET.ElementTree(slaET)
        if (clean):
            self.removeEmptyTexts(outTree.getroot())
        if (indentSLA):
            from xml.dom import minidom
            xmlstr = minidom.parseString(ET.tostring(outTree.getroot())).toprettyxml(indent="   ")
            with open(scribusOutputFilePath, "w") as f:
                f.write(xmlstr.encode('utf-8'))
        else:
            outTree.write(scribusOutputFilePath, encoding="UTF-8")
        logging.info("scribus file created: %s" % (scribusOutputFilePath))
        return scribusOutputFilePath
def overwriteAttributesFromSGAttributes(self, root):
# modifies root such that
# attributes have been rewritten from their /*/ItemAttribute[Parameter=SGAttribute] sibling, when applicable.
#
# allows to use %VAR_<var-name>% in Item Attribute to overwrite internal attributes (eg FONT)
for pageobject in root.findall(".//ItemAttribute[@Parameter='SGAttribute']/../.."):
for sga in pageobject.findall(".//ItemAttribute[@Parameter='SGAttribute']"):
attribute = sga.get('Name')
value = sga.get('Value')
ref = sga.get('RelationshipTo')
if ref is "": # Cannot use 'default' on .get() as it is "" by default in SLA file.
# target is pageobject by default. Cannot use ".|*" as not supported by ET.
ref = "."
elif ref.startswith("/"): # ET cannot use absolute path on element
ref = "."+ref
try:
targets = pageobject.findall(ref)
if targets:
for target in targets:
logging.debug('overwriting value of %s in %s with "%s"' % (
attribute, target.tag, value))
target.set(attribute, value)
else:
logging.error('Target "%s" could be parsed but designated no node. Check it out as it is probably not what you expected to replace %s.' % (
ref, attribute)) # todo message to user
except SyntaxError:
logging.error('XPATH expression "%s" could not be parsed by ElementTree to overwrite %s. Skipping.' % (
ref, attribute)) # todo message to user
return root
    def shiftPagesAndObjects(self, docElt, pagescount, pageheight, vgap, index, recordsInDocument, groupscount, objscount, version):
        # Shift PAGE and PAGEOBJECT elements of one merged record down by one
        # template-height (plus vertical gap) and renumber pages/objects so a
        # single-output document can stack all records. Returns the shifted
        # elements; docElt's children are mutated in place.
        shifted = []
        voffset = (float(pageheight)+float(vgap)) * \
            (index // recordsInDocument)
        for page in docElt.findall('PAGE'):
            page.set('PAGEYPOS', str(float(page.get('PAGEYPOS')) + voffset))
            page.set('NUM', str(int(page.get('NUM')) + pagescount))
            shifted.append(page)
        for obj in docElt.findall('PAGEOBJECT'):
            obj.set('YPOS', str(float(obj.get('YPOS')) + voffset))
            obj.set('OwnPage', str(int(obj.get('OwnPage')) + pagescount))
            # update ID and links
            if version.startswith('1.4'):
                # 1.4 uses positional indexes for frame links.
                # if not (int(obj.get('NUMGROUP')) == 0):
                #     obj.set('NUMGROUP', str(int(obj.get('NUMGROUP')) + groupscount * index))
                # next linked frame by position
                if (obj.get('NEXTITEM') != None and (str(obj.get('NEXTITEM')) != "-1")):
                    obj.set('NEXTITEM', str(
                        int(obj.get('NEXTITEM')) + (objscount * index)))
                # previous linked frame by position
                if (obj.get('BACKITEM') != None and (str(obj.get('BACKITEM')) != "-1")):
                    obj.set('BACKITEM', str(
                        int(obj.get('BACKITEM')) + (objscount * index)))
            else:  # 1.5, 1.6
                logging.debug("shifting object %s (#%s)" %
                              (obj.tag, obj.get('ItemID')))
                # todo update ID with something unlikely allocated, TODO ensure unique ID instead of 6:, issue #101
                # NOTE(review): the [7:] suffix slice presumably keeps a
                # near-unique tail of the numeric ItemID — confirm against
                # issue #101 before touching it.
                obj.set('ItemID', str(objscount * index) +
                        str(int(obj.get('ItemID')))[7:])
                # next linked frame by ItemID
                if (obj.get('NEXTITEM') != None and (str(obj.get('NEXTITEM')) != "-1")):
                    obj.set('NEXTITEM', str(objscount * index) +
                            str(int(obj.get('NEXTITEM')))[7:])
                # previous linked frame by ItemID
                if (obj.get('BACKITEM') != None and (str(obj.get('BACKITEM')) != "-1")):
                    obj.set('BACKITEM', str(objscount * index) +
                            str(int(obj.get('BACKITEM')))[7:])
            shifted.append(obj)
        logging.debug("shifted page %s element of %s" % (index, voffset))
        return shifted
def removeEmptyTexts(self, root):
# *modifies* root ElementTree by removing empty text elements and their empty placeholders.
# returns number of ITEXT elements deleted.
# 1. clean text in which some variable-like text is not substituted (ie: known or unknown variable):
# <ITEXT CH="empty %VAR_empty% variable should not show" FONT="Arial Regular" />
# 2. remove <ITEXT> with empty @CH and precedings <para/> if any
# 3. remove any <PAGEOBJECT> that has no <ITEXT> child left
emptyXPath = "ITEXT[@CH='']"
d = 0
# little obscure because its parent is needed to remove an element, and ElementTree has no parent() method.
for page in root.findall(".//%s/../.." % emptyXPath):
# collect emptyXPath and <para> that precede for removal, iter is need for lack of sibling-previous navigation in ElementTree
for po in page.findall(".//%s/.." % emptyXPath):
trash = []
for pos, item in enumerate(po):
if (item.tag == "ITEXT") and (item.get("CH") == ""):
logging.debug(
"cleaning 1 empty ITEXT and preceding linefeed (opt.)")
if (CONST.REMOVE_CLEANED_ELEMENT_PREFIX and po[pos-1].tag == "para"):
trash.append(pos-1)
trash.append(pos)
d += 1
trash.reverse()
# remove trashed elements as stack (lifo order), to preserve positions validity
for i in trash:
po.remove(po[i])
if (len(po.findall("ITEXT")) is 0):
logging.debug("cleaning 1 empty PAGEOBJECT")
page.remove(po)
logging.info("removed %d empty texts items" % d)
return d
    def deleteFile(self, outputFilePath):
        # Delete the temporarily generated files from off the file system
        os.remove(outputFilePath)
def createOutputFilePath(self, outputDirectory, outputFileName, fileExtension):
# Build the absolute path, like C:/tmp/template.sla
return outputDirectory + CONST.SEP_PATH + outputFileName + CONST.SEP_EXT + fileExtension
    def createOutputFileName(self, index, outputFileName, varNames, rows, fillCount):
        # If the User has not set an Output File Name, an internal unique file name
        # will be generated which is the index of the loop.
        # Otherwise the user pattern has its %VAR_*% substituted and characters
        # Windows forbids replaced by underscores.
        result = str(index)
        result = result.zfill(fillCount)
        # Following characters are not allowed for File-Names on WINDOWS: < > ? " : | \ / *
        # Note / is still allowed in filename as it allows dynamic subdirectory in Linux (issue 102); todo check & fix for Windows
        if(CONST.EMPTY != outputFileName):
            table = {
                # ord(u'ä'): u'ae',
                # ord(u'Ä'): u'Ae',
                # ord(u'ö'): u'oe',
                # ord(u'Ö'): u'Oe',
                # ord(u'ü'): u'ue',
                # ord(u'Ü'): u'Ue',
                # ord(u'ß'): u'ss',
                ord(u'<'): u'_',
                ord(u'>'): u'_',
                ord(u'?'): u'_',
                ord(u'"'): u'_',
                ord(u':'): u'_',
                ord(u'|'): u'_',
                ord(u'\\'): u'_',
                # ord(u'/'): u'_',
                ord(u'*'): u'_'
            }
            result = self.substituteData(varNames, rows, [outputFileName])
            # NOTE(review): bytes-to-unicode decode is Python 2 specific
            # (SG_VERSION declares python2); str.translate then maps the
            # forbidden characters via the ordinal table above.
            result = result.decode('utf_8')
            result = result.translate(table)
            logging.debug("output file name is %s" % result)
        return result
def copyScribusContent(self, src):
# Returns a plain copy of src where src is expected to be a list (of text lines)
result = []
for line in src:
result.append(line)
return result
def readFileContent(self, src):
# Returns the list of lines (as strings) of the text-file
tmp = open(src, 'r')
result = tmp.readlines()
tmp.close()
return result
def encodeScribusXML(self, rows):
# Encode some characters that can be found in CSV into XML entities
# not all are needed as Scribus handles most UTF8 characters just fine.
result = []
replacements = {'&':'&', '"':'"', '<':'<'}
for row in rows:
res1 = []
for i in row:
res1.append(self.multiple_replace(i, replacements))
result.append(res1)
return result
def multiple_replace(self, string, rep_dict):
# multiple simultaneous string replacements, per http://stackoverflow.com/a/15448887/1694411)
# combine with dictionary = dict(zip(keys, values)) to use on arrays
pattern = re.compile("|".join([re.escape(k)
for k in rep_dict.keys()]), re.M)
return pattern.sub(lambda x: rep_dict[x.group(0)], string)
    # lines as list of strings
    def substituteData(self, varNames, rows, lines, clean=CONST.CLEAN_UNUSED_EMPTY_VARS, keepTabsLF=0):
        """Replace %VAR_name% placeholders in *lines* with CSV data.

        varNames   -- list of variable names (typically the CSV header row)
        rows       -- 2-D list of data records; record 0 is used until a
                      CONST.NEXT_RECORD marker advances to the next record
        lines      -- the Scribus document content as a list of strings
        clean      -- when truthy, strip any %VAR_...% left unreplaced
        keepTabsLF -- when 1, convert tab / newline characters inside ITEXT
                      elements into Scribus <tab /> and <breakline /> markup

        Returns the substituted document as a single string.
        """
        result = ''
        currentRecord = 0
        replacements = dict(
            zip(['%VAR_'+i+'%' for i in varNames], rows[currentRecord]))
        #logging.debug("replacements is: %s"%replacements)
        # done in string instead of XML for lack of efficient attribute-value-based substring-search in ElementTree
        for idx, line in enumerate(lines):
            # logging.debug("replacing vars in (out of %s): %s"%(len(line), line[:30]))
            # skip un-needed computations and preserve colors declarations
            if (re.search('%VAR_|'+CONST.NEXT_RECORD, line) == None) or (re.search('\s*<COLOR\s+', line) != None):
                result = result + line
                # logging.debug("  keeping intact %s"%line[:30])
                continue
            # detect NEXT_RECORD and advance to the following data row
            if CONST.NEXT_RECORD in line:
                currentRecord += 1
                if currentRecord < len(rows):
                    logging.debug("loading next record")
                    replacements = dict(
                        zip(['%VAR_'+i+'%' for i in varNames], rows[currentRecord]))
                else: # last record reach, leave remaing variables to be cleaned
                    replacements = {
                        "END-OF-REPLACEMENTS": "END-OF-REPLACEMENTS"}
                    logging.debug("next record reached last data entry")
            # replace with data
            logging.debug("replacing VARS_* in %s" % line[:30].strip())
            line = self.multiple_replace(line, replacements)
            #logging.debug("replaced in line: %s" % line)
            # remove (& trim) any (unused) %VAR_\w*% like string.
            if (clean):
                if (CONST.REMOVE_CLEANED_ELEMENT_PREFIX):
                    # also swallow a leading separator (comma/semicolon/dash)
                    (line, d) = re.subn('\s*[,;-]*\s*%VAR_\w*%\s*', '', line)
                else:
                    (line, d) = re.subn('\s*%VAR_\w*%\s*', '', line)
                if (d > 0):
                    logging.debug("cleaned %d empty variable" % d)
                (line, d) = re.subn('\s*%s\w*\s*' %
                                    CONST.NEXT_RECORD, '', line)
            # convert \t and \n into scribus <tab/> and <linebreak/>
            if (keepTabsLF == 1) and (re.search('[\t\n]+', line, flags=re.MULTILINE)):
                # split the ITEXT element around each tab/newline run so the
                # markup elements can be inserted between ITEXT fragments
                m = re.search(
                    '(<ITEXT.* CH=")([^"]+)(".*/>)', line, flags=re.MULTILINE | re.DOTALL)
                if m:
                    begm = m.group(1)
                    endm = m.group(3)
                    # logging.debug("converting tabs and linebreaks in line: %s"%(line))
                    line = re.sub('([\t\n]+)', endm + '\g<1>' +
                                  begm, line, flags=re.MULTILINE)
                    # replace \t and \n
                    line = re.sub('\t', '<tab />', line)
                    line = re.sub('\n', '<breakline />',
                                  line, flags=re.MULTILINE)
                    logging.debug(
                        "converted tabs and linebreaks in line: %s" % line)
                else:
                    logging.warning(
                        "could not convert tabs and linebreaks in this line, kindly report this to the developppers: %s" % (line))
            result = result + line
        return result
def getCsvData(self, csvfile):
# Read CSV file and return 2-dimensional list containing the data ,
# TODO check to replace with https://docs.python.org/3/library/csv.html#csv.DictReader
reader = csv.reader(file(csvfile), delimiter=self.__dataObject.getCsvSeparator(
), skipinitialspace=True, doublequote=True)
result = []
for row in reader:
if(len(row) > 0): # strip empty lines in source CSV
rowlist = []
for col in row:
rowlist.append(col)
result.append(rowlist)
return result
    def getLog(self):
        # Expose the module-level logging object to callers (e.g. the UI).
        return logging
    def getSavedSettings(self):
        """Return the JSON settings string previously stored in the Scribus
        source file (inside a JAVA element named CONST.STORAGE_NAME), or
        None when the file has no stored settings or cannot be parsed."""
        logging.debug("parsing scribus source file %s for user settings" % (
            self.__dataObject.getScribusSourceFile()))
        try:
            t = ET.parse(self.__dataObject.getScribusSourceFile())
            r = t.getroot()
            doc = r.find('DOCUMENT')
            # XPath-with-attribute lookup requires ElementTree >= Python 2.7
            storage = doc.find('./JAVA[@NAME="'+CONST.STORAGE_NAME+'"]')
            return storage.get("SCRIPT")
        except SyntaxError as exception:
            logging.error(
                "Loading settings in only possible with Python 2.7 and later, please update your system: %s" % exception)
            return None
        except Exception as exception:
            logging.debug("could not load the user settings: %s" % exception)
            return None
class GeneratorDataObject:
    """Data Object for transfering the settings made by the user on the
    UI / CLI between the front-ends and the generator back-end.

    Plain accessor-style container; toString()/loadFromString() (de)serialize
    every option except scribusSourceFile and saveSettings.
    """
    def __init__(self,
                 scribusSourceFile=CONST.EMPTY,
                 dataSourceFile=CONST.EMPTY,
                 outputDirectory=CONST.EMPTY,
                 outputFileName=CONST.EMPTY,
                 outputFormat=CONST.EMPTY,
                 keepGeneratedScribusFiles=CONST.FALSE,
                 csvSeparator=CONST.CSV_SEP,
                 singleOutput=CONST.FALSE,
                 firstRow=CONST.EMPTY,
                 lastRow=CONST.EMPTY,
                 saveSettings=CONST.TRUE,
                 closeDialog=CONST.FALSE):
        self.__scribusSourceFile = scribusSourceFile
        self.__dataSourceFile = dataSourceFile
        self.__outputDirectory = outputDirectory
        self.__outputFileName = outputFileName
        self.__outputFormat = outputFormat
        self.__keepGeneratedScribusFiles = keepGeneratedScribusFiles
        self.__csvSeparator = csvSeparator
        self.__singleOutput = singleOutput
        self.__firstRow = firstRow
        self.__lastRow = lastRow
        self.__saveSettings = saveSettings
        self.__closeDialog = closeDialog
    # Get
    def getScribusSourceFile(self):
        return self.__scribusSourceFile
    def getDataSourceFile(self):
        return self.__dataSourceFile
    def getOutputDirectory(self):
        return self.__outputDirectory
    def getOutputFileName(self):
        return self.__outputFileName
    def getOutputFormat(self):
        return self.__outputFormat
    def getKeepGeneratedScribusFiles(self):
        return self.__keepGeneratedScribusFiles
    def getCsvSeparator(self):
        return self.__csvSeparator
    def getSingleOutput(self):
        return self.__singleOutput
    def getFirstRow(self):
        return self.__firstRow
    def getLastRow(self):
        return self.__lastRow
    def getSaveSettings(self):
        return self.__saveSettings
    def getCloseDialog(self):
        return self.__closeDialog
    # Set
    def setScribusSourceFile(self, fileName):
        self.__scribusSourceFile = fileName
    def setDataSourceFile(self, fileName):
        self.__dataSourceFile = fileName
    def setOutputDirectory(self, directory):
        self.__outputDirectory = directory
    def setOutputFileName(self, fileName):
        self.__outputFileName = fileName
    def setOutputFormat(self, outputFormat):
        self.__outputFormat = outputFormat
    def setKeepGeneratedScribusFiles(self, value):
        self.__keepGeneratedScribusFiles = value
    def setCsvSeparator(self, value):
        self.__csvSeparator = value
    def setSingleOutput(self, value):
        self.__singleOutput = value
    def setFirstRow(self, value):
        self.__firstRow = value
    def setLastRow(self, value):
        self.__lastRow = value
    def setSaveSettings(self, value):
        self.__saveSettings = value
    def setCloseDialog(self, value):
        self.__closeDialog = value
    # (de)Serialize all options but scribusSourceFile and saveSettings
    def toString(self):
        """Serialize the transferable options as a sorted JSON string."""
        return json.dumps({
            '_comment': "this is an automated placeholder for ScribusGenerator default settings. more info at https://github.com/berteh/ScribusGenerator/. modify at your own risks.",
            # 'scribusfile':self.__scribusSourceFile NOT saved
            'csvfile': self.__dataSourceFile,
            'outdir': self.__outputDirectory,
            'outname': self.__outputFileName,
            'outformat': self.__outputFormat,
            'keepsla': self.__keepGeneratedScribusFiles,
            'separator': self.__csvSeparator,
            'single': self.__singleOutput,
            'from': self.__firstRow,
            'to': self.__lastRow,
            'close': self.__closeDialog
            # 'savesettings':self.__saveSettings NOT saved
        }, sort_keys=True)
    # todo add validity/plausibility checks on all values?
    def loadFromString(self, string):
        """Restore the options from a JSON string produced by toString().
        None values are normalized to CONST.EMPTY.  Returns the parsed dict.
        NOTE: uses dict.iteritems(), i.e. Python-2-only code."""
        j = json.loads(string)
        for k, v in j.iteritems():
            if v == None:
                j[k] = CONST.EMPTY
        # self.__scribusSourceFile NOT loaded
        self.__dataSourceFile = j['csvfile']
        self.__outputDirectory = j['outdir']
        self.__outputFileName = j['outname']
        self.__outputFormat = j['outformat']
        self.__keepGeneratedScribusFiles = j['keepsla']
        # str()to prevent TypeError: : "delimiter" must be string, not unicode, in csv.reader() call
        self.__csvSeparator = str(j['separator'])
        self.__singleOutput = j["single"]
        self.__firstRow = j["from"]
        self.__lastRow = j["to"]
        self.__closeDialog = j["close"]
        # self.__saveSettings NOT loaded
        logging.debug("loaded %d user settings" %
                      (len(j)-1)) # -1 for the artificial "comment"
        return j
| berteh/ScribusGenerator | ScribusGeneratorBackend.py | Python | mit | 35,118 | [
"VisIt"
] | de810dec91ce25a4e353d6d68c4508527dab0ad4a6258cf2fbea2af23f3b774c |
import os, sys, re, shutil, json, zipfile, subprocess
# import os.path
import xbmc, xbmcaddon, xbmcvfs
# from xbmcswift2 import xbmcgui
from resources.lib.xbmcswift2b import xbmcgui
from descriptionparserfactory import *
#
# CONSTANTS AND GLOBALS #
#
iarlnes_plugin_name = 'plugin.program.iarlnes'  # Kodi add-on identifier
debugging_enabled = True  # module-wide debug switch
LOG_LEVEL_INFO = 'LOG_LEVEL_INFO'  # log-level tag; NOTE(review): LOG_LEVEL_ERROR is used below but not defined here -- presumably from the wildcard import, confirm
__addon__ = xbmcaddon.Addon(id='%s' %iarlnes_plugin_name)  # handle to this add-on's settings/metadata
__language__ = __addon__.getLocalizedString  # localized-string lookup function
html_unescape_table = {
"&" : "&",
""" : '"' ,
"'" : "'",
">" : ">",
"<" : "<",
" " : " ",
"&" : "&",
"'" : "\'",
"²" : "2",
"³" : "3",
}
def html_unescape(text):
for key in html_unescape_table.keys():
text = text.replace(key, html_unescape_table[key])
return text
html_escape_table = {
"&" : "%26",
" " : "%20" ,
"'" : "%27",
">" : "%3E",
"<" : "%3C",
}
def html_escape(text):
for key in html_escape_table.keys():
text = text.replace(key, html_escape_table[key])
return text
txt_escape_table = {
"&" : "&",
">" : ">",
"<" : "<",
}
def txt_escape(text):
for key in txt_escape_table.keys():
text = text.replace(key, txt_escape_table[key])
return text
def joinPath(part1, *parts):
    """Join path components into one path.

    smb:// shares always use forward slashes, regardless of the host OS;
    everything else is delegated to os.path.join.
    """
    if part1.startswith('smb://'):
        combined = part1
        for piece in parts:
            combined = "%s/%s" % (combined, piece)
        return combined
    return os.path.join(part1, *parts)
#
# METHODS #
#
def get_Operating_System():
    """Best-effort detection of the platform Kodi is running on.

    Returns one of 'Windows', 'Android', 'OpenElec RPi', 'OpenElec x86',
    'Nix', 'IOS', 'OSX' -- or None when sys.platform is unrecognized.
    The check order matters: Android and OpenELEC report 'linux' in
    sys.platform and are distinguished by environment/filesystem probes.
    """
    current_OS = None
    if 'win32' in sys.platform:
        current_OS = 'Windows'
    elif 'win64' in sys.platform:
        current_OS = 'Windows'
    elif 'linux' in sys.platform:
        current_OS = 'Nix' #Default to Nix, then look for other alternatives
        if 'XBMC_ANDROID_APK' in os.environ.keys():
            current_OS = 'Android' #Similar method to find android as done below for IOS
        elif os.path.exists('/etc/os-release'):
            try:
                with open('/etc/os-release', 'r') as content_file: #Best method I could find to determine if its OE
                    os_content_file = content_file.read().replace('\n', '')
                if 'OpenELEC'.lower() in os_content_file.lower():
                    if 'RPi2.arm'.lower() in os_content_file.lower():
                        current_OS = 'OpenElec RPi'
                    else:
                        current_OS = 'OpenElec x86'
            except:
                # unreadable os-release: fall back to generic Linux
                current_OS = 'Nix'
        else:
            current_OS = 'Nix'
    elif 'darwin' in sys.platform:
        # iOS/ATV runs as user 'mobile' or 'frontrow'; desktop macOS does not
        if 'USER' in os.environ and os.environ['USER'] in ('mobile','frontrow',):
            current_OS = 'IOS'
        else:
            current_OS = 'OSX'
    return current_OS
def getEnvironment():
    """Return the OS environment variable ('win32' when unset), except that
    'xbox' is normalized to 'win32'."""
    current = os.environ.get("OS", "win32")
    return "win32" if current == "xbox" else current
def localize(id):
    """Return the localized string for *id*, or an apologetic English
    fallback message when no translation is available.

    Fix: the handler previously used a bare ``except:``, which also swallows
    SystemExit and KeyboardInterrupt; narrowed to Exception.
    """
    try:
        return __language__(id)
    except Exception:
        return "Sorry. No translation available for string with id: " +str(id)
def getAddonDataPath():
    """Return the add-on's userdata profile directory, creating it when
    missing.  Returns '' when the directory cannot be created."""
    path = ''
    path = xbmc.translatePath('special://profile/addon_data/%s' %(iarlnes_plugin_name))
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except:
            # creation failed (e.g. permissions): signal failure with ''
            path = ''
    return path
def getAddonInstallPath():
    """Return the directory this add-on is installed in."""
    return __addon__.getAddonInfo('path')
def getDATFilePath():
    """Return the directory holding the game-list DAT/XML files (userdata)."""
    return get_userdata_xmldir()
def getMediaFilePath():
    """Return the add-on's default skin media directory."""
    return os.path.join(getAddonInstallPath(), 'resources/skins/Default/media/')
def getSkinFilePath():
    """Return the add-on's skins directory."""
    return os.path.join(getAddonInstallPath(), 'resources/skins/')
def getParserFilePath(xmlname):
    """Return the full path of *xmlname* inside the add-on's data directory."""
    return os.path.join(getAddonInstallPath(), 'resources/data/'+xmlname)
def getYouTubePluginurl(videoid):
    """Return the plugin:// URL that plays *videoid* via the YouTube add-on."""
    return 'plugin://plugin.video.youtube/play/?video_id='+videoid
def update_addonxml(option):
    """After user confirmation, write *option* into the 'provides' field of
    the add-on's addon.xml (requires a Kodi restart to take effect).

    NOTE(review): update_xml_header is not defined in this module's visible
    code -- presumably supplied by the wildcard import; confirm.
    """
    current_dialog = xbmcgui.Dialog()
    ret1 = current_dialog.select('Are you sure you want to update this setting?', ['No','Yes'])
    if ret1 == 0:
        pass
    else:
        ok_ret = current_dialog.ok('Complete','The addon was updated.[CR]You may have to restart Kodi for the settings to take effect.')
        update_xml_header(getAddonInstallPath(),'/addon.xml','provides',option)
        print 'iarlnes: Addon provides was updated to ' + option
def getAutoexecPath():
    # Full path of the Kodi profile's autoexec.py script
    return xbmc.translatePath('special://profile/autoexec.py')
def show_busy_dialog():
    # Pop up Kodi's built-in busy spinner
    xbmc.executebuiltin('ActivateWindow(busydialog)')
def hide_busy_dialog():
    # Close the spinner, then poll until it has actually disappeared
    xbmc.executebuiltin('Dialog.Close(busydialog)')
    while xbmc.getCondVisibility('Window.IsActive(busydialog)'):
        xbmc.sleep(100)
def getTempDir():
    """Return the download-cache folder inside the add-on's userdata,
    creating it when missing.  Returns None on failure."""
    tempDir = os.path.join(getAddonDataPath(), 'temp_iarlnes')
    try:
        #check if folder exists
        if(not os.path.isdir(tempDir)):
            os.mkdir(tempDir) #If it doesn't exist, make it
        return tempDir
    except Exception, (exc):
        # NOTE(review): Logutil and LOG_LEVEL_ERROR are not defined in this
        # module's visible code -- presumably from the wildcard import of
        # descriptionparserfactory; confirm, else this handler raises NameError.
        Logutil.log('Error creating temp dir: ' +str(exc), LOG_LEVEL_ERROR)
        return None
def get_userdata_xmldir():
    """Return the userdata folder for the game-list DAT files, creating it
    when missing.  Returns None on failure."""
    xmlDir = os.path.join(getAddonDataPath(), 'dat_files')
    try:
        #check if folder exists
        if(not os.path.isdir(xmlDir)):
            os.mkdir(xmlDir) #If it doesn't exist, make it
        return xmlDir
    except Exception, (exc):
        # NOTE(review): Logutil / LOG_LEVEL_ERROR not defined in this module's
        # visible code -- confirm they come from the wildcard import.
        Logutil.log('Error creating userdata DAT dir: ' +str(exc), LOG_LEVEL_ERROR)
        return None
def get_addondata_xmldir():
    # DAT files shipped inside the add-on itself (the install-time copies)
    path = ''
    path = os.path.join(getAddonInstallPath(),'resources/data/dat_files')
    return path
def initialize_userdata():
    """Migrate the DAT files shipped with the add-on into userdata.

    For each shipped file: if an identical version already exists in
    userdata, the shipped copy is deleted; if the version differs, the
    user is asked to replace / postpone / discard; files not yet present
    are copied over.  Shipped copies are deleted only after a verified copy.
    """
    addondata_xmldir = get_addondata_xmldir()
    userdata_xmldir = get_userdata_xmldir()
    addondata_subfolders, addondata_files = xbmcvfs.listdir(addondata_xmldir)
    userdata_subfolders, userdata_files = xbmcvfs.listdir(userdata_xmldir)
    if len(addondata_files) > 0:
        # show_busy_dialog()
        print 'iarlnes: Initializing XML Files'
        for file_name in addondata_files:
            if file_name in userdata_files: #The file already exists in userdata
                print 'iarlnes: '+file_name+' already exists, check version'
                addon_file_info = get_xml_header_version(os.path.join(addondata_xmldir,file_name))
                userdata_file_info = get_xml_header_version(os.path.join(userdata_xmldir,file_name))
                if addon_file_info['emu_version'][0] == userdata_file_info['emu_version'][0]: #Files are the same, delete addondata file
                    print 'iarlnes: '+file_name+' same version detected, deleting addondata file'
                    os.remove(os.path.join(addondata_xmldir,file_name))
                else:
                    # version mismatch: let the user decide what to do
                    current_dialog = xbmcgui.Dialog()
                    current_dialog.ok('New Version Found', 'New version '+addon_file_info['emu_version'][0]+' for the file:', addon_file_info['emu_name'][0], 'was detected.')
                    ret1 = current_dialog.select('Overwrite old file: '+addon_file_info['emu_name'][0]+' ?', ["Yes, Replace!", "Remind me later", "No, Never!"])
                    if ret1 == 0: #Yes, replace!
                        print 'iarlnes: Copying new file '+file_name+' to userdata'
                        copyFile(os.path.join(addondata_xmldir,file_name), os.path.join(userdata_xmldir,file_name))
                        if os.path.isfile(os.path.join(userdata_xmldir,file_name)): #Copy was successful, delete addondata file
                            os.remove(os.path.join(addondata_xmldir,file_name)) #Remove the file from the addondata folder
                        else:
                            print 'iarlnes Error, copying xml file failed.'
                    elif ret1 == 1: #Remind me later
                        print 'iarlnes: XML File will not be copied at this time'
                    else: #No, delete the file
                        print 'iarlnes: XML File will be deleted'
                        os.remove(os.path.join(addondata_xmldir,file_name)) #Remove the file from the addondata folder
            else: #The files does not yet exist in userdata
                print 'iarlnes: Copying new file '+file_name+' to userdata'
                copyFile(os.path.join(addondata_xmldir,file_name), os.path.join(userdata_xmldir,file_name))
                if os.path.isfile(os.path.join(userdata_xmldir,file_name)): #Copy was successful, delete addondata file
                    os.remove(os.path.join(addondata_xmldir,file_name)) #Remove the file from the addondata folder
                else:
                    print 'iarlnes Error, copying xml file failed.'
    # hide_busy_dialog()
def check_temp_folder_and_clean(iarlnes_options_dl_cache):
    """Delete and recreate the download cache folder when its total size
    exceeds *iarlnes_options_dl_cache* (same unit as getFolderSize, bytes)."""
    current_path = getTempDir()
    current_path_size = getFolderSize(current_path)
    if current_path_size > iarlnes_options_dl_cache:
        print 'Deleting iarlnes Cache'
        shutil.rmtree(current_path)
        current_path = getTempDir() #Remake the directory
def unhide_all_archives(plugin):
    """Remove the ', hidden' tag from the emu_category header of every DAT
    file, making all archives visible again, then clear the plugin cache.

    NOTE(review): update_xml_header is not defined in this module's visible
    code -- presumably supplied by the wildcard import; confirm.
    """
    emu_info = scape_xml_headers() #Find all xml dat files and get the header info
    for ii in range(0,len(emu_info['emu_name'])):
        if ', hidden' in emu_info['emu_category'][ii]: #Don't include the archive if it's tagged hidden
            new_xml_category = emu_info['emu_category'][ii].replace(', hidden','')
            current_xml_fileparts = os.path.split(emu_info['emu_location'][ii])
            current_xml_filename = current_xml_fileparts[1]
            current_xml_path = current_xml_fileparts[0] + '/'
            update_xml_header(current_xml_path,current_xml_filename,'emu_category',new_xml_category)
    print 'iarlnes: Unhide all archives completed'
    plugin.clear_function_cache()
def check_if_rom_exits(current_save_fname,current_path,iarlnes_setting_localfile_action):
    """Check whether a ROM matching *current_save_fname* already exists
    anywhere under *current_path* (function name keeps its historical typo;
    callers depend on it).

    The match is a substring test on the extensionless basename, so any
    extension (e.g. an already-extracted file) counts as existing.
    Returns (fname_found, do_not_download_flag): the matching file name (or
    None), and whether the download should be skipped according to the
    local-file-action setting (Prompt / Do Not ReDownload / Overwrite).
    """
    file_already_exists = False
    do_not_download_flag = False
    fname_found = None
    file_basename = os.path.basename(current_save_fname)
    file_basename_no_ext = os.path.splitext(file_basename)
    files_in_current_path = []
    for (dp, dn, ff) in os.walk(current_path):
        files_in_current_path.extend(ff)
    if len(files_in_current_path)>0:
        for check_f in files_in_current_path:
            if file_basename_no_ext[0] in check_f:
                file_already_exists = True
                fname_found = check_f
                print fname_found + ' already exists in the directory'
    if file_already_exists:
        if 'Prompt'.lower() in iarlnes_setting_localfile_action.lower(): #Prompt if the file exists locally
            current_dialog = xbmcgui.Dialog()
            ret1 = current_dialog.select('The ROM already appears to exist.[CR]Re-Download and overwrite?', ['No','Yes'])
            if ret1 == 0:
                do_not_download_flag = True
            else:
                pass
        elif 'Do Not ReDownload'.lower() in iarlnes_setting_localfile_action.lower(): #Do Not ReDownload the file
            do_not_download_flag = True
            print 'iarlnes: File already exists, do not redownload'
        else: #Overwrite and ReDownload the file
            do_not_download_flag = False
            print 'iarlnes: File already exists, but redownload and overwrite is selected'
    return fname_found, do_not_download_flag
def check_for_warn(current_filename):
file_extension = current_filename.split('.')[-1]
chd_warn = False
iso_warn = False
if 'chd' in file_extension.lower():
chd_warn = True
if 'img' in file_extension.lower():
iso_warn = True
if 'img' in file_extension.lower():
iso_warn = True
if 'true' in __addon__.getSetting(id='iarlnes_setting_warn_chd').lower():
print __addon__.getSetting(id='iarlnes_setting_warn_chd')
if chd_warn:
current_dialog = xbmcgui.Dialog()
ret1 = current_dialog.yesno('Warning','Warning: This ROM is in CHD Format[CR]It will have to be converted prior to use[CR]These files are also typically large[CR]Check addon settings and wiki for more info',nolabel='OK',yeslabel='OK! Stop showing this!')
print ret1
if ret1>0:
__addon__.setSetting(id='iarlnes_setting_warn_chd',value='false') #No longer show the warning
if 'true' in __addon__.getSetting(id='iarlnes_setting_warn_iso').lower():
if iso_warn:
current_dialog = xbmcgui.Dialog()
ret1 = current_dialog.yesno('Warning','Warning: This ROM is in ISO/IMG Format[CR]These files are also typically large!',nolabel='OK',yeslabel='OK! Stop showing this!')
print ret1
if ret1>0:
__addon__.setSetting(id='iarlnes_setting_warn_iso',value='false') #No longer show the warning
def getFolderSize(folder):
    """Recursively total the size in bytes of *folder*: the directory
    entries themselves plus every regular file below them."""
    total = os.path.getsize(folder)
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        if os.path.isdir(entry_path):
            total += getFolderSize(entry_path)
        elif os.path.isfile(entry_path):
            total += os.path.getsize(entry_path)
    return total
def getConfigXmlPath():
    """Return the path of config.xml (a test-database copy when ISTESTRUN
    is set).

    NOTE(review): ISTESTRUN and Logutil are not defined in this module's
    visible code (this looks copied from Rom Collection Browser); confirm
    they come from the wildcard import, else this raises NameError.
    """
    if(not ISTESTRUN):
        addonDataPath = getAddonDataPath()
        configFile = os.path.join(addonDataPath, "config.xml")
    else:
        configFile = os.path.join(getAddonInstallPath(), "resources", "lib", "TestDataBase", "config.xml")
    Logutil.log('Path to configuration file: ' +str(configFile), LOG_LEVEL_INFO)
    return configFile
def advanced_setting_action_clear_cache(plugin):
    """Clear the xbmcswift2 function cache, then reset the one-shot
    'clear cache' setting so it does not trigger again on the next run."""
    plugin.clear_function_cache()
    __addon__.setSetting(id='iarlnes_setting_clear_cache_value',value='false') #Set back to false, no need to clear it next run
    print 'iarlnes: Advanced Setting Cache Clear Completed'
def update_external_launch_commands(current_os,retroarch_path,retroarch_cfg_path,xml_id,plugin):
    """Let the user pick (or type) an external launch command for the DAT
    file *xml_id* and store it in that file's emu_ext_launch_cmd header.

    The command templates come from external_command_database.xml, filtered
    by *current_os*; the %APP_PATH%, %ADDON_DIR% and %CFG_PATH% placeholders
    are substituted before saving.  Clears the plugin cache on success.

    NOTE(review): update_xml_header is not defined in this module's visible
    code -- presumably supplied by the wildcard import; confirm.
    """
    current_xml_fileparts = os.path.split(xml_id)
    current_xml_filename = current_xml_fileparts[1]
    current_xml_path = current_xml_fileparts[0] + '/'
    parserfile = getParserFilePath('external_launcher_parser.xml')
    launchersfile = getParserFilePath('external_command_database.xml')
    # helper_script_1 = os.path.join(getAddonInstallPath(),'resources/bin/applaunch.sh')
    # helper_script_2 = os.path.join(getAddonInstallPath(),'resources/bin/romlaunch_OE.sh')
    # helper_script_3 = os.path.join(getAddonInstallPath(),'resources/bin/applaunch-vbs.bat')
    descParser = DescriptionParserFactory.getParser(parserfile)
    results = descParser.parseDescription(launchersfile,'xml')
    user_options = list()
    launch_command = list()
    new_launch_command = None
    current_path = None
    #Define %APP_PATH% Variable
    if current_os == 'OSX':
        current_path = retroarch_path.split('.app')[0]+'.app' #Make App Path for OSX only up to the container
    elif current_os == 'Windows':
        current_path = os.path.split(retroarch_path)
        current_path = current_path[0]
    if current_path is not None: #Update %APP_PATH%
        retroarch_path = current_path
    current_cfg_path = ''
    #Define %CFG_PATH% Variable for android
    if current_os == 'Android':
        # probe the usual retroarch.cfg locations when none is configured
        default_config_locations = ['/mnt/internal_sd/Android/data/com.retroarch/files/retroarch.cfg','/sdcard/Android/data/com.retroarch/files/retroarch.cfg','/data/data/com.retroarch/retroarch.cfg']
        current_cfg_path = None
        if len(retroarch_cfg_path)<1: #Config is not defined in settings, try to find it in one of the default locales
            for cfg_files in default_config_locations:
                try:
                    if os.path.exists(cfg_files):
                        if current_cfg_path is None: #If the current config path is not yet defined and the file was found, then define it
                            current_cfg_path = cfg_files
                except:
                    print 'iarlnes: '+cfg_files+' does not exist'
        else:
            current_cfg_path = retroarch_cfg_path #If the config path is defined in settings, use that
        if current_cfg_path is None:
            current_cfg_path = ''
            print 'iarlnes: Error, no config file is defined'
    for entries in results: #Create list of available commands for the current OS
        if entries['operating_system'][0] == current_os:
            user_options.append(entries['launcher'][0])
            launch_command.append(entries['launcher_command'][0])
    user_options.append('Manually entered command line')
    user_options.append('None')
    launch_command.append('manual_command')
    launch_command.append('none')
    current_dialog = xbmcgui.Dialog()
    ret1 = current_dialog.select('Select from the available launch commands', user_options)
    new_launch_command = launch_command[ret1]
    if new_launch_command == 'manual_command':
        new_launch_command = current_dialog.input('Enter your new launch command:')
    if ret1>=0:
        ret2 = current_dialog.select('Are you sure you want to update[CR]the current External Launch Command?', ['Yes','Cancel'])
        if ret2<1:
            # new_launch_command = launch_command[ret1]
            new_launch_command = new_launch_command.replace('%APP_PATH%',retroarch_path) #Replace app path with user setting
            new_launch_command = new_launch_command.replace('%ADDON_DIR%',getAddonInstallPath()) #Replace helper script with the more generic ADDON_DIR
            new_launch_command = new_launch_command.replace('%CFG_PATH%',current_cfg_path) #Replace config path user setting
            update_xml_header(current_xml_path,current_xml_filename,'emu_ext_launch_cmd',new_launch_command)
            ok_ret = current_dialog.ok('Complete','External Launch Command was updated[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
def copyFile(oldPath, newPath):
    """Copy oldPath to newPath (shutil.copy2 preserves metadata), creating
    the destination directory when needed.  An existing file at newPath is
    left untouched.

    NOTE(review): Logutil and LOG_LEVEL_ERROR are not defined in this
    module's visible code -- presumably from the wildcard import; confirm,
    else the error branches raise NameError.
    """
    Logutil.log('new path = %s' %newPath, LOG_LEVEL_INFO)
    newDir = os.path.dirname(newPath)
    if not os.path.isdir(newDir):
        Logutil.log('create directory: %s' %newDir, LOG_LEVEL_INFO)
        try:
            os.mkdir(newDir)
        except Exception, (exc):
            Logutil.log('Error creating directory: %s\%s' %(newDir, str(exc)), LOG_LEVEL_ERROR)
            return
    if not os.path.isfile(newPath):
        Logutil.log('copy file from %s to %s' %(oldPath, newPath), LOG_LEVEL_INFO)
        try:
            shutil.copy2(oldPath, newPath)
        except Exception, (exc):
            Logutil.log('Error copying file from %s to %s\%s' %(oldPath, newPath, str(exc)), LOG_LEVEL_ERROR)
def getSettings():
    # Return a fresh Addon handle so recently saved settings are re-read
    settings = xbmcaddon.Addon(id='%s' %iarlnes_plugin_name)
    return settings
#HACK: XBMC does not update labels with empty strings
def setLabel(label, control):
    # Substitute a single space for '' so XBMC actually refreshes the label
    if(label == ''):
        label = ' '
    control.setLabel(str(label))
#Parses all the xml dat files in the folder and returns them to create the proper directories
def scape_xml_headers():
    """Scan every DAT file in the userdata DAT directory and extract its
    <header> block.

    Each file is read line by line (up to 500 lines) until </header> is
    seen; the emu_* fields are then sliced out by plain string splitting.
    Returns a dict of parallel lists, one entry per successfully parsed
    file, keyed by field name (emu_name, emu_location, emu_category, ...).
    """
    dat_path = getDATFilePath()
    subfolders, files = xbmcvfs.listdir(dat_path)
    #debug("Contents of %r:\nSubfolders: %r\nFiles: %r" % (dat_path, subfolders, files))
    emu_location = list()
    emu_name = list()
    emu_parser = list()
    emu_description = list()
    emu_category = list()
    emu_version = list()
    emu_date = list()
    emu_author = list()
    emu_homepage = list()
    emu_baseurl = list()
    emu_downloadpath = list()
    emu_postdlaction = list()
    emu_comment = list()
    emu_thumb = list()
    emu_banner = list()
    emu_fanart = list()
    emu_logo = list()
    emu_trailer = list()
    for ffile in files:
        total_lines = 500 #Read up to this many lines looking for the header
        f=open(os.path.join(dat_path,ffile),'rU')
        f.seek(0)
        header_end=0
        line_num=0
        header_text = ''
        while header_end < 1:
            line=f.readline()
            header_text+=str(line)
            line_num = line_num+1
            if '</header>' in header_text: #Found the header
                header_end = 1
                header_text = header_text.split('<header>')[1].split('</header>')[0]
                emu_name.append(header_text.split('<emu_name>')[1].split('</emu_name>')[0])
                emu_parser.append(header_text.split('<emu_parser>')[1].split('</emu_parser>')[0])
                emu_location.append(os.path.join(dat_path,ffile))
                emu_description.append(header_text.split('<emu_description>')[1].split('</emu_description>')[0])
                emu_category.append(header_text.split('<emu_category>')[1].split('</emu_category>')[0])
                emu_version.append(header_text.split('<emu_version>')[1].split('</emu_version>')[0])
                emu_date.append(header_text.split('<emu_date>')[1].split('</emu_date>')[0])
                emu_author.append(header_text.split('<emu_author>')[1].split('</emu_author>')[0])
                emu_homepage.append(header_text.split('<emu_homepage>')[1].split('</emu_homepage>')[0])
                emu_baseurl.append(header_text.split('<emu_baseurl>')[1].split('</emu_baseurl>')[0])
                emu_downloadpath.append(header_text.split('<emu_downloadpath>')[1].split('</emu_downloadpath>')[0])
                emu_postdlaction.append(header_text.split('<emu_postdlaction>')[1].split('</emu_postdlaction>')[0])
                emu_comment.append(header_text.split('<emu_comment>')[1].split('</emu_comment>')[0])
                emu_thumb.append(header_text.split('<emu_thumb>')[1].split('</emu_thumb>')[0])
                emu_banner.append(header_text.split('<emu_banner>')[1].split('</emu_banner>')[0])
                emu_fanart.append(header_text.split('<emu_fanart>')[1].split('</emu_fanart>')[0])
                emu_logo.append(header_text.split('<emu_logo>')[1].split('</emu_logo>')[0])
                emu_trailer.append(header_text.split('<emu_trailer>')[1].split('</emu_trailer>')[0])
                f.close()
            if line_num == total_lines: #Couldn't find the header
                header_end = 1
                f.close()
                print 'iarlnes Error: Unable to parse header in xml file'
    dat_file_table = {
        'emu_name' : emu_name,
        'emu_parser' : emu_parser,
        'emu_location' : emu_location,
        'emu_description' : emu_description,
        'emu_category' : emu_category,
        'emu_version' : emu_version,
        'emu_date' : emu_date,
        'emu_author' : emu_author,
        'emu_homepage' : emu_homepage,
        'emu_baseurl' : emu_baseurl,
        'emu_downloadpath' : emu_downloadpath,
        'emu_postdlaction' : emu_postdlaction,
        'emu_comment' : emu_comment,
        'emu_thumb' : emu_thumb,
        'emu_banner' : emu_banner,
        'emu_fanart' : emu_fanart,
        'emu_logo': emu_logo,
        'emu_trailer': emu_trailer
        }
    #print dat_file_table
    return dat_file_table
def get_xml_header_paths(xmlfilename):
    """Extract the path/launcher-related fields from one DAT file's
    <header> block (read line by line, up to 500 lines).

    Returns a dict of single-element lists: emu_name, emu_logo, emu_fanart,
    emu_baseurl, emu_downloadpath, emu_postdlaction, emu_launcher,
    emu_ext_launch_cmd (empty lists when no header was found).
    """
    total_lines = 500 #Read up to this many lines looking for the header
    f=open(xmlfilename,'rU')
    f.seek(0)
    header_end=0
    line_num=0
    header_text = ''
    emu_name = list()
    emu_logo = list()
    emu_fanart = list()
    emu_baseurl = list()
    emu_downloadpath = list()
    emu_postdlaction = list()
    emu_launcher = list()
    emu_ext_launch_cmd = list()
    while header_end < 1:
        line=f.readline()
        header_text+=str(line)
        line_num = line_num+1
        if '</header>' in header_text: #Found the header
            header_end = 1
            header_text = header_text.split('<header>')[1].split('</header>')[0]
            emu_name.append(header_text.split('<emu_name>')[1].split('</emu_name>')[0])
            emu_logo.append(header_text.split('<emu_logo>')[1].split('</emu_logo>')[0])
            emu_fanart.append(header_text.split('<emu_fanart>')[1].split('</emu_fanart>')[0])
            emu_baseurl.append(header_text.split('<emu_baseurl>')[1].split('</emu_baseurl>')[0])
            emu_downloadpath.append(header_text.split('<emu_downloadpath>')[1].split('</emu_downloadpath>')[0])
            emu_postdlaction.append(header_text.split('<emu_postdlaction>')[1].split('</emu_postdlaction>')[0])
            emu_launcher.append(header_text.split('<emu_launcher>')[1].split('</emu_launcher>')[0])
            emu_ext_launch_cmd.append(header_text.split('<emu_ext_launch_cmd>')[1].split('</emu_ext_launch_cmd>')[0])
            f.close()
        if line_num == total_lines: #Couldn't find the header
            header_end = 1
            f.close()
            print 'iarlnes Error: Unable to parse header in xml file'
    dat_file_table = {
        'emu_name' : emu_name,
        'emu_logo': emu_logo,
        'emu_fanart': emu_fanart,
        'emu_baseurl' : emu_baseurl,
        'emu_downloadpath' : emu_downloadpath,
        'emu_postdlaction' : emu_postdlaction,
        'emu_launcher' : emu_launcher,
        'emu_ext_launch_cmd' : emu_ext_launch_cmd,
        }
    return dat_file_table
def get_xml_header_version(xmlfilename):
    """Extract just emu_name and emu_version from one DAT file's <header>
    block (read line by line, up to 500 lines).

    Returns a dict of single-element lists (empty lists when no header
    was found).
    """
    total_lines = 500 #Read up to this many lines looking for the header
    f=open(xmlfilename,'rU')
    f.seek(0)
    header_end=0
    line_num=0
    header_text = ''
    emu_version = list()
    emu_name = list()
    while header_end < 1:
        line=f.readline()
        header_text+=str(line)
        line_num = line_num+1
        if '</header>' in header_text: #Found the header
            header_end = 1
            header_text = header_text.split('<header>')[1].split('</header>')[0]
            emu_name.append(header_text.split('<emu_name>')[1].split('</emu_name>')[0])
            emu_version.append(header_text.split('<emu_version>')[1].split('</emu_version>')[0])
            f.close()
        if line_num == total_lines: #Couldn't find the header
            header_end = 1
            f.close()
            print 'iarlnes Error: Unable to get version in xml header file'
    dat_file_table = {
        'emu_name' : emu_name,
        'emu_version' : emu_version,
        }
    return dat_file_table
def get_xml_header_category(xmlfilename):
    """Extract just emu_name and emu_category from one DAT file's <header>
    block (read line by line, up to 500 lines).

    Returns a dict of single-element lists (empty lists when no header
    was found).
    """
    total_lines = 500 #Read up to this many lines looking for the header
    f=open(xmlfilename,'rU')
    f.seek(0)
    header_end=0
    line_num=0
    header_text = ''
    emu_category = list()
    emu_name = list()
    while header_end < 1:
        line=f.readline()
        header_text+=str(line)
        line_num = line_num+1
        if '</header>' in header_text: #Found the header
            header_end = 1
            header_text = header_text.split('<header>')[1].split('</header>')[0]
            emu_name.append(header_text.split('<emu_name>')[1].split('</emu_name>')[0])
            emu_category.append(header_text.split('<emu_category>')[1].split('</emu_category>')[0])
            f.close()
        if line_num == total_lines: #Couldn't find the header
            header_end = 1
            f.close()
            print 'iarlnes Error: Unable to get category in xml header file'
    dat_file_table = {
        'emu_name' : emu_name,
        'emu_category' : emu_category,
        }
    return dat_file_table
def set_iarlnes_window_properties(emu_name):
    """Set the iarlnes skin window properties (theme, default thumb, header
    and background art) for the emulator whose name matches *emu_name*.

    The previous 13-branch if/elif ladder is replaced by an ordered lookup
    table; the FIRST entry whose key is a substring of emu_name wins, which
    preserves the original branch ordering exactly.
    """
    # (substring match, theme name, default thumb, header art, background art)
    themes = [
        ('32X', '32x', '32x_default_box.jpg', '32x_head.png', '32x_bg.png'),
        ('Nintendo Entertainment System - NES', 'NES', 'NES_default_box.jpg', 'white.png', 'nes_dark_bg.png'),
        ('Super Nintendo Entertainment System - SNES', 'SNES', 'SNES_default_box.jpg', 'white.png', 'nes_dark_bg.png'),
        ('Genesis', 'Genesis', 'genesis_default_box.jpg', 'sega_head.png', 'sega_bg.png'),
        ('Game Gear', 'Game Gear', 'genesis_default_box.jpg', 'sega_head.png', 'sega_bg.png'),
        ('Master System', 'Master System', 'genesis_default_box.jpg', 'white.png', 'sega_bg.png'),
        ('N64', 'N64', 'N64_default_box.jpg', 'n64_head.png', 'n64_bg.png'),
        ('MAME', 'MAME', 'arcade_default_box.jpg', 'arcade_head.png', 'arcade_bg.png'),
        ('2600', '2600', 'arcade_default_box.jpg', 'atari_head.png', 'atari_bg.png'),
        ('Jaguar', 'Jaguar', 'arcade_default_box.jpg', 'jaguar_head.png', 'black.png'),
        ('Lynx', 'Lynx', 'arcade_default_box.jpg', 'lynx_head.png', 'black.png'),
        ('TurboGrafx', 'TurboGrafx', 'arcade_default_box.jpg', 'tg16_head.png', 'tg16_bg.png'),
    ]
    # Fallback values match the original else-branch.
    current_theme = 'default'
    default_thumb = 'arcade_default_box.jpg'
    header_color = 'white.png'
    bg_color = 'black.png'
    for match, theme, thumb, header, bg in themes:
        if match in emu_name:
            current_theme = theme
            default_thumb = thumb
            header_color = header
            bg_color = bg
            break
    window = xbmcgui.Window(10000)
    window.setProperty('iarlnes.current_theme', current_theme)
    window.setProperty('iarlnes.default_thumb', default_thumb)
    window.setProperty('iarlnes.header_color', header_color)
    window.setProperty('iarlnes.bg_color', bg_color)
    # Button themes are identical for every system.
    window.setProperty('iarlnes.buttonfocustheme', 'button-highlight1.png')
    window.setProperty('iarlnes.buttonnofocustheme', 'button-nofocus2.png')
def parse_xml_romfile(xmlfilename,parserfile,cleanlist,plugin):
    """Parse an IARL rom list dat/xml file into Kodi listitem dicts.

    xmlfilename -- path of the dat/xml rom list to parse
    parserfile  -- parser definition file handed to DescriptionParserFactory
    cleanlist   -- when True, strip '(...)' tags from the displayed rom names
    plugin      -- plugin object used for settings lookup and URL routing

    Returns a list of dicts (label/icon/thumbnail/path/info/properties) ready
    to be listed by the plugin.

    Fix: the 'Title, Date' naming branch concatenated current_date without
    xstr(), raising TypeError for roms with no date; it now uses xstr()
    like every other branch.
    """
    #Get basic xml path info
    xml_header_info = get_xml_header_paths(xmlfilename)
    #Define the Parser
    descParser = DescriptionParserFactory.getParser(parserfile)
    #Get Results
    results = descParser.parseDescription(xmlfilename,'xml')
    set_iarlnes_window_properties(xml_header_info['emu_name'][0])
    iarlnes_setting_naming = plugin.get_setting('iarlnes_setting_naming',unicode)
    items = []
    current_item = []
    idx = 0
    total_arts = 10 #Up to this many of each art type per rom
    replacement_h = re.compile(r'\([^)]*\)') #Matches '(...)' rom tags such as '(USA)'
    for entries in results:
        idx += 1
        #Rom display name, and the '(...)' tag pulled out of it
        current_name = []
        if entries['rom_name']:
            current_name = entries['rom_name'][0]
            try:
                current_rom_tag = replacement_h.search(current_name).group(0).replace('(','').replace(')','').strip()
            except:
                current_rom_tag = None
            if cleanlist:
                current_name = replacement_h.sub('', current_name).strip()
        else:
            current_name = None
            current_rom_tag = None
        #Full download url for the rom file
        current_fname = []
        if entries['rom_filename']:
            current_fname = xml_header_info['emu_baseurl'][0]+str(entries['rom_filename'][0])
            current_fname = html_unescape(current_fname)
        else:
            current_fname = None
        #Relative filename used when saving the rom locally
        current_save_fname = []
        if entries['rom_filename']:
            current_save_fname = str(entries['rom_filename'][0])
            current_save_fname = html_unescape(current_save_fname)
        else:
            current_save_fname = None
        #Per-emulator values from the dat file header (None when absent)
        current_emu_name = []
        if xml_header_info['emu_name']:
            current_emu_name = xml_header_info['emu_name'][0]
        else:
            current_emu_name = None
        current_emu_logo = []
        if xml_header_info['emu_logo']:
            current_emu_logo = xml_header_info['emu_logo'][0]
        else:
            current_emu_logo = None
        current_emu_fanart = []
        if xml_header_info['emu_fanart']:
            current_emu_fanart = xml_header_info['emu_fanart'][0]
        else:
            current_emu_fanart = None
        current_emu_downloadpath = []
        if xml_header_info['emu_downloadpath']:
            current_emu_downloadpath = xml_header_info['emu_downloadpath'][0]
        else:
            current_emu_downloadpath = None
        current_emu_postdlaction = []
        if xml_header_info['emu_postdlaction']:
            current_emu_postdlaction = xml_header_info['emu_postdlaction'][0]
        else:
            current_emu_postdlaction = None
        current_emu_launcher = []
        if xml_header_info['emu_launcher']:
            current_emu_launcher = xml_header_info['emu_launcher'][0]
        else:
            current_emu_launcher = None
        current_emu_ext_launch_cmd = []
        if xml_header_info['emu_ext_launch_cmd']:
            current_emu_ext_launch_cmd = xml_header_info['emu_ext_launch_cmd'][0]
        else:
            current_emu_ext_launch_cmd = None
        #Optional supporting file (full url and relative save name)
        current_sfname = []
        try:
            if entries['rom_supporting_file'][0]:
                current_sfname = xml_header_info['emu_baseurl'][0]+str(entries['rom_supporting_file'][0])
                current_sfname = html_unescape(current_sfname)
            else:
                current_sfname = None
        except:
            current_sfname = None
        current_save_sfname = []
        try:
            if entries['rom_supporting_file'][0]:
                current_save_sfname = str(entries['rom_supporting_file'][0])
                current_save_sfname = html_unescape(current_save_sfname)
            else:
                current_save_sfname = None
        except:
            current_save_sfname = None
        #Artwork lists: one entry (url or None) per slot, 10 slots each.
        #The icon uses the clearlogo slots.
        current_icon = list()
        for ii in range(0,total_arts):
            if entries['rom_clearlogo'+str(ii+1)]:
                current_icon.append(html_unescape(entries['rom_clearlogo'+str(ii+1)][0]))
            else:
                current_icon.append(None)
        current_icon2 = filter(bool, current_icon)
        if not current_icon2:
            current_icon2 = getMediaFilePath() + xbmcgui.Window(10000).getProperty('iarlnes.default_thumb') #Use the default thumb if nothing else is avialable
        else:
            current_icon2 = current_icon2[0]
        current_snapshot = list()
        for ii in range(0,total_arts):
            if entries['rom_snapshot'+str(ii+1)]:
                current_snapshot.append(html_unescape(entries['rom_snapshot'+str(ii+1)][0]))
            else:
                current_snapshot.append(None)
        current_thumbnail = list()
        for ii in range(0,total_arts):
            if entries['rom_boxart'+str(ii+1)]:
                current_thumbnail.append(html_unescape(entries['rom_boxart'+str(ii+1)][0]))
            else:
                current_thumbnail.append(None)
        current_thumbnail2 = filter(bool, current_thumbnail)
        if not current_thumbnail2:
            current_thumbnail2 = getMediaFilePath() + xbmcgui.Window(10000).getProperty('iarlnes.default_thumb') #Use the default thumb if nothing else is avialable
        else:
            current_thumbnail2 = current_thumbnail2[0]
        try:
            if entries['rom_size']:
                current_filesize = sum(map(int,entries['rom_size'])) #Sum all the rom_sizes for the current entry. This may not be accurate for zips, but better than ???
            else:
                current_filesize = None
        except:
            current_filesize = None
        #Simple metadata fields (None when absent)
        if entries['rom_category']:
            current_genre = entries['rom_category'][0]
        else:
            current_genre = None
        if entries['rom_author']:
            current_credits = entries['rom_author'][0]
        else:
            current_credits = None
        if entries['rom_year']:
            current_date = entries['rom_year'][0]
        else:
            current_date = None
        if entries['rom_plot']:
            current_plot = entries['rom_plot'][0]
        else:
            current_plot = None
        if entries['rom_players']:
            current_nplayers = entries['rom_players'][0]
        else:
            current_nplayers = None
        if entries['rom_videoid']:
            current_trailer = getYouTubePluginurl(entries['rom_videoid'][0]) #Return youtube plugin URL
        else:
            current_trailer = None
        if entries['rom_emu_command']:
            current_rom_emu_command = entries['rom_emu_command'][0]
        else:
            current_rom_emu_command = None
        current_fanart = list()
        for ii in range(0,total_arts):
            if entries['rom_fanart'+str(ii+1)]:
                current_fanart.append(html_unescape(entries['rom_fanart'+str(ii+1)][0]))
            else:
                current_fanart.append(None)
        current_banner = list()
        for ii in range(0,total_arts):
            if entries['rom_banner'+str(ii+1)]:
                current_banner.append(html_unescape(entries['rom_banner'+str(ii+1)][0]))
            else:
                current_banner.append(None)
        current_clearlogo = list()
        for ii in range(0,total_arts):
            if entries['rom_clearlogo'+str(ii+1)]:
                current_clearlogo.append(html_unescape(entries['rom_clearlogo'+str(ii+1)][0]))
            else:
                current_clearlogo.append(None)
        if 'MAME_parser' in parserfile: #MAME xml filenames dont include the extension
            if current_fname:
                current_fname = current_fname+'.zip'
            if current_sfname:
                current_sfname = current_sfname+'.zip'
            if current_save_fname:
                current_save_fname = current_save_fname+'.zip'
            if current_save_sfname:
                current_save_sfname = current_save_sfname+'.zip'
        #Build the display label according to the user's naming setting
        label_sep = ' | '
        xstr = lambda s: s or '' #Render None as empty string in labels
        if iarlnes_setting_naming == 'Title':
            current_label = xstr(current_name)
        elif iarlnes_setting_naming == 'Title, Genre':
            current_label = xstr(current_name) + label_sep + xstr(current_genre)
        elif iarlnes_setting_naming == 'Title, Date':
            current_label = xstr(current_name) + label_sep + xstr(current_date) #BUGFIX: xstr() so a missing date no longer raises TypeError
        elif iarlnes_setting_naming == 'Title, Genre, Date':
            current_label = xstr(current_name) + label_sep + xstr(current_genre) + label_sep + xstr(current_date)
        elif iarlnes_setting_naming == 'Genre, Title':
            current_label = xstr(current_genre) + label_sep + xstr(current_name)
        elif iarlnes_setting_naming == 'Date, Title':
            current_label = xstr(current_date) + label_sep + xstr(current_name)
        elif iarlnes_setting_naming == 'Genre, Title, Date':
            current_label = xstr(current_genre) + label_sep + xstr(current_name) + label_sep + xstr(current_date)
        elif iarlnes_setting_naming == 'Date, Title, Genre':
            current_label = xstr(current_date) + label_sep + xstr(current_name) + label_sep + xstr(current_genre)
        elif iarlnes_setting_naming == 'Title, Genre, Date, ROM Tag':
            current_label = xstr(current_name) + label_sep + xstr(current_genre) + label_sep + xstr(current_date) + label_sep + xstr(current_rom_tag)
        else:
            current_label = xstr(current_name)
        current_item = []
        current_item = {
            'label' : current_label, 'icon': current_icon2,
            'thumbnail' : current_thumbnail2,
            'path' : plugin.url_for('get_selected_rom', romname=entries['rom_name'][0]),
            'info' : {'title' : current_name, 'genre': current_genre, 'studio': current_credits, 'date': current_date, 'plot': current_plot, 'trailer': current_trailer, 'size': current_filesize},
            'properties' : {'fanart_image' : current_fanart[0], 'banner' : current_banner[0], 'clearlogo': current_clearlogo[0], 'poster': current_thumbnail[1], 'rom_tag': current_rom_tag,
            'fanart1': current_fanart[0], 'fanart2': current_fanart[1], 'fanart3': current_fanart[2], 'fanart4': current_fanart[3], 'fanart5': current_fanart[4], 'fanart6': current_fanart[5], 'fanart7': current_fanart[6], 'fanart8': current_fanart[7], 'fanart9': current_fanart[8], 'fanart10': current_fanart[9],
            'banner1': current_banner[0], 'banner2': current_banner[1], 'banner3': current_banner[2], 'banner4': current_banner[3], 'banner5': current_banner[4], 'banner6': current_banner[5], 'banner7': current_banner[6], 'banner8': current_banner[7], 'banner9': current_banner[8], 'banner10': current_banner[9],
            'snapshot1': current_snapshot[0], 'snapshot2': current_snapshot[1], 'snapshot3': current_snapshot[2], 'snapshot4': current_snapshot[3], 'snapshot5': current_snapshot[4], 'snapshot6': current_snapshot[5], 'snapshot7': current_snapshot[6], 'snapshot8': current_snapshot[7], 'snapshot9': current_snapshot[8], 'snapshot10': current_snapshot[9],
            'boxart1': current_thumbnail[0], 'boxart2': current_thumbnail[1], 'boxart3': current_thumbnail[2], 'boxart4': current_thumbnail[3], 'boxart5': current_thumbnail[4], 'boxart6': current_thumbnail[5], 'boxart7': current_thumbnail[6], 'boxart8': current_thumbnail[7], 'boxart9': current_thumbnail[8], 'boxart10': current_thumbnail[9],
            'nplayers': current_nplayers, 'emu_logo': current_emu_logo, 'emu_fanart': current_emu_fanart, 'emu_name': current_emu_name, 'rom_fname': current_fname, 'rom_sfname': current_sfname, 'rom_save_fname': current_save_fname, 'rom_save_sfname': current_save_sfname,
            'emu_downloadpath': current_emu_downloadpath, 'emu_postdlaction': current_emu_postdlaction, 'emu_launcher': current_emu_launcher, 'emu_ext_launch_cmd': current_emu_ext_launch_cmd, 'rom_emu_command': current_rom_emu_command},
            'context_menu': None}
        items.append(current_item)
    return items
#HACK: XBMC does not update labels with empty strings
def getLabel(control):
    """Return the control's label, mapping the single-space placeholder back to ''."""
    text = control.getLabel()
    return '' if text == ' ' else text
def size_to_bytes(size_str):
    """Convert a human readable size string (e.g. '512 KB', '1.5MB') to bytes.

    Returns an int number of bytes, or None when *size_str* cannot be parsed.
    Plain numbers without a unit are treated as bytes.

    Fix: the old pattern '(\\w[GMK]?)B' captured the digit preceding the unit
    when there was no space (e.g. '512KB' -> group '2K' -> KeyError -> None);
    the unit is now matched on its own, so both '512 KB' and '512KB' work.
    """
    conversion = {'K' : 1024,
                  'M' : 1048576,
                  'G' : 1073741824,}
    try:
        unit = 1  # default: plain bytes when no G/M/K unit is present
        gmk = re.search(r'([GMK])B', size_str)
        if gmk:
            unit = conversion[gmk.group(1)]
        digit = re.search(r'(\d*\.?\d*)', size_str)
        if digit:
            size = int(float(digit.group(1)) * unit) #Removed math function, no need to import for a byte difference
        else:
            size = 0
    except:
        # Best effort: any unparsable input yields None (matches old behavior)
        size = None
    return size
def query_favorites_xml():
    """Ask the user which favorites dat/xml list to use.

    Scans all dat files for ones whose description marks them as an
    iarlnes favorites list, shows a selection dialog (plus entries to
    create a new list or cancel), and returns the chosen file's path,
    or None when the user cancels.
    """
    favorites_xml_filename = None
    emu_info = scape_xml_headers() #Find all xml dat files and get the header info
    favorite_xmls = dict()
    favorite_xmls['emu_name'] = list()
    favorite_xmls['emu_location'] = list()
    # Keep only dat files flagged as favorites lists (case-insensitive match)
    for ii in range(0,len(emu_info['emu_name'])):
        if 'iarlnes_Favorites'.lower() in emu_info['emu_description'][ii].lower():
            favorite_xmls['emu_name'].append(emu_info['emu_name'][ii])
            favorite_xmls['emu_location'].append(emu_info['emu_location'][ii])
    # The last two dialog entries are always "create new" and "cancel"
    favorite_xmls['emu_name'].append('+ Create New Favorites List')
    favorite_xmls['emu_name'].append('Cancel')
    current_dialog = xbmcgui.Dialog()
    # NOTE(review): select() returns -1 when dismissed, which indexes the
    # last list entry ('Cancel') and so falls into the cancel branch.
    ret1 = current_dialog.select('Choose Favorites List',favorite_xmls['emu_name'])
    if favorite_xmls['emu_name'][ret1] == favorite_xmls['emu_name'][-2]: #Create new list
        ret2 = current_dialog.input('Enter Favorites Label')
        if len(ret2)>0:
            saved_filename = create_new_favorites_list(''.join([x if x.isalnum() else "_" for x in ret2])) #Pass filename safe string to create favorites xml
            if saved_filename is not None:
                # Write the user's label into the new file's <emu_name> header
                current_xml_fileparts = os.path.split(saved_filename)
                current_xml_filename = current_xml_fileparts[1]
                current_xml_path = current_xml_fileparts[0] + '/'
                update_xml_header(current_xml_path,current_xml_filename,'emu_name',ret2)
                favorites_xml_filename = saved_filename
    elif favorite_xmls['emu_name'][ret1] == favorite_xmls['emu_name'][-1]: #Cancel adding favorite
        print 'iarlnes: Adding Favorite Cancelled'
        favorites_xml_filename = None
    else:
        favorites_xml_filename = favorite_xmls['emu_location'][ret1]
    return favorites_xml_filename
def create_new_favorites_list(new_filename):
    """Create a favorites xml from the bundled template.

    new_filename -- base name (without extension) for the new dat file.
    Returns the full path of the created file, or None when the copy failed.
    """
    template_path = getParserFilePath('Favorites_Template.xml')
    destination = os.path.join(getDATFilePath(), new_filename + '.xml')
    copyFile(template_path, destination)
    # Only report success when the file really landed on disk
    return destination if os.path.exists(destination) else None
def add_favorite_to_xml(fav_item,favorites_xml_filename):
    """Append *fav_item* (a listitem dict from parse_xml_romfile) as a new
    <game> entry to the favorites dat/xml at *favorites_xml_filename*.

    Returns True when the entry was written, False otherwise.

    Fixes: when both the emulator post-download action and the per-rom
    command were empty, the old code stripped '|' down to an empty string
    and then indexed [-1], raising IndexError; strip('|') plus the empty
    check avoid that. The ~130 duplicated try/except tag builders are
    replaced with loops that simply skip empty tags (equivalent to the old
    generate-then-strip-empty-tags cleanup).
    """
    add_success = False
    strip_base_url_string_1 = 'http://archive.org/download/'
    strip_base_url_string_2 = 'https://archive.org/download/'
    xstr = lambda s: txt_escape(s) or ''  # escape + treat None uniformly as ''
    # Combine the emulator post-download action and the per-rom command,
    # '|'-separated; pieces that are missing or unescapable are skipped.
    current_rom_command = ''
    try: current_rom_command = current_rom_command+xstr(fav_item['properties']['emu_postdlaction'])
    except: pass
    try: current_rom_command = current_rom_command+'|'+xstr(fav_item['properties']['rom_emu_command'])
    except: pass
    current_rom_command = current_rom_command.strip('|')  # safe on '' (old code crashed here)
    parts = []
    def _append_tag(tag, value):
        # Emit <tag>value</tag> only when there is content (mirrors the old
        # behavior of writing empty tags and stripping them afterwards).
        if value:
            parts.append('<' + tag + '>' + value + '</' + tag + '>\r\n')
    try: parts.append('<game name="' + xstr(fav_item['info']['title']) + '">\r\n')
    except: pass
    try: parts.append('<description>' + xstr(fav_item['info']['title']) + '</description>\r\n')
    except: pass
    try: parts.append('<rom name="' + fav_item['properties']['rom_fname'].replace(strip_base_url_string_1,'').replace(strip_base_url_string_2,'') + '" size="' + str(fav_item['info']['size']) + '"/>\r\n')
    except: pass
    try: _append_tag('plot', xstr(fav_item['info']['plot']))
    except: pass
    try: _append_tag('releasedate', xstr(fav_item['info']['date']))
    except: pass
    try: _append_tag('genre', xstr(fav_item['info']['genre']))
    except: pass
    try: _append_tag('studio', xstr(fav_item['info']['studio']))
    except: pass
    try: _append_tag('nplayers', xstr(fav_item['properties']['nplayers']))
    except: pass
    try: _append_tag('videoid', xstr(fav_item['info']['trailer']).split('=')[-1]) #Only add the video ID
    except: pass
    # All ten slots of each artwork type, in the same tag order as before.
    for art_type in ('boxart', 'snapshot', 'fanart', 'banner', 'clearlogo'):
        for ii in range(1, 11):
            art_key = art_type + str(ii)
            try: _append_tag(art_key, xstr(fav_item['properties'][art_key]))
            except: pass
    # emu_command is always written, even when empty (matches old output).
    parts.append('<emu_command>' + current_rom_command + '</emu_command>\r\n')
    parts.append('</game>\r\n')
    xml_string = ''.join(parts)
    # Insert the new entry just before the closing </datafile> tag by
    # streaming the favorites file into a temp file, then swapping files.
    full_reg_exp = '</datafile>' #Look for this
    temp_filename = os.path.join(getDATFilePath(),'temp.xml')
    fout = open(temp_filename, 'w') # out file
    value_updated = False
    with open(favorites_xml_filename, 'rU') as fin:
        for line in fin:
            if not value_updated and full_reg_exp in line:
                #Only update the first instance of the closing tag
                fout.write(xml_string+full_reg_exp)
                value_updated = True
            else:
                fout.write(line)
    fout.close()
    if value_updated:
        os.remove(favorites_xml_filename) #Remove Old File
        os.rename(temp_filename,favorites_xml_filename) #Rename Temp File
        add_success = True
    return add_success
def get_size_of_folder(start_path):
    """Return the total size in bytes of every file under *start_path*, recursively."""
    return sum(
        os.path.getsize(os.path.join(dirpath, filename))
        for dirpath, _dirnames, filenames in os.walk(start_path)
        for filename in filenames
    )
def dlfile(url,dest):
from urllib2 import Request, urlopen, URLError, HTTPError
# Open the url
try:
f = urlopen(url)
print "iarlnes: Downloading " + url
print "iarlnes: To location " + dest
# Open our local file for writing
with open(dest, "wb") as local_file:
local_file.write(f.read())
if(os.path.exists(dest)):
print 'progress: ' + str(os.path.getsize(dest))
result = 1
#handle errors
except HTTPError, e:
print "iarlnes HTTP Error:", e.code, url
result=0
except URLError, e:
print "iarlnes URL Error:", e.reason, url
result=0
return result
def update_xml_header(current_path,current_filename,reg_exp,new_value):
    """Replace the first <reg_exp>...</reg_exp> tag in an XML file with new_value.

    Rewrites the file line by line into a temp file, substituting only the
    first line that contains the closing tag, then swaps the temp file into
    place.  Later occurrences of the tag are left untouched.

    :param current_path: directory of the XML file (callers pass a trailing '/')
    :param current_filename: name of the XML file to edit
    :param reg_exp: bare tag name (no angle brackets) to look for
    :param new_value: replacement text for the tag's contents
    """
    full_reg_exp = '</'+reg_exp+'>' #Look for this
    fout = open(current_path+'temp.xml', 'w') # out file
    full_new_val = '<'+reg_exp+'>'+new_value+'</'+reg_exp+'>' #replacement value
    value_updated = False
    # NOTE(review): 'rU' (universal newlines) is Python-2-era; it was removed in
    # Python 3.11 — confirm the target interpreter before reuse.
    with open(current_path+current_filename, 'rU') as fin:
        while True:
            line = fin.readline()
            if not value_updated: #Only update the first instance of the requested tag
                if full_reg_exp in line:
                    try:
                        # preserve whatever text surrounded the tag on the line
                        beg_of_line = line.split('<')
                        end_of_line = line.split('>')
                        my_new_line = beg_of_line[0]+full_new_val+end_of_line[-1:][0] #Match the characters that were previously on the line
                        fout.write(my_new_line)
                    except:
                        # fall back to writing just the replacement tag
                        fout.write(full_new_val)
                    value_updated = True
                else:
                    fout.write(line)
            else:
                fout.write(line)
            if not line:
                # readline() returns '' only at EOF
                break
        pass
    fout.close()
    # only swap files if a replacement actually happened; otherwise the stale
    # temp.xml is left behind on disk
    if value_updated:
        os.remove(current_path+current_filename) #Remove Old File
        os.rename(current_path+'temp.xml',current_path+current_filename) #Rename Temp File
        print 'File Updated: '+current_filename
def unzip_file(current_fname):
    """Extract a zip archive next to itself and return (success, rom path).

    On success the archive file is deleted and the full path of the first
    archive member is returned; on failure (or for a non-zip input) the
    original path is handed back unchanged.
    """
    extracted_ok = False
    member_ext = None
    result_path = None
    if zipfile.is_zipfile(current_fname):
        try:
            archive_parts = os.path.split(current_fname)
            extract_dir = archive_parts[0] + '/'
            archive = zipfile.ZipFile(current_fname)
            # remember the first member's extension before extracting
            member_ext = os.path.splitext(archive.namelist()[0])[1]
            archive.extractall(extract_dir)
            extracted_ok = True
            archive.close()
            print('Unzip Successful')
        except:
            extracted_ok = False
            print('Unzip Failed')
        if extracted_ok:
            os.remove(current_fname)
    else:
        print(current_fname + ' was not regognized as a zipfile and not extracted')
    if member_ext is None:
        # nothing was unzipped (or no extension found): keep the input path
        result_path = current_fname
    else:
        result_path = os.path.join(archive_parts[0], archive.namelist()[0])
        print('Unzipped file: ' + result_path)
    return extracted_ok, result_path
def unzip_dosbox_file(current_fname,current_rom_emu_command):
    """Extract a zipped DOS game and return the path of its launch file.

    :param current_fname: path to the downloaded zip archive
    :param current_rom_emu_command: archive-relative launch file/command; when
        set, the returned path is the extraction directory joined with it
    :return: (zip_success, new_fname) where new_fname falls back to the input
        path if extraction failed or no launch command was supplied
    """
    zip_success = False
    new_fname = None
    if zipfile.is_zipfile(current_fname):
        try:
            current_zip_fileparts = os.path.split(current_fname)
            current_zip_path = current_zip_fileparts[0] + '/'
            z_file = zipfile.ZipFile(current_fname)
            # uz_file_extension = os.path.splitext(z_file.namelist()[0])[1] #Get rom extension
            z_file.extractall(current_zip_path)
            zip_success = True
            z_file.close()
            print 'Unzip Successful'
        except:
            zip_success = False
            print 'Unzip Failed'
        if zip_success:
            # discard the archive once its contents are on disk
            os.remove(current_fname)
    else:
        print current_fname + ' was not regognized as a zipfile and not extracted'
    if current_rom_emu_command: #The file was unzipped, change from zip to rom extension
        try:
            # NOTE(review): if the input was not a zipfile, current_zip_path is
            # unbound here and the NameError drops into the fallback below —
            # fragile but currently load-bearing.
            new_fname = current_zip_path+current_rom_emu_command
        except:
            new_fname = current_fname #Didn't unzip or didn't find a file extension
    else:
        new_fname = current_fname #Didn't unzip or didn't find a file extension
    return zip_success, new_fname
def unzip_dosbox_update_conf_file(current_fname):
    """Extract a zipped DOS game and build a Kodi-ready DOSBox .conf for it.

    After extraction, the first ``.conf`` file found in the archive is copied
    to ``kodi_launch.conf`` with two edits: the ``mount c`` line is rewritten
    to mount the local extraction directory, and any ``exit`` command is
    commented out so DOSBox stays open.

    :param current_fname: path to the downloaded zip archive (kept on disk)
    :return: (zip_success, new_fname) — new_fname is the generated conf path,
        or the original archive path if no conf file was found
    """
    zip_success = False
    new_fname = None
    conf_file = None
    if zipfile.is_zipfile(current_fname):
        try:
            current_zip_fileparts = os.path.split(current_fname)
            current_zip_path = current_zip_fileparts[0] + '/'
            z_file = zipfile.ZipFile(current_fname)
            # uz_file_extension = os.path.splitext(z_file.namelist()[0])[1] #Get rom extension
            z_file.extractall(current_zip_path)
            zip_success = True
            z_file.close()
            print 'Unzip Successful'
        except:
            zip_success = False
            print 'Unzip Failed'
        if zip_success:
            # unlike unzip_file(), the archive is intentionally NOT deleted here
            # os.remove(current_fname)
            try:
                # first .conf member of the archive, if any
                conf_file = [s for s in z_file.namelist() if s.endswith('.conf')][0]
            except:
                conf_file = None
        if conf_file is not None:
            old_conf_file = os.path.join(current_zip_path,conf_file)
            new_conf_file = os.path.join(current_zip_path,conf_file.split('/')[0],'kodi_launch.conf')
            fout = open(new_conf_file, 'w') # out file
            with open(old_conf_file, 'rU') as fin:
                while True:
                    line = fin.readline()
                    if 'mount c ' in line:
                        try:
                            # repoint drive C at the local extraction directory
                            my_new_line = 'mount c "'+current_zip_path+'"\r'
                            fout.write(my_new_line)
                        except:
                            fout.write(line)
                    elif 'exit' in line: #Comment out any exit calls in the configuration file
                        my_new_line = '#exit\r'
                        fout.write(my_new_line)
                    else:
                        fout.write(line)
                    if not line:
                        break
                pass
            fout.close()
            new_fname = new_conf_file
            print 'iarlnes: Created DOSBox Launch configuration file: '+new_fname
        else:
            current_dialog = xbmcgui.Dialog()
            ok_ret = current_dialog.ok('Notice','No configuration file found with DOS Game Archive[CR]You will have to manually launch this game.')
            print 'iarlnes: No configuration file found with DOS Game Archive'
            new_fname = current_fname
    return zip_success, new_fname
def convert_chd_bin(current_fname,iarlnes_setting_chdman_path):
    """Convert a CHD disc image to BIN/CUE via chdman and return the BIN path.

    Runs ``chdman extractcd`` as a subprocess and scrapes its stdout to decide
    success.  On success the source CHD is deleted.  Near-duplicate of
    convert_chd_cue(), which returns the .cue path instead.

    :param current_fname: path to the .chd file to convert
    :param iarlnes_setting_chdman_path: path to the chdman executable, or None
    :return: (chd_success, new_fname) — new_fname is the .bin path on success
    """
    chd_success = False
    new_file_extension = None
    new_fname = None
    current_dialog = xbmcgui.Dialog()
    if iarlnes_setting_chdman_path is None: #Check if there's a CHDMAN available
        ok_ret = current_dialog.ok('Error','No CHDMAN path appears to be set in your addon settings.')
        return chd_success, new_fname
    current_dialog.notification('Please Wait', 'Just a moment, converting CHD to BIN/CUE', xbmcgui.NOTIFICATION_INFO, 500000)
    current_chd_fileparts = os.path.split(current_fname)
    file_path = current_chd_fileparts[0]
    file_extension = current_chd_fileparts[-1]
    file_base_name = os.path.splitext(os.path.split(current_fname)[-1])[0]
    if 'chd' in file_extension.lower():
        # try:
        output_cue = os.path.join(file_path,file_base_name+'.cue')
        output_bin = os.path.join(file_path,file_base_name+'.bin')
        command = '"%CHD_APP_PATH%" extractcd -i "%INPUT_CHD%" -o "%OUTPUT_CUE%" -ob "%OUTPUT_BIN%"' #May need to provide other OS options here
        command = command.replace('%CHD_APP_PATH%',iarlnes_setting_chdman_path)
        command = command.replace("%INPUT_CHD%",current_fname)
        command = command.replace("%OUTPUT_CUE%",output_cue)
        command = command.replace("%OUTPUT_BIN%",output_bin)
        print 'iarlnes: Attempting CHD Conversion: '+command
        # marker strings looked for in chdman's combined stdout/stderr
        failed_text = 'Unhandled exception'
        already_exists_text = 'file already exists'
        success_text = 'Extraction complete'
        conversion_process = subprocess.Popen(command, shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT) #Convert CHD to BIN/CUE
        results1 = conversion_process.stdout.read().replace('\n', '')
        conversion_process.kill() #End the process after its completed
        if success_text.lower() in results1.lower():
            print 'iarlnes: CHD Conversion Successful'
            new_fname = output_bin
            chd_success = True
        elif already_exists_text.lower() in results1.lower():
            print 'iarlnes: BIN File already exists, conversion not required'
            new_fname = output_bin
            chd_success = True
        elif failed_text.lower() in results1.lower():
            chd_success = False
            print 'iarlnes: CHD Conversion Failed'
            print results1
        else:
            chd_success = False
            print 'iarlnes: CHD Conversion Failed'
        # except:
        # chd_success = False
        # print 'iarlnes: CHD Conversion Failed'
        if chd_success:
            os.remove(current_fname) #Delete the CHD and leave the new BIN/CUE if the conversion was a success
            current_dialog.notification('Complete', 'Conversion Successful', xbmcgui.NOTIFICATION_INFO, 1000)
        else:
            current_dialog.notification('Error', 'Error Converting, please see log', xbmcgui.NOTIFICATION_INFO, 1000)
    else:
        print current_fname + ' was not regognized as a chd and not converted'
    # current_dialog.close()
    return chd_success, new_fname
def convert_chd_cue(current_fname,iarlnes_setting_chdman_path):
    #Quick and dirty to point to cue if needed, may fix later
    """Convert a CHD disc image to BIN/CUE via chdman and return the CUE path.

    Identical to convert_chd_bin() except the returned path is the generated
    .cue file (for emulators that want the cue sheet rather than the bin).

    :param current_fname: path to the .chd file to convert
    :param iarlnes_setting_chdman_path: path to the chdman executable, or None
    :return: (chd_success, new_fname) — new_fname is the .cue path on success
    """
    chd_success = False
    new_file_extension = None
    new_fname = None
    current_dialog = xbmcgui.Dialog()
    if iarlnes_setting_chdman_path is None: #Check if there's a CHDMAN available
        ok_ret = current_dialog.ok('Error','No CHDMAN path appears to be set in your addon settings.')
        return chd_success, new_fname
    current_dialog.notification('Please Wait', 'Just a moment, converting CHD to BIN/CUE', xbmcgui.NOTIFICATION_INFO, 500000)
    current_chd_fileparts = os.path.split(current_fname)
    file_path = current_chd_fileparts[0]
    file_extension = current_chd_fileparts[-1]
    file_base_name = os.path.splitext(os.path.split(current_fname)[-1])[0]
    if 'chd' in file_extension.lower():
        # try:
        output_cue = os.path.join(file_path,file_base_name+'.cue')
        output_bin = os.path.join(file_path,file_base_name+'.bin')
        command = '"%CHD_APP_PATH%" extractcd -i "%INPUT_CHD%" -o "%OUTPUT_CUE%" -ob "%OUTPUT_BIN%"' #May need to provide other OS options here
        command = command.replace('%CHD_APP_PATH%',iarlnes_setting_chdman_path)
        command = command.replace("%INPUT_CHD%",current_fname)
        command = command.replace("%OUTPUT_CUE%",output_cue)
        command = command.replace("%OUTPUT_BIN%",output_bin)
        print 'iarlnes: Attempting CHD Conversion: '+command
        # marker strings looked for in chdman's combined stdout/stderr
        failed_text = 'Unhandled exception'
        already_exists_text = 'file already exists'
        success_text = 'Extraction complete'
        conversion_process = subprocess.Popen(command, shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT) #Convert CHD to BIN/CUE
        results1 = conversion_process.stdout.read().replace('\n', '')
        conversion_process.kill() #End the process after its completed
        if success_text.lower() in results1.lower():
            print 'iarlnes: CHD Conversion Successful'
            new_fname = output_cue
            chd_success = True
        elif already_exists_text.lower() in results1.lower():
            print 'iarlnes: CUE File already exists, conversion not required'
            new_fname = output_cue
            chd_success = True
        elif failed_text.lower() in results1.lower():
            chd_success = False
            print 'iarlnes: CHD Conversion Failed'
            print results1
        else:
            chd_success = False
            print 'iarlnes: CHD Conversion Failed'
        # except:
        # chd_success = False
        # print 'iarlnes: CHD Conversion Failed'
        if chd_success:
            os.remove(current_fname) #Delete the CHD and leave the new BIN/CUE if the conversion was a success
            current_dialog.notification('Complete', 'Conversion Successful', xbmcgui.NOTIFICATION_INFO, 1000)
        else:
            current_dialog.notification('Error', 'Error Converting, please see log', xbmcgui.NOTIFICATION_INFO, 1000)
    else:
        print current_fname + ' was not regognized as a chd and not converted'
    # current_dialog.close()
    return chd_success, new_fname
def rename_rom_postdl(current_fname, new_extension):
    """Give an already-downloaded file a new extension.

    Strips any dots and stray quotes from ``new_extension``, renames the
    file on disk, and returns (True, renamed path).  If the file does not
    exist, nothing is touched and (False, None) is returned.
    """
    if not os.path.exists(current_fname):
        return False, None
    clean_ext = new_extension.replace('.', '').replace("'", '')
    renamed = os.path.splitext(current_fname)[0] + '.' + clean_ext
    os.rename(current_fname, renamed)
    print('iarlnes: Renamed filename to: ' + renamed)
    return True, renamed
def lynx_header_fix(current_fname):
    """Unzip an Atari Lynx ROM and prepend the 64-byte LYNX cart header.

    The four canned headers below differ only in the bank-size byte
    (\\x01/\\x02/\\x04/\\x08); the one used is picked from the unzipped
    ROM's file size.  Depends on the sibling unzip_file() helper.

    :param current_fname: path to the downloaded zip archive
    :return: (success, new_fname) — path of the header-patched ROM, or None
        if the input file was missing or could not be unzipped
    """
    success = False
    new_fname = None
    header_text = '???'
    # This is a hack to make these headerless roms work
    # $0000- Empty - Not used?
    # $0100- 64KB
    # $0200- 128KB
    # $0400- 256KB
    # $0800- 512KB
    temp_filename = os.path.join(os.path.split(current_fname)[0],'temp.lnx')
    lynx_header = dict()
    lynx_header['64'] = 'LYNX\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Atari\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    lynx_header['128'] = 'LYNX\x00\x02\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Atari\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    lynx_header['256'] = 'LYNX\x00\x04\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Atari\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    lynx_header['512'] = 'LYNX\x00\x08\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Atari\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if os.path.exists(current_fname):
        zip_success1, new_rom_fname = unzip_file(current_fname)
        if zip_success1:
            st = os.stat(new_rom_fname)
            # size thresholds are the nominal bank sizes with a little slack
            if st.st_size<65000: #Add a little extra in terms of size depending on system
                use_header = lynx_header['64']
                header_text = '64'
            elif 65000<=st.st_size<130000:
                use_header = lynx_header['128']
                header_text = '128'
            elif 130000<=st.st_size<270000:
                use_header = lynx_header['256']
                header_text = '256'
            elif st.st_size>=270000:
                use_header = lynx_header['512']
                header_text = '512'
            else:
                # unreachable fallback (the branches above cover all sizes)
                use_header = lynx_header['256']
                header_text = '???'
            # write header + original ROM bytes into a temp file, then swap it in
            with open(new_rom_fname, 'rb') as old:
                with open(temp_filename, 'wb') as new:
                    new.write(use_header)
                    new.write(old.read())
            os.remove(new_rom_fname) #Remove Old File
            os.rename(temp_filename,new_rom_fname) #Rename Temp File
            new_fname = new_rom_fname
            success = True
            print 'iarlnes: Lynx ROM Updated with '+header_text+' bytes'
    return success, new_fname
def set_new_dl_path(xml_id,plugin):
    """Interactively change the download path stored in an archive XML header.

    Offers the default path or a user-browsed custom folder, asks for
    confirmation, writes the choice into the <emu_downloadpath> tag and
    clears the plugin cache so the new setting takes effect.

    :param xml_id: full path of the archive XML file to edit
    :param plugin: plugin instance (used only to clear its function cache)
    """
    current_xml_fileparts = os.path.split(xml_id)
    current_xml_filename = current_xml_fileparts[1]
    current_xml_path = current_xml_fileparts[0] + '/'
    current_dialog = xbmcgui.Dialog()
    ret1 = current_dialog.select('Select Download Path Type', ['Default','Custom'])
    if ret1 == 0:
        ret2 = current_dialog.select('Are you sure you want to update the current Download Path for '+current_xml_filename, ['Yes','Cancel'])
        # BUGFIX: Dialog.select() returns -1 when the dialog is dismissed, so
        # the previous "ret2 < 1" test also treated a cancelled dialog as Yes.
        if ret2 == 0:
            update_xml_header(current_xml_path,current_xml_filename,'emu_downloadpath','default')
            ok_ret = current_dialog.ok('Complete','Download Path was updated to default[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
    elif ret1 == 1:
        new_path = current_dialog.browse(0,'Update Download Path','files')
        ret2 = current_dialog.select('Are you sure you want to update the current Download Path for '+current_xml_filename, ['Yes','Cancel'])
        if ret2 == 0:
            update_xml_header(current_xml_path,current_xml_filename,'emu_downloadpath',new_path)
            ok_ret = current_dialog.ok('Complete','Download Path was updated to your custom folder[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
    else:
        pass
def hide_selected_archive(xml_id,plugin):
    """Mark an archive as hidden by appending ', hidden' to its category tag.

    Asks for confirmation, rewrites the <emu_category> header value and
    clears the plugin cache; the archive disappears after an addon restart.

    :param xml_id: full path of the archive XML file to edit
    :param plugin: plugin instance (used only to clear its function cache)
    """
    xml_current_category = get_xml_header_category(xml_id)
    print xml_current_category['emu_category'][0]
    current_xml_fileparts = os.path.split(xml_id)
    current_xml_filename = current_xml_fileparts[1]
    current_xml_path = current_xml_fileparts[0] + '/'
    current_dialog = xbmcgui.Dialog()
    ret1 = current_dialog.select('Are you sure you want to Hide '+current_xml_filename, ['Yes','Cancel'])
    if ret1 == 0:
        # keep the existing category and append the 'hidden' marker
        new_xml_category = xml_current_category['emu_category'][0] + ', hidden'
        update_xml_header(current_xml_path,current_xml_filename,'emu_category',new_xml_category)
        ok_ret = current_dialog.ok('Complete','Archive will be hidden after iarlnes restart[CR]Cache was cleared for new settings')
        plugin.clear_function_cache()
    else:
        pass
def set_new_post_dl_action(xml_id,plugin):
    """Interactively change the post-download action in an archive XML header.

    Shows a selection dialog, confirms the choice, writes the matching value
    into the <emu_postdlaction> tag and clears the plugin cache.

    :param xml_id: full path of the archive XML file to edit
    :param plugin: plugin instance (used only to clear its function cache)
    """
    current_xml_fileparts = os.path.split(xml_id)
    current_xml_filename = current_xml_fileparts[1]
    current_xml_path = current_xml_fileparts[0] + '/'
    current_dialog = xbmcgui.Dialog()
    # (menu label, confirmation description, stored tag value, completion text);
    # table-driven form replaces six nearly identical if/elif branches
    actions = [
        ('None',
         'set the post DL action to none',
         'none',
         'Post Download Action Updated to None'),
        ('Unzip',
         'set the post DL action to Unzip',
         'unzip_rom',
         'Post Download Action Updated to Unzip'),
        ('Unzip and Update DOSBox CMD',
         'set the post DL action to Unzip and Update DOSBox CMDs',
         'unzip_update_rom_path_dosbox',
         'Post Download Action Updated to Unzip and Update DOSBox CMDs'),
        ('Convert CHD to BIN/CUE',
         'set the post DL action to convert CHD to BIN/CUE (launch BIN)',
         'convert_chd_bin',
         'Post Download Action Updated to convert CHD to BIN/CUE'),
        ('Convert CHD to CUE/BIN',
         'set the post DL action to convert CHD to CUE/BIN (launch CUE)',
         'convert_chd_cue',
         'Post Download Action Updated to convert CHD to CUE/BIN'),
        ('Rename with .gg ext',
         'set the post DL action to rename file with .gg extension',
         "rename_rom_postdl('gg')",
         'Post Download Action Updated to rename file with .gg extension'),
    ]
    ret1 = current_dialog.select('Select New Post Download Action',
                                 [option[0] for option in actions] + ['Cancel'])
    if 0 <= ret1 < len(actions):
        _, description, tag_value, done_message = actions[ret1]
        ret2 = current_dialog.select('Are you sure you want to ' + description + ' for ' + current_xml_filename, ['Yes','Cancel'])
        # BUGFIX: Dialog.select() returns -1 on cancel; the old "ret2 < 1"
        # check therefore applied the change when the dialog was dismissed.
        if ret2 == 0:
            update_xml_header(current_xml_path,current_xml_filename,'emu_postdlaction',tag_value)
            ok_ret = current_dialog.ok('Complete', done_message + '[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
def set_new_emu_launcher(xml_id,plugin):
    """Interactively change the emulator launcher in an archive XML header.

    Offers Kodi RetroPlayer or an external program, asks for confirmation,
    writes the choice into the <emu_launcher> tag and clears the plugin cache.

    :param xml_id: full path of the archive XML file to edit
    :param plugin: plugin instance (used only to clear its function cache)
    """
    current_xml_fileparts = os.path.split(xml_id)
    current_xml_filename = current_xml_fileparts[1]
    current_xml_path = current_xml_fileparts[0] + '/'
    current_dialog = xbmcgui.Dialog()
    ret1 = current_dialog.select('Select New Emulator Launcher', ['Kodi RetroPlayer','External','Cancel'])
    if ret1 == 0:
        ret2 = current_dialog.select('Are you sure you want to set the Emulator to Kodi Retroplayer for '+current_xml_filename, ['Yes','Cancel'])
        # BUGFIX: Dialog.select() returns -1 on cancel; the old "ret2 < 1"
        # check also matched that, so dismissing the dialog applied the change.
        if ret2 == 0:
            update_xml_header(current_xml_path,current_xml_filename,'emu_launcher','retroplayer')
            ok_ret = current_dialog.ok('Complete','Emulator updated to Kodi Retroplayer[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
    elif ret1 == 1:
        ret2 = current_dialog.select('Are you sure you want to set the Emulator to External Program for '+current_xml_filename, ['Yes','Cancel'])
        if ret2 == 0:
            update_xml_header(current_xml_path,current_xml_filename,'emu_launcher','external')
            ok_ret = current_dialog.ok('Complete','Emulator updated to External Program[CR]Cache was cleared for new settings')
            plugin.clear_function_cache()
    else:
        pass
def check_downloaded_file(file_path):
    """Detect (and clean up) a zero-byte download.

    A zero-byte file means the requested item was not actually available:
    the file is deleted, the user is told via a dialog, and True is
    returned.  A non-empty file is left alone and False is returned.
    """
    if os.stat(file_path).st_size >= 1:
        return False
    notice = xbmcgui.Dialog()
    ok_ret = notice.ok('Error', 'The selected file was not available in the Archive[CR]Sorry about that')
    os.remove(file_path)  # discard the useless zero-byte file
    return True
def getScrapingMode(settings):
    """Map the scraping-mode addon setting string to its numeric code.

    0 = accurate automatic matching (also the fallback for unknown values),
    1 = automatic with guessed matches, 2 = interactive selection.
    """
    mode_by_label = {
        'Automatic: Accurate': 0,
        'Automatic: Guess Matches': 1,
        'Interactive: Select Matches': 2,
    }
    label = settings.getSetting(SETTING_iarlnes_SCRAPINGMODE)
    return mode_by_label.get(label, 0)
def indentXml(elem, level=0):
    """Pretty-print helper: recursively set .text/.tail whitespace on an
    ElementTree element so that serialization yields one indented line per
    element.  Mutates ``elem`` in place; ``level`` is the nesting depth.
    """
    # newline plus this depth's indentation
    i = "\n" + level*" "
    if len(elem):
        # element has children: indent its text and recurse into each child
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop deliberately rebinds 'elem'; after it finishes,
        # 'elem' is the LAST child, whose tail is dedented back to this level
        for elem in elem:
            indentXml(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def debug(message, level=xbmc.LOGNOTICE):
    """
    Write a debug message to xbmc.log
    :type message: str
    :param message: the message to log
    :type level: int
    :param level: (Optional) the log level (supported values are found at xbmc.LOG...)
    """
    # guard clause: logging is a no-op unless the addon's debug flag is on
    if not debugging_enabled:
        return
    if isinstance(message, unicode):
        message = message.encode("utf-8")
    # log each line separately so multi-line messages stay readable
    for single_line in message.splitlines():
        xbmc.log(msg="iarlnes: " + single_line, level=level)
# Root install directory of the add-on, resolved once at import time.
iarlnes_plugin_home = getAddonInstallPath()
#
# Logging
#
# Prefer the stdlib sqlite3 binding; fall back to the standalone pysqlite2
# package on interpreters that ship without sqlite3 support.
try:
    from sqlite3 import dbapi2 as sqlite
    print("iarlnes_INFO: Loading sqlite3 as DB engine")
except:
    from pysqlite2 import dbapi2 as sqlite
    print("iarlnes_INFO: Loading pysqlite2 as DB engine")
class Logutil:
    """Static logging helper that filters messages by the addon log-level setting."""
    # cached numeric level; resolved lazily on the first log() call
    currentLogLevel = None
    @staticmethod
    def log(message, logLevel):
        """Print ``message`` with a severity prefix if ``logLevel`` passes the filter."""
        if(Logutil.currentLogLevel == None):
            print "iarlnes: init log level"
            Logutil.currentLogLevel = Logutil.getCurrentLogLevel()
            print "iarlnes: current log level: " +str(Logutil.currentLogLevel)
        # higher numeric level = more verbose; drop anything above the cap
        if(logLevel > Logutil.currentLogLevel):
            return
        prefix = ''
        if(logLevel == LOG_LEVEL_DEBUG):
            prefix = 'iarlnes_DEBUG: '
        elif(logLevel == LOG_LEVEL_INFO):
            prefix = 'iarlnes_INFO: '
        elif(logLevel == LOG_LEVEL_WARNING):
            prefix = 'iarlnes_WARNING: '
        elif(logLevel == LOG_LEVEL_ERROR):
            prefix = 'iarlnes_ERROR: '
        try:
            print prefix + message
        except:
            # swallow encoding/print failures so logging can never crash the addon
            pass
    @staticmethod
    def getCurrentLogLevel():
        """Read the log-level addon setting; defaults to 1 on any failure."""
        logLevel = 1
        try:
            settings = getSettings()
            logLevelStr = settings.getSetting(SETTING_iarlnes_LOGLEVEL)
            if(logLevelStr == 'ERROR'):
                logLevel = LOG_LEVEL_ERROR
            elif(logLevelStr == 'WARNING'):
                logLevel = LOG_LEVEL_WARNING
            elif(logLevelStr == 'INFO'):
                logLevel = LOG_LEVEL_INFO
            elif(logLevelStr == 'DEBUG'):
                logLevel = LOG_LEVEL_DEBUG
        except:
            pass
        return logLevel
| bbr25/plugin.program.iarlnes | resources/lib/util.py | Python | gpl-2.0 | 83,029 | [
"Jaguar"
] | 6ce1227a241ca6d71e755756728554c933ce25f71694f2dfc4a9dd5723ceb611 |
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from util.enterprise_helpers import data_sharing_consent_requirement_at_login
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration, RegistrationCookieConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.tasks import send_activation_email
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import openedx.core.djangoapps.external_auth.views
from openedx.core.djangoapps.external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
destroy_oauth_tokens
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies, set_user_info_cookie
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from openedx.core.djangoapps.embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.programs import utils as programs_utils
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.catalog.utils import get_programs_data
# Module loggers: general student-app events vs. the security audit trail.
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
# Lightweight record describing a course needing identity re-verification.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
# Tracking event name emitted when a user starts changing an account setting.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# Maps incoming UTM query parameters to the attribute names they are stored
# under at registration time.
REGISTRATION_UTM_PARAMETERS = {
    'utm_source': 'registration_utm_source',
    'utm_medium': 'registration_utm_medium',
    'utm_campaign': 'registration_utm_campaign',
    'utm_term': 'registration_utm_term',
    'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
    """Return hidden-input markup carrying the request's CSRF token.

    Yields an empty string when Django reports the token as 'NOTPROVIDED';
    otherwise the token (possibly empty) is embedded in a hidden div.
    """
    token = context.get('csrf_token', '')
    if token == 'NOTPROVIDED':
        return ''
    markup = (
        u'<div style="display:none"><input type="hidden"'
        u' name="csrfmiddlewaretoken" value="%s" /></div>'
    )
    return markup % (token,)
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
    """
    Render the edX main page.

    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.

    Arguments:
        request: incoming HttpRequest.
        extra_context (dict): extra template context merged in last, so it can
            override any default computed below.
        user (User): user whose visibility drives the course list.
            NOTE(review): the AnonymousUser() default is evaluated once at
            import time and shared between calls — presumably harmless since
            AnonymousUser is stateless, but worth confirming.
    """
    if extra_context is None:
        extra_context = {}
    programs_list = []
    courses = get_courses(user)
    # Course ordering is site-configurable: by start date or by announcement.
    if configuration_helpers.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    ):
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)
    context = {'courses': courses}
    context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
    # This appears to be an unused context parameter, at least for the master templates...
    context['show_partners'] = configuration_helpers.get_value('show_partners', True)
    # TO DISPLAY A YOUTUBE WELCOME VIDEO
    # 1) Change False to True
    context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
    # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
    # Note: This value should be moved into a configuration setting and plumbed-through to the
    # context via the site configuration workflow, versus living here
    youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
    context['homepage_promo_video_youtube_id'] = youtube_video_id
    # allow for theme override of the courses list
    context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
    # Insert additional context for use in the template
    context.update(extra_context)
    # Getting all the programs from course-catalog service. The programs_list is being added to the context but it's
    # not being used currently in lms/templates/index.html. To use this list, you need to create a custom theme that
    # overrides index.html. The modifications to index.html to display the programs will be done after the support
    # for edx-pattern-library is added.
    if configuration_helpers.get_value("DISPLAY_PROGRAMS_ON_MARKETING_PAGES",
                                       settings.FEATURES.get("DISPLAY_PROGRAMS_ON_MARKETING_PAGES")):
        programs_list = get_programs_data(user)
    context["programs_list"] = programs_list
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
    Currently, this is sha1(user.username). Otherwise, return survey_link.
    """
    anonymized_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=anonymized_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    # Courses that are not (yet) allowed to certify get no cert section at all.
    if not course_overview.may_certify():
        return {}
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Returns reverification-related information for *all* of user's enrollments whose
    reverification status is in statuses.

    Args:
        statuses (list): a list of reverification statuses we want information for
            example: ["must_reverify", "denied"]

    Returns:
        dictionary of lists: one key per requested status, each mapping to a
        (possibly empty) list sorted by reverification end date.
    """
    info = defaultdict(list)
    for status in statuses:
        # Touching info[status] materializes the key even when there is
        # nothing to sort, so every requested status appears in the result.
        entries = info[status]
        if entries:
            entries.sort(key=lambda entry: entry.date)
    return info
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, return a filtered set of his or her course enrollments.

    Arguments:
        user (User): the user in question.
        org_to_include (str): If not None, ONLY courses of this org will be returned.
        orgs_to_exclude (list[str]): Courses of these orgs will be excluded.

    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        overview = enrollment.course_overview

        # A missing overview means the course is broken or gone; log it and
        # keep the enrollment row intact rather than surfacing a dead entry.
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue

        org = overview.location.org
        # When an org filter is set, only that org's courses pass.
        if org_to_include and org != org_to_include:
            continue
        # Courses attributed to excluded orgs are dropped.
        if org in orgs_to_exclude:
            continue

        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict): Output of certificate_status_for_student, or None.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Template-friendly certificate state (see cert_info docstring),
        or {} when the course hides early certificate info.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
        CertificateStatuses.unverified: 'unverified',
    }

    # Any status not in the table above renders as 'processing'.
    default_status = 'processing'

    default_info = {
        'status': default_status,
        'show_disabled_download_button': False,
        'show_download_url': False,
        'show_survey_button': False,
        'can_unenroll': True,
    }

    if cert_status is None:
        return default_info

    # Statuses that should not be shown at all when the course is configured
    # to give no early certificate information.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')

    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}

    status = template_state.get(cert_status['status'], default_status)

    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }

    # Offer the end-of-course survey for any "settled" status, provided the
    # course actually configured a survey URL.
    if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False

    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Inconsistent data: downloadable status without a URL -- fall back
            # to the default "processing" info rather than rendering a broken link.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']

        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()

        # posting certificates to LinkedIn is not currently
        # supported in White Labels
        if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
            # NOTE(review): this indexes cert_status['download_url'] even on the
            # web-certificate branch above, where its presence was never checked
            # -- confirm the cert record always carries a download_url here.
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )

    if status in {'generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'}:
        # Prefer the persisted grade; fall back to the grade recorded on the
        # certificate status itself.
        persisted_grade = CourseGradeFactory().get_persisted(user, course_overview)
        if persisted_grade is not None:
            status_dict['grade'] = unicode(persisted_grade.percent)
        elif 'grade' in cert_status:
            status_dict['grade'] = cert_status['grade']
        else:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info

    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the stand-alone login page, deferring to external auth (Shib/CAS)
    when it claims the request, and surfacing any third-party-auth error
    message queued via the Django messages framework.
    """
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response

    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    third_party_auth_error = None
    for msg in messages.get_messages(request):
        # Fix: extra_tags defaults to '' (and may be None) for messages that did
        # not come from the social-auth pipeline; ''.split()[0] raised
        # IndexError. Guard before indexing.
        if msg.extra_tags and msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Where the user should land after registering/logging in.
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    # External auth (Shibboleth/CAS) may take over the whole request.
    auth_takeover = external_auth_register(request)
    if auth_takeover is not None:
        return auth_takeover

    platform_name = configuration_helpers.get_value(
        'platform_name',
        settings.PLATFORM_NAME
    )
    context = {
        'email': '',
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'name': '',
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': platform_name,
        'running_pipeline': None,
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)

    extauth_domain = context.get("extauth_domain", '')
    if extauth_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)

    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        active_pipeline = pipeline.get(request)
        active_provider = provider.Registry.get_from_pipeline(active_pipeline)
        if active_provider is not None:
            prefill = active_provider.get_register_form_data(active_pipeline.get('kwargs'))
            prefill['running_pipeline'] = active_pipeline
            prefill['selected_provider'] = active_provider.name
            context.update(prefill)

    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    We would like to compute some more information from the given course modes
    and the user's current enrollment

    Returns the given information:
        - whether to show the course upsell information
        - numbers of days until they can't upsell anymore
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)

    info = {'show_upsell': False, 'days_for_upsell': None}

    # Upsell only when a verified mode exists and the learner's current mode
    # is one that can be upgraded to verified.
    if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
        verified_mode = modes['verified']
        info['show_upsell'] = True
        info['verified_sku'] = verified_mode.sku
        info['verified_bulk_sku'] = verified_mode.bulk_sku
        # if there is an expiration date, find out how long from now it is
        expiration = verified_mode.expiration_datetime
        if expiration:
            today = datetime.datetime.now(UTC).date()
            info['days_for_upsell'] = (expiration.date() - today).days

    return info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """
    Return True when the user's registration for *course_key* is blocked.

    A registration is blocked when any of the redeemed registration codes was
    generated against an invoice that is no longer valid.  As a side effect,
    the first such code also opts the user out of course emails and emits a
    tracking event.
    """
    for redemption in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        invoice_item = redemption.invoice_item
        if not invoice_item:
            continue
        if invoice_item.invoice.is_valid:
            continue

        # disabling email notifications for unpaid registration courses
        Optout.objects.get_or_create(user=request.user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            request.user.username,
            request.user.email,
            course_key,
        )
        track.views.server_track(
            request,
            "change-email1-settings",
            {"receive_emails": "no", "course": course_key.to_deprecated_string()},
            page='dashboard',
        )
        return True

    return False
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Provides the LMS dashboard view.

    Gathers everything the dashboard template needs for the logged-in user:
    filtered enrollments, course modes, certificate/verification/credit
    statuses, program associations, blocked/refundable courses and order
    history, then renders 'dashboard.html'.

    TODO: This is lms specific and does not belong in common code.

    Arguments:
        request: The request object.

    Returns:
        The dashboard response.
    """
    user = request.user
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    enable_verified_certificates = configuration_helpers.get_value(
        'ENABLE_VERIFIED_CERTIFICATES',
        settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
    )
    display_course_modes_on_dashboard = configuration_helpers.get_value(
        'DISPLAY_COURSE_MODES_ON_DASHBOARD',
        settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
    )

    # we want to filter and only show enrollments for courses within
    # the 'ORG' defined in configuration.
    course_org_filter = configuration_helpers.get_value('course_org_filter')

    # Let's filter out any courses in an "org" that has been declared to be
    # in a configuration
    org_filter_out_set = configuration_helpers.get_all_orgs()

    # remove our current org from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)

    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))

    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)

    # Retrieve the course modes for each course; course_modes_by_course maps
    # course_id -> {mode_slug: CourseMode} for the unexpired modes only.
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }

    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )

    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)

    # Shown when the account has not been activated yet.
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )

    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()

    # Only link to courseware the user can actually load (and whose
    # prerequisites, if any, do not bar access).
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )

    # Find programs associated with courses being displayed. This information
    # is passed in the template context to allow rendering of program-related
    # information on the dashboard.
    meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments)
    programs_by_run = meter.engaged_programs(by_run=True)

    # Construct a dictionary of course mode information
    # used to render the course list. We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }

    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }

    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            BulkEmailFlag.feature_enabled(enrollment.course_id)
        )
    )

    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)

    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)

    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )

    # Courses whose registration is blocked because a redeemed registration
    # code was backed by an invalidated invoice.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )

    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )

    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])

    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)

    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)

    # Redirect-reason banners set by views that bounced the user here.
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    elif 'course_closed' in request.GET:
        redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
            date=request.GET['course_closed']
        )
    else:
        redirect_message = ''

    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse('logout'),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'programs_by_run': programs_by_run,
        'show_program_listing': ProgramsApiConfig.current().show_program_listing,
        'disable_courseware_js': True,
        'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard,
    }

    # When the E-Commerce service is enabled, route payments through it.
    ecommerce_service = EcommerceService()
    if ecommerce_service.is_enabled(request.user):
        context.update({
            'use_ecommerce_payment_flow': True,
            'ecommerce_payment_page': ecommerce_service.payment_page_url(),
        })

    response = render_to_response('dashboard.html', context)
    set_user_info_cookie(response, request)
    return response
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Builds a recent course enrollment message.

    Constructs a new message template based on any recent course enrollments
    for the student.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        A string representing the HTML message output from the message template.
        None if there are no recently enrolled courses.
    """
    recent = _get_recently_enrolled_courses(course_enrollments)
    if not recent:
        return None

    enroll_messages = []
    for enrollment in recent:
        overview = enrollment.course_overview
        enroll_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment)
        })

    platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Given a list of enrollments, filter out all but recent enrollments.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: A list of recent course enrollments.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # An enrollment with no created date is explicitly excluded from the
        # list of recent enrollments (the comparison is False for it).
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.

    Check if donations are configured for the platform, and if the current course is accepting donations.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment(CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True if the course is allowing donations.
    """
    if course_id not in course_modes:
        flat_unexpired_modes = {
            unicode(course_id): [mode for mode in modes]
            for course_id, modes in course_modes.iteritems()
        }
        flat_all_modes = {
            unicode(course_id): [mode.slug for mode in modes]
            for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
        }
        log.error(
            u'Can not find `%s` in course modes.`%s`. All modes: `%s`',
            course_id,
            flat_unexpired_modes,
            flat_all_modes
        )
        # Bug fix: previously execution fell through to the return expression
        # below, which raised KeyError on course_modes[course_id] whenever
        # donations were enabled. Treat a missing course as "no donation".
        return False

    donations_enabled = DonationConfiguration.current().enabled
    return (
        donations_enabled and
        enrollment.mode in course_modes[course_id] and
        course_modes[course_id][enrollment.mode].min_price == 0
    )
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    raw_value = request.POST.get('email_opt_in')
    if raw_value is None:
        # No preference submitted with this request; nothing to record.
        return
    preferences_api.update_email_opt_in(request.user, org, raw_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchased
    college credit.  The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.

    Returns: dict

        The returned dictionary has keys that are `CourseKey`s and values that
        are dictionaries with:

            * eligible (bool): True if the user is eligible for credit in this course.
            * deadline (datetime): The deadline for purchasing and requesting credit for this course.
            * purchased (bool): Whether the user has purchased credit for this course.
            * provider_name (string): The display name of the credit provider.
            * provider_status_url (string): A URL the user can visit to check on their credit request status.
            * request_status (string): Either "pending", "approved", or "rejected"
            * error (bool): If true, an unexpected error occurred when retrieving the credit status,
                so the user should contact the support team.

    Example:

        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }

    """
    # Local import to avoid pulling in the credit app at module load time.
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    # Map each course key to the status of the user's credit request for it.
    # NOTE(review): the keys here come straight from the credit API; if they
    # are plain strings, the later .get(course_key) lookup with a CourseKey
    # may never match -- confirm the API's key type.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    # Enrollments in "credit" mode indicate the user purchased credit.
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider.  We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s.  The user will not be able to see his or her "
                    u"credit request status on the student dashboard.  This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id

        statuses[course_key] = status

    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    TODO: This is lms specific and does not belong in common code.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is True; pass False only for legacy code or
            code with non-standard flows (ex. beta tester invitations) --
            for any standard enrollment flow you want this to be True.

    Returns:
        Response

    """
    # Get the user
    user = request.user

    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()

    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))

    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))

    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))

        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)

        available_modes = CourseMode.modes_for_course_dict(course_id)

        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            # The response body carries the redirect URL for the AJAX caller.
            return HttpResponse(redirect_url)

        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug.  If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
            except Exception:  # pylint: disable=broad-except
                return HttpResponseBadRequest(_("Could not enroll"))

        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )

        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))

        # A downloadable/generating certificate blocks self-unenrollment.
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))

        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# TODO: Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """
    AJAX request to log in the user.

    Supports two entry paths:

    * Third-party auth: if a social-auth pipeline is running and the caller
      did not supply first-party credentials, the user is looked up from the
      pipeline's backend/uid pair.
    * First-party auth: otherwise the user is authenticated with the
      ``email``/``password`` POST parameters (rate-limited, with lockout
      counters when the LoginFailures feature is enabled).

    Returns a JsonResponse describing success or failure; on success it also
    sets the logged-in cookies consumed by the external marketing site.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Supplying an email or password means the caller explicitly wants
    # first-party auth, even if a third-party pipeline is in progress.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)

    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            # The social account is not linked with a platform account yet:
            # explain how to link it (or register) instead of logging in.
            AUDIT_LOG.warning(
                u"Login failed - user with username {username} has no social auth "
                "with backend_name {backend_name}".format(
                    username=username, backend_name=backend_name)
            )
            message = _(
                "You've successfully logged into your {provider_name} account, "
                "but this account isn't linked with an {platform_name} account yet."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "Use your {platform_name} username and password to log into {platform_name} below, "
                "and then link your {platform_name} account with {provider_name} from your dashboard."
            ).format(
                platform_name=platform_name,
                provider_name=requested_provider.name,
            )
            message += "<br/><br/>"
            message += _(
                "If you don't have an {platform_name} account yet, "
                "click <strong>Register</strong> at the top of the page."
            ).format(
                platform_name=platform_name
            )
            return HttpResponse(message, content_type="text/plain", status=403)
    else:
        # First-party (email/password) login path.
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                # TODO: User error message
                "value": _('There was an error receiving your login information. Please email us.'),
            })  # TODO: this should be status code 400

        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))

    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301  # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)

    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            lockout_message = _('This account has been temporarily locked due '
                                'to excessive login failures. Try again later.')
            return JsonResponse({
                "success": False,
                "value": lockout_message,
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403  # pylint: disable=fixme

    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""

    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)

        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)

    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(
            user.id,
            {
                'email': email,
                'username': username
            },
            {
                # Disable MailChimp because we don't want to update the user's email
                # and username in MailChimp on every page load. We only need to capture
                # this data on registration/activation.
                'MailChimp': False
            }
        )
        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # "Remember me" keeps the session for one week.
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                # Otherwise the session expires when the browser closes.
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise

        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)

        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })

        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)

    # Credentials were valid but the account has not been activated yet:
    # resend the activation email and tell the user to activate first.
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))

    reactivation_email_for_user(user)
    not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
                          "email message with instructions for activating your account.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.

    Only OAuth1/OAuth2 backends are supported; anything else is a 404.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend

    # Reject non-OAuth backends outright.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404

    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)

    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    user = None
    try:
        user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass

    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)

    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Render the staff-only page for managing user standing, including a table
    of currently disabled accounts and who disabled each of them.
    """
    if not request.user.is_staff:
        raise Http404

    disabled_standings = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    # One row per disabled account: [username, staff member who changed it].
    rows = [
        [standing.user.username, standing.user.standing.changed_by]
        for standing in disabled_standings
    ]
    context = {
        'headers': ['username', 'account_changed_by'],
        'rows': rows,
    }
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html. Staff only; returns a JSON
    message and 400 on any validation failure.
    """
    if not request.user.is_staff:
        raise Http404

    context = {}

    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)

    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)

    username = username.strip()
    try:
        target_user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # NOTE: "_" is gettext here, so the unused created-flag gets another name.
    standing, _created = UserStanding.objects.get_or_create(
        user=target_user, defaults={'changed_by': request.user},
    )

    if account_action == 'disable':
        standing.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        standing.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)

    standing.changed_by = request.user
    standing.standing_last_changed_at = datetime.datetime.now(UTC)
    standing.save()
    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
        profile.save()
    return JsonResponse({"success": True, "location": profile.location})
class AccountValidationError(Exception):
    """
    Raised when account creation fails validation on a specific field.

    The offending field's name is stored on the ``field`` attribute so
    callers can report exactly which input was invalid.
    """
    def __init__(self, message, field):
        super(AccountValidationError, self).__init__(message)
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    post_save receiver that records the signup source (site) for newly
    created users, so white-labeled "Microsite" registrations can be
    attributed later.
    """
    if not kwargs.get('created'):
        return
    site = configuration_helpers.get_value('SITE_NAME')
    if not site:
        return
    UserSignupSource(user=kwargs['instance'], site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Arguments:
        form (AccountCreationForm): validated form carrying the core account fields.
        custom_form: optional site-specific registration extension form; its model
            instance is saved in the same transaction as the User.

    Returns a tuple (User, UserProfile, Registration).

    Raises:
        ValidationError: if either form has validation errors.
        AccountValidationError: if an account with the same username or email
            already exists (detected via IntegrityError on save).

    Note: this function is also used for creating test users.
    """
    errors = {}
    errors.update(form.errors)
    if custom_form:
        errors.update(custom_form.errors)
    if errors:
        raise ValidationError(errors)

    # The user starts out inactive until they confirm via the activation email.
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()

    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
    except IntegrityError:
        # Figure out the cause of the integrity error: duplicate username or
        # duplicate email.  Anything else is re-raised unchanged.
        if len(User.objects.filter(username=user.username)) > 0:
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif len(User.objects.filter(email=user.email)) > 0:
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise

    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        # Configuration-defined extra profile fields live as JSON in
        # UserProfile.meta rather than as model columns.
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise

    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created (and logged-in) User.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disprate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow to define custom set of required/optional/hidden fields via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        # Third-party-auth accounts never use their password, so generate a
        # random one rather than trusting whatever was posted.
        params["password"] = pipeline.make_random_password()

    # Add a form requirement for data sharing consent if the EnterpriseCustomer
    # for the request requires it at login
    extra_fields['data_sharing_consent'] = data_sharing_consent_requirement_at_login(request)

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])

    extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    # NOTE: the last clause can only be reached when do_external_auth is True,
    # so `eamap` is guaranteed to be bound there.
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (
        registration_fields.get('terms_of_service') != 'hidden' or
        registration_fields.get('honor_code') != 'hidden'
    ) and (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
    )

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)

    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form, custom_form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
        # Store received data sharing consent field values in the pipeline for use
        # by any downstream pipeline elements which require them.
        running_pipeline['kwargs']['data_sharing_consent'] = form.cleaned_data.get('data_sharing_consent', None)

    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
                'age': profile.age or -1,
                'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]

        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })

        analytics.identify(*identity_args)

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    # Announce registration
    REGISTER_USER.send(sender=None, user=user, profile=profile)

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        dest_addr = user.email
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }

        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)

        from_address = configuration_helpers.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
            # Route all activation emails to a single test/staging address,
            # prefixing the body with the intended recipient for debugging.
            dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
            message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                       '-' * 80 + '\n\n' + message)
        send_activation_email.delay(subject, message, from_address, dest_addr)
    else:
        # No activation email required: activate immediately.
        registration.activate()
        _enroll_user_in_pending_courses(user)  # Enroll student in any pending courses

    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    try:
        record_registration_attributions(request, new_user)
    # Don't prevent a user from registering due to attribution errors.
    except Exception:  # pylint: disable=broad-except
        log.exception('Error while attributing cookies to user registration.')

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))

    return new_user
def _enroll_user_in_pending_courses(student):
    """
    Enroll ``student`` in every course for which a CourseEnrollmentAllowed
    record with ``auto_enroll`` set exists for their email address, and
    record a manual-enrollment audit transition for each.
    """
    pending = CourseEnrollmentAllowed.objects.filter(email=student.email)
    for allowed in pending:
        if not allowed.auto_enroll:
            continue
        enrollment = CourseEnrollment.enroll(student, allowed.course_id)
        audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
        if audit is None:
            continue
        # Record the transition from "allowed to enroll" to "enrolled",
        # preserving who originally enrolled the student and why.
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
            audit.reason, enrollment
        )
def record_affiliate_registration_attribution(request, user):
    """
    Credit this user's registration to the referring affiliate when an
    affiliate cookie is present on the request.
    """
    affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
    if not (user and affiliate_id):
        return
    UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
def record_utm_registration_attribution(request, user):
    """
    Credit this user's registration to the latest UTM referrer, if a UTM
    cookie is present on the request.
    """
    cookie_name = RegistrationCookieConfiguration.current().utm_cookie_name
    raw_cookie = request.COOKIES.get(cookie_name)
    if not (user and raw_cookie):
        return

    utm = json.loads(raw_cookie)

    # Store each known UTM parameter found in the cookie as a user attribute.
    for param_name, attribute_name in REGISTRATION_UTM_PARAMETERS.items():
        value = utm.get(param_name)
        if value:
            UserAttribute.set_user_attribute(user, attribute_name, value)

    created_at_unixtime = utm.get('created_at')
    if created_at_unixtime:
        # The cookie timestamp comes from JavaScript's Date.getTime(), which is
        # in milliseconds; convert to seconds before building the datetime.
        # PYTHON: time.time() => 1475590280.823698
        # JS: new Date().getTime() => 1475590280823
        created_at = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC)
        UserAttribute.set_user_attribute(user, REGISTRATION_UTM_CREATED_AT, created_at)
def record_registration_attributions(request, user):
    """
    Apply every referrer-cookie based attribution (affiliate, then UTM)
    to a newly registered user.
    """
    for attribute in (record_affiliate_registration_attribution,
                      record_utm_registration_attribution):
        attribute(request, user)
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Report only the first field/error pair from the validation errors.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )

    # Resume the third-party-auth pipeline if necessary.
    redirect_url = None  # The AJAX method calling should know the default destination upon success
    if third_party_auth.is_enabled() and pipeline.running(request):
        redirect_url = pipeline.get_complete_url(pipeline.get(request)['backend'])

    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.
    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to the `redirect_to` value if set, or
        course home page if course_id is defined, otherwise it will redirect to dashboard
    * `redirect_to`: will redirect to to this url
    * `is_active` : make/update account with status provided as 'is_active'
    If username, email, or password are not provided, use
    randomly generated credentials.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]
    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    redirect_to = request.GET.get('redirect_to', None)
    active_status = request.GET.get('is_active')
    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')
    # Absent parameter or the literal string "true" both mean "active".
    active_status = (not active_status or active_status == 'true')
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
    login_when_done = 'no_login' not in request.GET
    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )
    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except (AccountValidationError, ValidationError):
        # Attempt to retrieve the existing user.
        # On a name/email collision, update the existing account in place
        # instead of failing (this endpoint is for test setup).
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.is_active = active_status
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)
    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()
    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()
    if active_status:
        reg.activate()
        reg.save()
    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()
    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)
    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
        create_comments_service_user(user)
    # Provide the user with a valid CSRF token
    # then return a 200 response unless redirect is true
    if redirect_when_done:
        # Redirect to specific page if specified
        if redirect_to:
            redirect_url = redirect_to
        # Redirect to course info page if course_id is known
        elif course_id:
            try:
                # redirect to course info page in LMS
                redirect_url = reverse(
                    'info',
                    kwargs={'course_id': course_id}
                )
            except NoReverseMatch:
                # redirect to course outline page in Studio
                redirect_url = reverse(
                    'course_handler',
                    kwargs={'course_key_string': course_id}
                )
        else:
            try:
                # redirect to dashboard for LMS
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                # redirect to home for Studio
                redirect_url = reverse('home')
        return redirect(redirect_url)
    elif request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """
    When link in activation e-mail is clicked.

    Looks up the Registration by its activation key. Exactly one match
    activates the account (if not already active) and renders the completion
    page; zero matches renders the invalid-key page; more than one match is
    treated as a server error.
    """
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False
        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        _enroll_user_in_pending_courses(regs[0].user)
        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()
    form = PasswordResetFormNoActive(request.POST)
    if form.is_valid():
        form.save(use_https=request.is_secure(),
                  from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
                  request=request)
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        # NOTE(review): request.user may be AnonymousUser on this endpoint, in
        # which case user_id is None and destroy_oauth_tokens receives the
        # anonymous user — confirm this is intended.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
        destroy_oauth_tokens(request.user)
    else:
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    # Always reports success — presumably to avoid leaking which e-mail
    # addresses have accounts; verify before changing.
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def uidb36_to_uidb64(uidb36):
    """
    Convert a base36-encoded user ID into its base64-encoded equivalent.

    Needed to support old password reset URLs that use base36-encoded user IDs
    https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
    Args:
        uidb36: base36-encoded user ID
    Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID
    """
    try:
        user_id = base36_to_int(uidb36)
    except ValueError:
        # Not valid base36: hand back a deliberately invalid base64 ID
        # (incorrect padding) so downstream lookup fails gracefully.
        return '1'
    return force_text(urlsafe_base64_encode(force_bytes(user_id)))
def validate_password(user, password):
    """
    Tie in password policy enforcement as an optional level of
    security protection
    Args:
        user: the user object whose password we're checking.
        password: the user's proposed new password.
    Returns:
        is_valid_password: a boolean indicating if the new password
        passes the validation.
        err_msg: an error message if there's a violation of one of the password
        checks. Otherwise, `None`.
    """
    err_msg = None
    if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
        try:
            validate_password_strength(password)
        except ValidationError as err:
            err_msg = _('Password: ') + '; '.join(err.messages)
    # also, check the password reuse policy
    # Note: each later check OVERWRITES err_msg, so only the last failing
    # policy's message is reported to the user.
    if not PasswordHistory.is_allowable_password_reuse(user, password):
        if user.is_staff:
            num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
        else:
            num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
            "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
            num_distinct
        ).format(num=num_distinct)
    # also, check to see if passwords are getting reset too frequent
    if PasswordHistory.is_password_reset_too_soon(user):
        num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
        # Because of how ngettext is, splitting the following into shorter lines would be ugly.
        # pylint: disable=line-too-long
        err_msg = ungettext(
            "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
            "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
            num_days
        ).format(num=num_days)
    # Valid iff no policy produced an error message.
    is_password_valid = err_msg is None
    return is_password_valid, err_msg
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
    """
    A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.
    We also optionally do some additional password policy checks.
    """
    # convert old-style base36-encoded user id to base64
    uidb64 = uidb36_to_uidb64(uidb36)
    platform_name = {
        "platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    }
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
    except (ValueError, User.DoesNotExist):
        # if there's any error getting a user, just let django's
        # password_reset_confirm function handle it.
        return password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
    if request.method == 'POST':
        password = request.POST['new_password1']
        is_password_valid, password_err_msg = validate_password(user, password)
        if not is_password_valid:
            # We have a password reset attempt which violates some security
            # policy. Use the existing Django template to communicate that
            # back to the user.
            context = {
                'validlink': False,
                'form': None,
                'title': _('Password reset unsuccessful'),
                'err_msg': password_err_msg,
            }
            context.update(platform_name)
            return TemplateResponse(
                request, 'registration/password_reset_confirm.html', context
            )
        # remember what the old password hash is before we call down
        old_password_hash = user.password
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        # If password reset was unsuccessful a template response is returned (status_code 200).
        # Check if form is invalid then show an error to the user.
        # Note if password reset was successful we get response redirect (status_code 302).
        if response.status_code == 200 and not response.context_data['form'].is_valid():
            response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
            return response
        # get the updated user
        updated_user = User.objects.get(id=uid_int)
        # did the password hash change, if so record it in the PasswordHistory
        if updated_user.password != old_password_hash:
            entry = PasswordHistory()
            entry.create(updated_user)
    else:
        # GET: render the confirmation form, and activate the account as a
        # side effect when the reset link is valid.
        response = password_reset_confirm(
            request, uidb64=uidb64, token=token, extra_context=platform_name
        )
        response_was_successful = response.context_data.get('validlink')
        if response_was_successful and not user.is_active:
            user.is_active = True
            user.save()
    return response
def reactivation_email_for_user(user):
    """
    Re-send the account activation e-mail to ``user``.

    Returns a JsonResponse with ``success`` True on send, or False with an
    ``error`` message when the user has no Registration record or the mail
    could not be sent.
    """
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }
    subject = render_to_string('emails/activation_email_subject.txt', context)
    # Subject must be a single line for the mail headers.
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/activation_email.txt', context)
    from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
    try:
        user.email_user(subject, message, from_address)
    except Exception:  # pylint: disable=broad-except
        log.error(
            u'Unable to send reactivation email from "%s" to "%s"',
            from_address,
            user.email,
            exc_info=True
        )
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme
    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.

    Checks, in order: well-formed address, different from the current address,
    and not already taken by another account.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))
    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))
    # exists() lets the database short-circuit at the first match instead of
    # counting every matching row as count() != 0 did; same boolean result.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.
    """
    # Reuse the user's existing pending change record if one exists, so a
    # user never has more than one PendingEmailChange row.
    pec_list = PendingEmailChange.objects.filter(user=user)
    if len(pec_list) == 0:
        pec = PendingEmailChange()
        pec.user = user
    else:
        pec = pec_list[0]
    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex
    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()
    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    subject = render_to_string('emails/email_change_subject.txt', context)
    # Subject must be a single line for the mail headers.
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)
    from_address = configuration_helpers.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update

    The whole change runs in one transaction; any failure path marks the
    transaction for rollback before returning an error page.
    """
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response
        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }
        # The target address may have been taken since the change was requested.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response
        subject = render_to_string('emails/email_change_subject.txt', address_context)
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)
        # Archive the old address in the profile's meta blob for audit purposes.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()
        # Send it to the old email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response
        user.email = pec.new_email
        user.save()
        pec.delete()
        # And send it to the new email...
        try:
            user.email_user(
                subject,
                message,
                configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
            )
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response
        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course.

    Opt-in is modeled by deleting the Optout row; opt-out by creating one.
    """
    user = request.user
    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    receive_emails = request.POST.get("receive_emails")
    if receive_emails:
        optout_object = Optout.objects.filter(user=user, course_id=course_key)
        if optout_object:
            optout_object.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard',
        )
    else:
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id,
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard',
        )
    return JsonResponse({"success": True})
class LogoutView(TemplateView):
    """
    Logs out user and redirects.
    The template should load iframes to log the user out of OpenID Connect services.
    See http://openid.net/specs/openid-connect-logout-1_0.html.
    """
    # OAuth client IDs the user had authorized, captured before the session
    # is destroyed in dispatch().
    oauth_client_ids = []
    template_name = 'logout.html'
    # Keep track of the page to which the user should ultimately be redirected.
    target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    def dispatch(self, request, *args, **kwargs):  # pylint: disable=missing-docstring
        # We do not log here, because we have a handler registered to perform logging on successful logouts.
        request.is_from_logout = True
        # Get the list of authorized clients before we clear the session.
        self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])
        logout(request)
        # If we don't need to deal with OIDC logouts, just redirect the user.
        if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
            response = super(LogoutView, self).dispatch(request, *args, **kwargs)
        else:
            response = redirect(self.target)
        # Clear the cookie used by the edx.org marketing site
        delete_logged_in_cookies(response)
        return response
    def _build_logout_url(self, url):
        """
        Builds a logout URL with the `no_redirect` query string parameter.
        Args:
            url (str): IDA logout URL
        Returns:
            str
        """
        scheme, netloc, path, query_string, fragment = urlsplit(url)
        query_params = parse_qs(query_string)
        query_params['no_redirect'] = 1
        new_query_string = urlencode(query_params, doseq=True)
        return urlunsplit((scheme, netloc, path, new_query_string, fragment))
    def get_context_data(self, **kwargs):
        """Add the logout target and per-IDA logout iframe URIs to the context."""
        context = super(LogoutView, self).get_context_data(**kwargs)
        # Create a list of URIs that must be called to log the user out of all of the IDAs.
        uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
                                     logout_uri__isnull=False).values_list('logout_uri', flat=True)
        referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
        logout_uris = []
        for uri in uris:
            # Skip the IDA the user is currently on; its session is already gone.
            if not referrer or (referrer and not uri.startswith(referrer)):
                logout_uris.append(self._build_logout_url(uri))
        context.update({
            'target': self.target,
            'logout_uris': logout_uris,
        })
        return context
| pabloborrego93/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 109,785 | [
"VisIt"
] | 6d9e8d40310f67c9d7702efeb7a40fbca8f28072cbb85cbcd514717560e93323 |
# Python 2 script (cPickle, old-style filter usage): plots ICD vs. specific
# star-formation rate for a pickled galaxy sample, with a running median and
# the Speagle+14 main-sequence band.
import pylab as pyl
import cPickle as pickle
from scipy.stats import scoreatpercentile
# Load the galaxy sample and keep only high signal-to-noise (S/N > 30) objects.
galaxies = pickle.load(open('galaxies.pickle', 'rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
f1 = pyl.figure(1, figsize=(6,4))
f1s1 = f1.add_subplot(111)
# for the clipped points
arrow_right = [[0,0],[-1,1],[0,0],[-2,0],[0,0],[-1,-1],[0,0]]
arrow_down = [[0.,0.], [-1., 1], [0.,0.],[0.,2.], [0.,0.], [1, 1]]
# ICD as a percentage; sSFR in log10.
icd = pyl.asarray([galaxy.ICD_IH*100 for galaxy in galaxies])
sfr = pyl.asarray([pyl.log10(galaxy.ssfr) for galaxy in galaxies])
# plot the data
f1s1.scatter(icd, sfr, c='0.8',edgecolor='0.8', s=25, label='Data')
#plot the outliers
# Points outside the axis limits are drawn as arrow glyphs at the plot edge.
for i,s in zip(icd, sfr):
    if s < -10:
        pyl.scatter(i, -10, s=100, marker=None, verts = arrow_down)
    if i > 50:
        pyl.scatter(50, s, s=100, marker=None, verts = arrow_right)
# Running median of sSFR in 10 ICD bins.
bins = pyl.linspace(icd.min(),50, 10)
delta = bins[1]-bins[0]
idx = pyl.digitize(icd,bins)
# NOTE(review): digitize returns 1..len(bins) for in-range values; k=0 here
# corresponds to values below icd.min() (normally empty) — confirm the
# intended bin indexing (commented-out quartile code used range(1,7)).
running_median = [pyl.median(sfr[idx==k]) for k in range(10)]
#upper = [scoreatpercentile(sfr[idx==k], 75) for k in range(1,7)]
#lower = [scoreatpercentile(sfr[idx==k], 25) for k in range(1,7)]
pyl.plot(bins-delta/2, running_median, '#A60628', lw=4, label='Median')
#pyl.plot(bins-delta/2, upper, '#348ABD', '--', lw=4, label='Quartile')
#pyl.plot(bins-delta/2, lower, '#348ABD', '--', lw=4)
# add the speagle relation
from astLib.astCalc import tz
# Age of the universe at z = 2.25, for the Speagle+14 main-sequence fit.
t = tz(2.25)
m = 10
# log SFR from the Speagle+14 relation at log(M*) = 10; sfr - m gives log sSFR.
sfr = (0.84 - 0.026*t)*m - (6.51-0.11*t)
f1s1.axhspan(sfr-m - 0.2, sfr-m + 0.2, color='#AAF0D1', label='speagle+14',
             zorder=0)
#f1s1.axhline(sfr-m, lw=2, c='purple', label='speagle+14')
#f1s1.axhline(sfr-m - 0.2, lw=2, c='purple')
#f1s1.axhline(sfr-m + 0.2, lw=2, c='purple')
f1s1.set_xlim(-5,50)
f1s1.set_ylim(-10,-6.5)
f1s1.set_xlabel(r"$\xi[i_{775},H_{160}]$ (%)")
f1s1.set_ylabel(r'Log sSFR ($yr^{-1}$)')
pyl.legend(loc='upper right')
pyl.tight_layout()
pyl.show()
| boada/ICD | sandbox/plot_icd_vs_ssfr.py | Python | mit | 1,901 | [
"Galaxy"
] | a35569faaa8ddd8d28c05e1743c98b4f9ce66137a00838535347a910d28be5e2 |
#!/usr/bin/env python
# ===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
'''
Grav2NetCDFConverter concrete class for converting gravity point data to netCDF
Created on 28Mar.2018
@author: Andrew Turner
'''
# TODO: update creation date above
from collections import OrderedDict
import numpy as np
import cx_Oracle
from geophys_utils.netcdf_converter import ToNetCDFConverter, NetCDFVariable
from geophys_utils import points2convex_hull
import sys
import re
from datetime import datetime
import yaml
import os
import logging
# # Create the Logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create the console handler and set logging level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# Create a formatter for log messages
logger_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add the Formatter to the Handler
console_handler.setFormatter(logger_formatter)
# Add the Handler to the Logger
logger.addHandler(console_handler)
class Grav2NetCDFConverter(ToNetCDFConverter):
'''
CSV2NetCDFConverter concrete class for converting CSV data to netCDF
'''
gravity_metadata_list = [
# 'ENO', not needed
'SURVEYID',
'SURVEYNAME',
'COUNTRYID',
'STATEGROUP',
'STATIONS', #number of stations?
# 'GRAVACC', - variable
#'GRAVDATUM' as variable attribute
# 'GNDELEVACC', - variable
# 'GNDELEVMETH', - variable
# 'GNDELEVDATUM', - variable - 6 outliers
# 'RELIAB', variable - 5 outliers
'LAYOUT',
# 'ACCESS_CODE', filtered
# 'ENTRYDATE', not needed
# 'ENTEREDBY', not needed
# 'LASTUPDATE', not needed
# 'UPDATEDBY', not needed
# 'GRAVACCUNITS', #always um. In grav acc var attribute - may be null sometimes
# 'GRAVACCMETHOD', variable
# 'GNDELEVACCUNITS', # always m maybe some as null. In gravlevacc var attribut
# 'GNDELEVACCMETHOD', as variable
# 'ELLIPSOIDHGTDATUM', # always - always GRS80 now as variable attriubte of ellipsoidhgt
# 'ELLIPSOIDHGTMETH', methods deemed not required for analysis
# 'ELLIPSOIDHGTACC', # as variable
# 'ELLIPSOIDHGTACCMETHOD',# methods deemed not required for analysis
# 'ELLIPSOIDHGTACCUOM', # as variable attribute
'SURVEYTYPE',
# 'DATATYPES', not needed
# 'UNO', not needed
'OPERATOR',
'CONTRACTOR',
'PROCESSOR',
'CLIENT', # nulls
'OWNER', # nulls
'LEGISLATION', # nulls
# 'STATE',
'PROJ_LEADER', # nulls
'ON_OFF', #?
#'STARTDATE', moved to global attributes for enhanced searching
#'ENDDATE', moved to global attributes for enhanced searching
'VESSEL_TYPE', # nulls
'VESSEL', # nulls
'SPACEMIN', # can add uom which is metres
'SPACEMAX',
# 'LOCMETHOD', -not needed
#'ACCURACY', as point variable
#'GEODETIC_DATUM',
#'PROJECTION', # the data is given in the netcdf as gda94 unprojected. The values in the projection are Ellispoids
# 'QA_CODE', not needed
#'RELEASEDATE', # not needed but open for discussion
#'COMMENTS', # not needed but open for discussion
# 'DATA_ACTIVITY_CODE',
# 'NLAT', already in global attributes
# 'SLAT', already in global attributes
# 'ELONG', already in global attributes
# 'WLONG', already in global attributes
# 'ANO', not needed
# 'QABY', not needed
# 'QADATE', not needed
# 'CONFID_UNTIL', not needed
]
try:
logger.debug(os.path.splitext(__file__)[0] + '_settings.yml')
settings = yaml.safe_load(open(os.path.splitext(__file__)[0] + '_settings.yml'))
logger.debug('Settings' + str(settings))
except:
logger.debug("Yaml load fail")
settings = {}
    def get_keys_and_values_table(self, table_name: str):
        """
        Read an entire gravity lookup table and return it as an OrderedDict
        mapping each code (first column) to its description (second column),
        e.g. 'SUR': 'Positions determined by optical surveying methods or measured on surveyed points.'

        :param table_name: name of the lookup table in the gravity schema
        :return: OrderedDict of {code: description} in table row order
        """
        sql_statement = 'select * from gravity.{}'.format(table_name)
        query_result = self.cursor.execute(sql_statement)
        keys_and_values_dict = OrderedDict()
        for s in query_result:
            # for every instance in the table, add the 1st and 2nd column as key, value in a python dict
            keys_and_values_dict[s[0]] = s[1]
        # Returns the OrderedDict itself (insertion-ordered mapping).
        return keys_and_values_dict
def get_value_for_key(self, value_column: str, table_name: str, key_column: str, key: str):
"""
Retrieves all data from a specified table, converts into a dictionary, and returns as a string. Used for tables
with the key and value information such as accuracy or methodology.
e.g. 'SUR': 'Positions determined by optical surveying methods or measured on surveyed points.'
"""
cleaned_key = str(key)
list_of_characters_to_remove = ["\(", "\)", "\'", "\,"]
for character in list_of_characters_to_remove:
cleaned_key = re.sub(character, '', cleaned_key)
sql_statement = "select {0} from gravity.{1} where {2} = '{3}'".format(value_column, table_name, key_column, cleaned_key)
query_result = self.cursor.execute(sql_statement)
key_to_return = str(next(query_result))
for character in list_of_characters_to_remove:
key_to_return = re.sub(character, '', key_to_return)
return key_to_return
    def __init__(self, nc_out_path, survey_id, con, sql_strings_dict_from_yaml, netcdf_format='NETCDF4'):
        """
        Concrete constructor for subclass CSV2NetCDFConverter
        Needs to initialise object with everything that is required for the other Concrete methods
        N.B: Make sure this base class constructor is called from the subclass constructor

        :param nc_out_path: path of the netCDF file to create
        :param survey_id: gravity survey identifier used to filter all queries
        :param con: open cx_Oracle connection to the gravity database
        :param sql_strings_dict_from_yaml: dict of SQL query templates keyed by name
        :param netcdf_format: netCDF file format (defaults to 'NETCDF4')
        """
        ToNetCDFConverter.__init__(self, nc_out_path, netcdf_format)
        self.cursor = con.cursor()
        self.survey_id = survey_id
        self.sql_strings_dict_from_yaml = sql_strings_dict_from_yaml
        # Fetched once up-front; reused by get_global_attributes() and others.
        self.survey_metadata = self.get_survey_metadata()
def get_survey_metadata(self):
"""
Retrieve all data from the gravsurveys and joined a.surveys tables for the current surveyid in the loop.
Uses same filters as other sql queries.
:return:
"""
# TODO are the filters needed in the sql? It will pass this survey id if no observation data is used later on?
formatted_sql = self.sql_strings_dict_from_yaml['get_survey_metadata'].format(self.survey_id)
query_result = self.cursor.execute(formatted_sql)
field_names = [field_desc[0] for field_desc in query_result.description]
survey_row = next(query_result)
return dict(zip(field_names, survey_row))
def get_survey_wide_value_from_obs_table(self, field):
"""
Helper function to retrieve a survey wide value from the observations table. The returning value is tested
to be the only possible value (or null) within that survey.
:param field: The target column in the observations table.
:return: The first value of the specified field of the observations table.
"""
formatted_sql = self.sql_strings_dict_from_yaml['get_data'].format('o1.'+field, "null", self.survey_id)
formatted_sql = formatted_sql.replace('select', 'select distinct', 1) # Only retrieve distinct results
formatted_sql = re.sub('order by .*$', '', formatted_sql) # Don't bother sorting
query_result = self.cursor.execute(formatted_sql)
value = None
for result in query_result:
logger.debug('value: {}, result: {}'.format(value, result))
if value is None:
value = result[0]
assert value is None or result[0] == value or result[0] is None, 'Variant value found in survey-wide column {}'.format(field)
return value
    def get_global_attributes(self):
        '''
        Concrete method to return dict of global attribute <key>:<value> pairs

        Builds CF/ACDD global attributes from the survey metadata plus the
        already-populated coordinate variables, then tries to append a
        WKT convex-hull/bounding-geometry attribute.
        '''
        # NOTE(review): 'geospatial_long_resolution' is inconsistent with the
        # other 'geospatial_lon_*' keys (ACDD uses 'geospatial_lon_resolution')
        # — confirm before renaming, as consumers may rely on it.
        metadata_dict = {'title': self.survey_metadata['SURVEYNAME'],
                         'survey_id': self.survey_id,
                         'Conventions': "CF-1.6,ACDD-1.3",
                         'keywords': 'points, gravity, ground digital data, geophysical survey, survey {0}, {1}, {2}, Earth sciences,'
                                     ' geophysics, geoscientificInformation'.format(self.survey_id, self.survey_metadata['COUNTRYID'], self.survey_metadata['STATEGROUP']),
                         'geospatial_lon_min': np.min(self.nc_output_dataset.variables['longitude']),
                         'geospatial_lon_max': np.max(self.nc_output_dataset.variables['longitude']),
                         'geospatial_lon_units': "degrees_east",
                         'geospatial_long_resolution': "point",
                         'geospatial_lat_min': np.min(self.nc_output_dataset.variables['latitude']),
                         'geospatial_lat_max': np.max(self.nc_output_dataset.variables['latitude']),
                         'geospatial_lat_units': "degrees_north",
                         'geospatial_lat_resolution': "point",
                         'history': "Pulled from point gravity database at Geoscience Australia",
                         'summary': "This gravity survey, {0}, {1} located in {2} measures the slight variations in the earth's "
                                    "gravity based on the underlying structure or geology".format(self.survey_id,
                                                                                                  self.survey_metadata['SURVEYNAME'],
                                                                                                  self.survey_metadata['STATEGROUP']),
                         'location_accuracy_min': np.min(self.nc_output_dataset.variables['locacc']),
                         'location_accuracy_max': np.max(self.nc_output_dataset.variables['locacc']),
                         'time_coverage_start': str(self.survey_metadata.get('STARTDATE')),
                         'time_coverage_end': str(self.survey_metadata.get('ENDDATE')),
                         'time_coverage_duration': str(self.survey_metadata.get('ENDDATE') - self.survey_metadata.get('STARTDATE'))
                         if self.survey_metadata.get('STARTDATE') else "Unknown",
                         'date_created': datetime.now().isoformat(),
                         'institution': 'Geoscience Australia',
                         'source': 'ground observation',
                         #'references': '',## Published or web-based references that describe the data or methods used to produce it.
                         'cdm_data_type': 'Point'
                         }
        try:
            #Compute convex hull and add GML representation to metadata
            coordinates = np.array(list(zip(self.nc_output_dataset.variables['longitude'][:],
                                            self.nc_output_dataset.variables['latitude'][:]
                                            )
                                        )
                                   )
            if len(coordinates) >=3:
                convex_hull = points2convex_hull(coordinates)
                metadata_dict['geospatial_bounds'] = 'POLYGON((' + ', '.join([' '.join(
                    ['%.4f' % ordinate for ordinate in coordinates]) for coordinates in convex_hull]) + '))'
            elif len(coordinates) == 2:  # Two points - make bounding box
                bounding_box = [[min(coordinates[:,0]), min(coordinates[:,1])],
                                [max(coordinates[:,0]), min(coordinates[:,1])],
                                [max(coordinates[:,0]), max(coordinates[:,1])],
                                [min(coordinates[:,0]), max(coordinates[:,1])],
                                [min(coordinates[:,0]), min(coordinates[:,1])]
                                ]
                metadata_dict['geospatial_bounds'] = 'POLYGON((' + ', '.join([' '.join(
                    ['%.4f' % ordinate for ordinate in coordinates]) for coordinates in bounding_box]) + '))'
            elif len(coordinates) == 1:  # Single point
                #TODO: Check whether this is allowable under ACDD
                metadata_dict['geospatial_bounds'] = 'POINT((' + ' '.join(
                    ['%.4f' % ordinate for ordinate in coordinates[0]]) + '))'
        except:
            # Geometry is best-effort; everything else is still written.
            logger.warning('Unable to write global attribute "geospatial_bounds"')
        return metadata_dict
def get_dimensions(self):
'''
Concrete method to return OrderedDict of <dimension_name>:<dimension_size> pairs
'''
formatted_sql = self.sql_strings_dict_from_yaml['get_dimensions'].format(self.survey_id)
self.cursor.execute(formatted_sql)
point_count = int(next(self.cursor)[0])
dimensions = OrderedDict()
dimensions['point'] = point_count # number of points per survey
for field_value in Grav2NetCDFConverter.settings['field_names'].values():
if field_value.get('lookup_table'):
lookup_dict = self.get_keys_and_values_table(field_value['lookup_table'])
new_dimension_name = field_value['short_name'].lower()
dimensions[new_dimension_name] = len(lookup_dict)
# print(dimensions[new_dimension_name])
else:
pass
# print(dimensions['point'])
return dimensions
    def variable_generator(self):
        '''
        Concrete generator to yield NetCDFVariable objects.

        Yield order: the GDA94 crs variable, a scalar 'ga_gravity_metadata'
        variable whose attributes carry survey-level metadata, then one
        variable per field configured in the yaml settings (lookup-table
        fields yield the lookup values plus a companion '<name>_index'
        variable over the 'point' dimension).
        '''
        def get_data(field_yml_settings_dict):
            """
            Call an sql query to retrieve a data list of the specified field. A different query is called for freeair
            and bouguer.
            :param field_yml_settings_dict: one field's settings dict from the yaml config
            :return: list of per-point values (first column of each fetched row)
            """
            # NOTE: 'field_name' is the enclosing generator's loop variable (a
            # closure), not a parameter - this helper is only valid while the
            # field loop below is running.
            if field_name in ['Freeair', 'Bouguer']:
                formatted_sql = self.sql_strings_dict_from_yaml['get_data'].format(field_yml_settings_dict['database_field_name'],
                                                                           field_yml_settings_dict['fill_value'], self.survey_id)
            else:
                formatted_sql = self.sql_strings_dict_from_yaml['get_data'].format('o1.'+field_yml_settings_dict['database_field_name'],
                                                                                   field_yml_settings_dict['fill_value'],
                                                                                   self.survey_id)
            try:
                self.cursor.execute(formatted_sql)
            except:
                # Log the failing SQL before re-raising for diagnosis.
                logger.debug(formatted_sql)
                raise
            # NOTE(review): leftover debug prints - consider logger.debug instead.
            print("cursor")
            print(type(self.cursor))
            data_list = [x[0] for x in self.cursor]  # get the first index. Otherwise each point is within its own tuple.
            return data_list
        def generate_ga_metadata_dict():
            # Copy survey metadata values whose keys appear in the class-level
            # whitelist (gravity_metadata_list) into a plain attribute dict;
            # datetimes are ISO-formatted because netCDF attributes cannot
            # hold datetime objects.
            gravity_metadata = {}
            for key, value in iter(self.survey_metadata.items()):
                for metadata_attribute in Grav2NetCDFConverter.gravity_metadata_list:
                    if value is not None:
                        if key == metadata_attribute:
                            if type(value) == datetime:
                                gravity_metadata[key] = value.isoformat()
                            else:
                                gravity_metadata[key] = value
            logger.debug("GA gravity metadata")
            logger.debug(gravity_metadata)
            return gravity_metadata
        def handle_key_value_cases(field_yml_settings_dict, lookup_table_dict):
            """
            Map a lookup-keyed field's data onto zero-based integer indices.

            :param field_yml_settings_dict: field settings as written in the yml file e.g. {'short_name': 'Ellipsoidhgt', 'dtype':
            'float32', 'database_field_name': 'ELLIPSOIDHGT', 'long_name': 'Ellipsoid Height', 'units': 'm',
            'fill_value': -99999.9, 'datum': 'ELLIPSOIDHGTDATUM'}
            :param lookup_table_dict: Dict of key and values pulled from oracle for tables such as accuracy and
            methodology.
            :return: (data list mapped to zero-based lookup indices, converted attribute dict)
            """
            logger.debug('- - - - - - - - - - - - - - - -')
            # NOTE: 'field_value' is the enclosing field loop's variable (closure).
            logger.debug('handle_key_value_cases() with field value: ' + str(field_value) + ' and lookup_table_dict: ' + str(lookup_table_dict))
            # get the keys into a list
            lookup_key_list = [lookup_key for lookup_key in lookup_table_dict.keys()]
            # create the lookup table to convert variables with strings as keys.
            lookup_dict = {lookup_key: lookup_key_list.index(lookup_key)
                           for lookup_key in lookup_key_list}
            # get the array of numeric foreign key values
            field_data_array = get_data(field_yml_settings_dict)
            # transform the data_list into the mapped value.
            transformed_data_list = [lookup_dict.get(lookup_key) for lookup_key in field_data_array]
            # NOTE(review): this comprehension maps each value onto itself
            # ({value: value}) since lookup_table_dict[key] == value; the intent
            # was probably {mapped_index: value}. The result is currently unused
            # by the caller - verify before relying on it.
            converted_attributes_dict = {lookup_table_dict[key]: value
                                         for key, value in lookup_table_dict.items()}
            return transformed_data_list, converted_attributes_dict
        def get_field_description(target_field):
            """
            Helper function to retrieve the field description from a connected oracle database
            :param target_field: database column name (upper-cased before the query)
            :return field_description: description string for the column
            """
            # NOTE(review): not referenced anywhere in this generator - possibly dead code.
            sql_statement = self.sql_strings_dict_from_yaml['get_field_description'].format(target_field.upper())
            self.cursor.execute(sql_statement)
            field_description = str(next(self.cursor)[0])
            return field_description
        def build_attribute_dict_and_data_list_of_variables(field_yml_settings_dict):
            """
            For each field, the correct attributes are added. This is based on the grav2netcd_converter settings.
            The data is converted for values that have function as a lookup table. For each field, the relevant
            attribute dictionary, and np data array is returned.
            :param field_yml_settings_dict: field settings as written in the yml file e.g. {'short_name': 'Ellipsoidhgt', 'dtype':
            'float32', 'database_field_name': 'ELLIPSOIDHGT', 'long_name': 'Ellipsoid Height', 'units': 'm',
            'fill_value': -99999.9, 'datum': 'ELLIPSOIDHGTDATUM'}
            :return: for each field value, return its attribute dictionary, and data array or converted data array.
            """
            # values to parse into NetCDFVariable attributes list. Once passed they become a netcdf variable attribute.
            # lookup_table is later converted to comments.
            list_of_possible_value = ['long_name', 'standard_name', 'units', 'dtype', 'lookup_table', 'dem', 'datum']
            logger.debug('-----------------')
            logger.debug("Field Values: " + str(field_yml_settings_dict))
            converted_data_array = []
            attributes_dict = {}
            for value in list_of_possible_value:
                logger.debug("Value in list_of_possible_value: " + str(value))
                # if the field value is in the list of accepted values then add to attributes dict
                if field_yml_settings_dict.get(value):
                    logger.debug("Processing: " + str(value))
                    # Lookup-keyed fields: convert string keys to int8 indices.
                    if value == 'lookup_table':
                        logger.debug('Converting ' + str(value) + 'string keys to int8 with 0 as 1st index')
                        converted_data_list, converted_key_value_dict = handle_key_value_cases(field_yml_settings_dict,
                                                    self.get_keys_and_values_table(field_yml_settings_dict.get('lookup_table')))
                        logger.debug('Adding converted lookup table as variable attribute...')
                        # this replaces ['comments'] values set in the previous if statement.
                        # attributes_dict['comments'] = str(converted_key_value_dict)
                        converted_data_array = np.array(converted_data_list, field_yml_settings_dict['dtype'])
                    # for the one case where a column in the observation table (tcdem) needs to be added as the
                    # attribute of varaible in the netcdf file.
                    # NOTE: when value == 'lookup_table' the trailing 'else' below
                    # also runs (it pairs with this dem/datum test), so the raw
                    # lookup_table setting is added too; it is popped again by
                    # the caller before the variable is yielded.
                    if value == 'dem' or value == 'datum':
                        # the grav datum needs to be converted from its key value
                        if field_yml_settings_dict.get('short_name') == 'Grav':
                            gravdatum_key = self.get_survey_wide_value_from_obs_table(field_yml_settings_dict.get(value))
                            attributes_dict[value] = self.get_value_for_key("DESCRIPTION", "GRAVDATUMS", "GRAVDATUM", gravdatum_key)
                        # while TCDEM and ELLIPSOIDHGTDATUM do not
                        else:
                            attributes_dict[value] = self.get_survey_wide_value_from_obs_table(field_yml_settings_dict.get(value))
                        # if None is returned then remove the attribute
                        if attributes_dict[value] is None:
                            attributes_dict.pop(value)
                        else:
                            pass
                    # for all other values, simply add them to attributes_dict
                    else:
                        attributes_dict[value] = field_yml_settings_dict[value]
                        logger.debug('attributes_dict["{}"] = {}'.format(value, field_yml_settings_dict[value]))
                # if the value isn't in the list of accepted attributes
                else:
                    logger.debug(str(value) + ' is not found in yaml config or is not set as an accepted attribute.')
            logger.debug('Attributes_dict' + str(attributes_dict))
            # if the data array contained a lookup and was converted, return it and the attribute dict.
            if len(converted_data_array) > 0:
                return converted_data_array, attributes_dict
            # else get the non converted data and return it in an numpy array and the and the attribute dict too
            else:
                data_array = np.array(get_data(field_yml_settings_dict), dtype=field_yml_settings_dict['dtype'])
                return data_array, attributes_dict
        # ------------------------------------------------------------------------------------
        # Begin yielding NetCDFVariables
        # ------------------------------------------------------------------------------------
        # ---------------------------------------------------------------------------
        # crs variable creation for GDA94
        # ---------------------------------------------------------------------------
        yield self.build_crs_variable('''\
GEOGCS["GDA94",
    DATUM["Geocentric_Datum_of_Australia_1994",
        SPHEROID["GRS 1980",6378137,298.257222101,
            AUTHORITY["EPSG","7019"]],
        TOWGS84[0,0,0,0,0,0,0],
        AUTHORITY["EPSG","6283"]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["degree",0.0174532925199433,
        AUTHORITY["EPSG","9122"]],
    AUTHORITY["EPSG","4283"]]
'''
    )
        # ---------------------------------------------------------------------------
        # non acc convention survey level metadata grouped into one variable
        # ---------------------------------------------------------------------------
        yield NetCDFVariable(short_name='ga_gravity_metadata',
                             data=0,
                             dimensions=[],  # Scalar
                             fill_value=None,
                             attributes=generate_ga_metadata_dict(),
                             dtype='int8'  # Byte datatype
                             )
        # ---------------------------------------------------------------------------
        # The point dimension variables and their assocciated lookup table variables
        # ---------------------------------------------------------------------------
        # Loop through the defined variables in the yaml config and construct as netcdf variables.
        for field_name, field_value in Grav2NetCDFConverter.settings['field_names'].items():
            # convert strings to int or floats for int8 and float32 to get the required data type for the fill value
            if field_value['dtype'] == 'int8':
                fill_value = int(field_value['fill_value'])
            elif field_value['dtype'] == 'float32':
                fill_value = float(field_value['fill_value'])
            else:
                fill_value = field_value['fill_value']
            data, attributes = build_attribute_dict_and_data_list_of_variables(field_value)
            if field_value.get('lookup_table'):
                # get the values from the lookup table dict and convert into a np.array
                lookup_table_dict = self.get_keys_and_values_table(field_value['lookup_table'])
                grid_value_list = [value for value in iter(lookup_table_dict.values())]
                lookup_table_array = np.array(grid_value_list)
                # dtype/lookup_table are yaml bookkeeping, not netCDF attributes.
                attributes.pop('dtype', None)
                attributes.pop('lookup_table', None)
                dim_name = field_value['short_name'].lower()
                # Yield lookup table with same name as field
                yield NetCDFVariable(short_name=dim_name,
                                     data=lookup_table_array,
                                     dimensions=[dim_name],
                                     fill_value=fill_value,
                                     attributes=attributes
                                     )
                # Yield index table with name of <field_name>_index
                index_attributes = dict(attributes)
                index_attributes['long_name'] = "zero-based index of value in " + dim_name
                index_attributes['lookup'] = dim_name
                yield NetCDFVariable(short_name=((field_value.get('standard_name') or field_value['short_name']) + '_index').lower(),
                                     data=data,
                                     dimensions=['point'],
                                     fill_value=fill_value,
                                     attributes=index_attributes
                                     )
            else:  # Not a lookup field
                yield NetCDFVariable(short_name=(field_value.get('standard_name') or field_value['short_name']).lower(),
                                     data=data,
                                     dimensions=['point'],
                                     fill_value=fill_value,
                                     attributes=attributes
                                     )
def main():
    """Convert every gravity survey in the Oracle database to its own netCDF file.

    Positional command-line arguments:
        1. nc_out_path      directory in which to write <survey_id>.nc files
        2. user id          Oracle user
        3. oracle database  Oracle connect identifier
        4. password         Oracle password
    """
    # Fix: argv[0] is the script name, so four positional arguments require
    # len(sys.argv) >= 5; the old ">= 4" check let argv[4] raise IndexError.
    assert len(sys.argv) >= 5, \
        'Usage: {} <nc_out_path> <db_user> <oracle_db> <password>'.format(sys.argv[0])
    nc_out_path = sys.argv[1]
    u_id = sys.argv[2]
    oracle_database = sys.argv[3]
    pw = sys.argv[4]
    con = cx_Oracle.connect(u_id, pw, oracle_database)
    survey_cursor = con.cursor()
    # get sql strings from yaml file (sits next to this script)
    yaml_sql_settings = yaml.safe_load(open(os.path.splitext(__file__)[0] + '_sql_strings.yml'))
    sql_strings_dict = yaml_sql_settings['sql_strings_dict']
    # execute sql to return surveys to convert to netcdf
    survey_cursor.execute(sql_strings_dict['sql_get_surveyids'])
    # tidy the survey id strings: keep only the numeric part of each id
    survey_id_list = [re.search(r'\d+', survey_row[0]).group()
                      for survey_row in survey_cursor
                      ]
    logger.debug('Survey count = {}'.format(len(survey_id_list)))
    # Loop through the survey list to make a netcdf file based off each one.
    for survey in survey_id_list:
        logger.debug("Processing for survey: " + str(survey))
        g2n = Grav2NetCDFConverter(nc_out_path + "/" + str(survey) + '.nc', survey, con, sql_strings_dict)
        g2n.convert2netcdf()
        logger.info('Finished writing netCDF file {}'.format(nc_out_path))
        logger.info('-------------------------------------------------------------------')
        logger.info('Global attributes:')
        logger.info('-------------------------------------------------------------------')
        for key, value in iter(g2n.nc_output_dataset.__dict__.items()):
            logger.info(str(key) + ": " + str(value))
        logger.info('-'*30)
        logger.info('Dimensions:')
        logger.info('-'*30)
        logger.info(g2n.nc_output_dataset.dimensions)
        logger.info('-'*30)
        logger.info('Variables:')
        logger.info('-'*30)
        logger.info(g2n.nc_output_dataset.variables)
        # Drop the converter (and its open dataset) before the next survey.
        del g2n
if __name__ == '__main__':
    main()
| alex-ip/geophys_utils | geophys_utils/netcdf_converter/grav2netcdf_converter.py | Python | apache-2.0 | 31,301 | [
"NetCDF"
] | 3aa970aba015c836020d70fba195706f33cdeab711bc131598039a5c118eb1f7 |
from check_grad import check_grad
from utils import *
from logistic import *
import matplotlib.pyplot as plt
def run_logistic_regression(hyperparameters):
    """Train logistic regression by batch gradient descent and log metrics.

    :param hyperparameters: dict with 'learning_rate',
        'weight_regularization', 'num_iterations' and 'weight_decay'.
    :return: (num_iterations, 6) array; per-iteration columns are
        [train cost / N, train cost, train %correct, valid cost,
        valid %correct, test %correct].
    """
    # TODO specify training data
    train_inputs, train_targets = load_train()
    valid_inputs, valid_targets = load_valid()
    test_inputs, test_targets = load_test()
    # N is number of examples; M is the number of features per example.
    N, M = train_inputs.shape
    # Logistic regression weights (M features + 1 bias term).
    # TODO:Initialize to random weights here.
    weights = 0.1*np.random.randn(M+1,1)
    # Verify that your logistic function produces the right gradient.
    # diff should be very close to 0.
    run_check_grad(hyperparameters)
    # Begin learning with gradient descent
    logging = np.zeros((hyperparameters['num_iterations'], 6))
    for t in xrange(hyperparameters['num_iterations']):
        # Find the negative log posterior and its derivatives w.r.t. the weights.
        f, df, predictions = logistic(weights, train_inputs, train_targets, hyperparameters)
        # Evaluate the prediction.
        cross_entropy_train, frac_correct_train = evaluate(train_targets, predictions)
        if np.isnan(f) or np.isinf(f):
            raise ValueError("nan/inf error")
        # Update parameters: gradient step scaled by 1/N.
        weights = weights - hyperparameters['learning_rate'] * df / N
        # Make a prediction on the valid_inputs.
        predictions_valid = logistic_predict(weights, valid_inputs)
        # Evaluate the prediction.
        cross_entropy_valid, frac_correct_valid = evaluate(valid_targets, predictions_valid)
        # Find the negative log posterior of validation set
        f_valid, df_valid, prediction_valid = logistic(weights, valid_inputs, valid_targets, hyperparameters)
        # Make a prediction on the test_inputs.
        predictions_test = logistic_predict(weights, test_inputs)
        # Evaluate the prediction.
        cross_entropy_test, frac_correct_test = evaluate(test_targets, predictions_test)
        logging[t] = [f/N, f, frac_correct_train*100, f_valid, frac_correct_valid*100,frac_correct_test*100 ]
    return logging
def run_check_grad(hyperparameters):
    """Performs gradient check on logistic function.

    Builds a small random problem (7 examples, 9 features, random binary
    targets) and compares the analytic gradient from ``logistic`` against
    a finite-difference approximation; prints the difference, which should
    be very close to 0.
    """
    # This creates small random data with 7 examples and
    # 9 dimensions and checks the gradient on that data.
    num_examples = 7
    num_dimensions = 9
    weights = np.random.randn(num_dimensions+1, 1)
    data = np.random.randn(num_examples, num_dimensions)
    targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)
    diff = check_grad(logistic,      # function to check
                      weights,
                      0.001,         # perturbation
                      data,
                      targets,
                      hyperparameters)
    print "diff =", diff
if __name__ == '__main__':
    # TODO: Set hyperparameters
    hyperparameters = {
                    'learning_rate': 0.5,
                    'weight_regularization':True, # boolean, True for using Gaussian prior on weights
                    'num_iterations': 40,
                    'weight_decay': 0.01 # related to standard deviation of weight prior
                    }
    # average over multiple runs
    num_runs = 1
    # logging columns: [avg train cost, train cost, train %correct,
    #                   valid cost, valid %correct, test %correct]
    logging = np.zeros((hyperparameters['num_iterations'], 6))
    for i in xrange(num_runs):
        logging += run_logistic_regression(hyperparameters)
    logging /= num_runs
    # TODO generate plots
    # Plot fraction-correct curves for train/valid/test over iterations.
    plt.plot(logging[:,2],marker='+',label='training set')
    plt.plot(logging[:,4],marker='*',label='validation set')
    plt.plot(logging[:,5],marker='h',label='test set')
    plt.legend(loc='lower right')
    plt.title('Plot of Fraction Correct vs. Iteration Times on training set and validation set')
    plt.xlabel('Iteration Times')
    plt.ylabel('Fraction Correct')
    plt.show()
| ouyangyike/Machine-Learning-and-Data-Mining | Logistic Regression/logistic_regression_regularized1.py | Python | mit | 3,874 | [
"Gaussian"
] | 1fce982298ec537ad68d8ab6a4784d79d80cbe000d4f62935a39e4835b8c0328 |
from tabulate import tabulate
from collections import OrderedDict
vals = """
#bcftools
986798
real 3m49.024s
user 3m48.620s
sys 0m7.848s
#cyvcf2
984214
real 4m5.190s
user 4m5.048s
sys 0m0.112s
#pysam
984214
real 33m12.417s
user 33m11.920s
sys 0m0.244s
""".split("#")
vals = [x.strip() for x in vals if x.strip()]
key = 'time (seconds)'
def parse_group(g):
    """Parse one '#tool' group of `time` output.

    Returns an OrderedDict with the tool 'name' and its user time converted
    to seconds under the module-level ``key`` column header.
    """
    group_lines = g.split("\n")
    # The 'user XmY.Zs' line carries the CPU time; strip the trailing 's'.
    user_line = next(line for line in group_lines if line.startswith('user'))
    stamp = user_line.split()[1].rstrip('s')
    stamp_parts = stamp.split('m')
    result = OrderedDict()
    result['name'] = group_lines[0].strip()
    result[key] = 60 * float(stamp_parts[0]) + float(stamp_parts[1])
    return result
# Parse every timing group, then express each tool's time as a ratio of
# cyvcf2's time and format both columns for display.
tbl = [parse_group(g) for g in vals]
base = next(d for d in tbl if d['name'] == 'cyvcf2')[key]
for d in tbl:
    d['ratio'] = "%.2f" % (d[key] / base)
    d[key] = "%.1f" % d[key]
# Emit the result as a LaTeX table wrapped in a float environment.
print(r"\begin{table}[h]")
print(r"\caption{Timing VCF filtering}")
print(tabulate(tbl, headers="keys", tablefmt="latex"))
print(r"\end{table}")
| brentp/cyvcf2 | scripts/table.py | Python | mit | 1,060 | [
"pysam"
] | 089843496cc6f76934e4ee04579dc2941fed64f093af0dfaa77f58122e0fa4e5 |
import tensorflow as tf
import re
# Command-line flags for the CNN text model. Fix: the help strings for
# sequence_length/embedding_size/NUM_CLASSES/num_filters were all the
# copy-pasted "Number of batches to run." and described none of the flags.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")
tf.app.flags.DEFINE_integer('sequence_length', 1200,
                            """Token length of each input sequence.""")
tf.app.flags.DEFINE_integer('embedding_size', 100,
                            """Dimensionality of the word embeddings.""")
tf.app.flags.DEFINE_integer('NUM_CLASSES', 2,
                            """Number of output classes.""")
tf.app.flags.DEFINE_integer('num_filters', 64,
                            """Number of convolution filters per filter size.""")
tf.flags.DEFINE_string("filter_sizes", "2,3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
# FLAGS.num_filters
tf.flags.DEFINE_string("TOWER_NAME", "tower", "multi gpu tower name")
# FLAGS.TOWER_NAME = 'tower'
def _variable_on_cpu(name, shape, initializer, trainable):
    """Create a variable explicitly pinned to host (CPU) memory.

    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for the variable
        trainable: whether the optimizer should update this variable

    Returns:
        Variable Tensor placed on /cpu:0
    """
    with tf.device('/cpu:0'):
        if FLAGS.use_fp16:
            var_dtype = tf.float16
        else:
            var_dtype = tf.float32
        return tf.get_variable(name, shape, initializer=initializer,
                               dtype=var_dtype, trainable=trainable)
def _variable_with_weight_decay(name, shape, stddev, wd, trainable):
    """Create a truncated-normal-initialized variable, optionally L2-regularized.

    When ``wd`` is not None, an L2 penalty (wd * l2_loss(var)) named
    'weight_loss' is added to the 'losses' collection so the total loss
    picks it up.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of the truncated Gaussian initializer
        wd: L2 weight-decay coefficient, or None for no regularization
        trainable: whether the optimizer should update this variable

    Returns:
        Variable Tensor
    """
    var_dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    normal_init = tf.truncated_normal_initializer(stddev=stddev, dtype=var_dtype)
    var = _variable_on_cpu(name, shape, normal_init, trainable)
    if wd is not None:
        penalty = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', penalty)
    return var
def _variable_with_weight_decay_xavier(name, shape, wd, trainable):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a Xavier (Glorot)
    initializer - unlike _variable_with_weight_decay, which uses a
    truncated normal, so there is no stddev parameter here.
    A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
        trainable: whether the optimizer should update this variable

    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    # NOTE(review): 'dtype' is computed but never passed on - the xavier
    # initializer and get_variable will use their defaults, so use_fp16 has
    # no effect here.
    var = _variable_on_cpu(
        name,
        shape,
        tf.contrib.layers.xavier_initializer(),
        trainable)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor

    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % FLAGS.TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity',
                      tf.nn.zero_fraction(x))
class CNN(object):
    """Kim-style multi-filter-width text CNN (TF 1.x graph mode).

    Owns its own embedding variable; see CnnMulti for the variant that
    shares an externally supplied embedding. NOTE(review): the many bare
    print statements are Python-2-only leftover debug output.
    """
    def __init__(self, vocab_size, trainable=False):
        # Token-id inputs and one-hot labels.
        self.input_x = tf.placeholder(tf.int32, [None, FLAGS.sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, FLAGS.NUM_CLASSES], name="input_y")
        # Placeholder used to feed a pre-trained embedding table.
        self.embedding = tf.placeholder(tf.float32, [vocab_size, FLAGS.embedding_size])
        print 'self.embedding', self.embedding
        self.w2v = _variable_with_weight_decay('embedding',
                                          shape=[vocab_size, FLAGS.embedding_size],
                                          stddev=0.1,
                                          wd=None,
                                          trainable=False)
        print 'self.w2v', self.w2v
        # Run this op (feeding self.embedding) to load the pretrained table.
        self.embedding_params = self.w2v.assign(self.embedding)
        print 'self.embedding_params',self.embedding_params
        # embedding_init = self.w2v.assign(self.embedding)
    def lookup(self):
        # Map token ids to embeddings and add a trailing channel dim for conv2d.
        # embedded_tokens = tf.nn.embedding_lookup(self.embedding_params, self.input_x)
        embedded_tokens = tf.nn.embedding_lookup(self.w2v, self.input_x)
        print 'embedded_tokens', embedded_tokens
        self.embedded_tokens_expanded = tf.expand_dims(embedded_tokens, -1)
        # (?, 1200, 100, 1)
        print 'embedded_tokens_expanded', self.embedded_tokens_expanded
        return self.embedded_tokens_expanded, self.input_y
    def inference(self, txts, dropout_keep_prob=1.0):
        """Build the cnn based sentiment prediction model.
        Args:
            txts: text returned from get_inputs().
        Returns:
            Logits.
        """
        # We instantiate all variables using tf.get_variable() instead of
        # tf.Variable() in order to share variables across multiple GPU training runs.
        # If we only ran this model on a single GPU, we could simplify this function
        # by replacing all instances of tf.get_variable() with tf.Variable().
        #
        pooled_outputs = []
        # One conv + max-over-time pooling branch per filter width.
        for i, filter_size in enumerate(list(map(int, FLAGS.filter_sizes.split(",")))):
            with tf.variable_scope("conv-maxpool-%s" % filter_size) as scope:
                cnn_shape = [filter_size, FLAGS.embedding_size, 1, FLAGS.num_filters]
                kernel = _variable_with_weight_decay('weights',
                                                     shape=cnn_shape,
                                                     stddev=0.1,
                                                     wd=None,
                                                     trainable=True)
                conv = tf.nn.conv2d(txts, kernel, [1, 1, 1, 1], padding='VALID')
                biases = _variable_on_cpu('biases', [FLAGS.num_filters], tf.constant_initializer(0.0), trainable=True)
                pre_activation = tf.nn.bias_add(conv, biases)
                conv_out = tf.nn.relu(pre_activation, name=scope.name)
                _activation_summary(conv_out)
                # Pool over the full remaining time dimension (max-over-time).
                ksize = [1, FLAGS.sequence_length - filter_size + 1, 1, 1]
                print 'filter_size', filter_size
                print 'ksize', ksize
                print 'conv_out', conv_out
                pooled = tf.nn.max_pool(conv_out, ksize=ksize, strides=[1, 1, 1, 1],
                                        padding='VALID', name='pool1')
                norm_pooled = tf.nn.lrn(pooled, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                                        name='norm1')
                # pooled_outputs.append(pooled)
                pooled_outputs.append(norm_pooled)
        # print 'norm1', norm1
        num_filters_total = FLAGS.num_filters * len(list(map(int, FLAGS.filter_sizes.split(","))))
        h_pool = tf.concat(pooled_outputs, 3)
        # NOTE(review): duplicated statement - the second assignment is redundant.
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
        print 'h_pool', h_pool
        print 'h_pool_flat', h_pool_flat
        # NOTE(review): h_drop is never used below - the softmax layer reads
        # h_pool_flat, so dropout is effectively NOT applied. Verify intent.
        h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
        # num_filters_total = num_filters * 1
        # norm_flat = tf.reshape(norm1, [-1, num_filters_total])
        with tf.variable_scope('softmax_linear') as scope:
            weights = _variable_with_weight_decay_xavier('weights', [num_filters_total, FLAGS.NUM_CLASSES],
                                                         wd=0.2, trainable=True)
            biases = _variable_on_cpu('biases', [FLAGS.NUM_CLASSES],
                                      tf.constant_initializer(0.1), trainable=True)
            softmax_linear = tf.add(tf.matmul(h_pool_flat, weights), biases, name=scope.name)
            _activation_summary(softmax_linear)
        return softmax_linear
    def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.

        Add summary for "Loss" and "Loss/avg".
        Args:
            logits: Logits from inference().
            labels: one-hot labels of shape [batch_size, NUM_CLASSES]
                (softmax_cross_entropy_with_logits expects one-hot, not
                sparse class indices).
        Returns:
            (total loss tensor, accuracy tensor)
        """
        # Calculate the average cross entropy loss across the batch.
        # labels = tf.cast(labels, tf.int64)
        # labels = tf.cast(tf.argmax(labels, 1), tf.int64)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross_entropy_per_example')
        # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        # NOTE(review): 'golds' is only created so the op exists under a known
        # name in the graph; the Python variable itself is unused.
        golds = tf.argmax(labels, 1, name="golds")
        predictions = tf.argmax(logits, 1, name="predictions")
        correct_predictions = tf.equal(predictions, tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        # The total loss is defined as the cross entropy loss plus all of the weight
        # decay terms (L2 loss).
        return tf.add_n(tf.get_collection('losses'), name='total_loss'), accuracy
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first instantiation constructs the object; every later call with
    ANY arguments returns the cached instance (later arguments are
    silently ignored). Not thread-safe.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct only on the first call; afterwards reuse the cached object.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class W2V(object):
    """Holder for the shared word-embedding variable plus its feed/assign ops.

    NOTE(review): '__metaclass__' only takes effect on Python 2; on
    Python 3 this class would NOT be a singleton (it would need
    ``class W2V(metaclass=Singleton)``).
    """
    __metaclass__ = Singleton
    def __init__(self, vocab_size, trainable):
        # 'trainable' selects between a fine-tuned and a static (frozen)
        # embedding table.
        if trainable is True:
            print 'w2v is trainable'
        else:
            print 'w2v is STATIC (NOT trainable)'
        # Placeholder used to feed pre-trained embeddings into the graph.
        self.embedding = tf.placeholder(tf.float32, [vocab_size, FLAGS.embedding_size])
        self.w2v = _variable_with_weight_decay('embedding',
                                          shape=[vocab_size, FLAGS.embedding_size],
                                          stddev=0.1,
                                          wd=None,
                                          trainable=trainable)
        # Run this op once (feeding self.embedding) to load the table.
        self.embedding_params = self.w2v.assign(self.embedding)
class CnnMulti(object):
    """Text CNN variant that shares an externally supplied embedding table.

    Placeholders are suffixed with 'namespace' so several instances can
    coexist in one graph. NOTE(review): inference() and loss() are near
    copies of CNN's methods (including the same quirks: duplicated h_pool
    assignment, unused h_drop) - consider extracting shared helpers.
    """
    def __init__(self, namespace):
        # Token-id inputs and one-hot labels, uniquely named per instance.
        self.input_x = tf.placeholder(tf.int32, [None, FLAGS.sequence_length], name="input_x_%s" % namespace)
        self.input_y = tf.placeholder(tf.float32, [None, FLAGS.NUM_CLASSES], name="input_y_%s" % namespace)
    def lookup(self, w2v_vars):
        # Map token ids through the SHARED embedding variable (e.g. W2V().w2v)
        # and add a trailing channel dim for conv2d.
        embedded_tokens = tf.nn.embedding_lookup(w2v_vars, self.input_x)
        self.embedded_tokens_expanded = tf.expand_dims(embedded_tokens, -1)
        return self.embedded_tokens_expanded, self.input_y
    def inference(self, txts, dropout_keep_prob=1.0):
        """Build the cnn based sentiment prediction model.
        Args:
            txts: text returned from get_inputs().
        Returns:
            Logits.
        """
        # We instantiate all variables using tf.get_variable() instead of
        # tf.Variable() in order to share variables across multiple GPU training runs.
        # If we only ran this model on a single GPU, we could simplify this function
        # by replacing all instances of tf.get_variable() with tf.Variable().
        #
        pooled_outputs = []
        # One conv + max-over-time pooling branch per filter width.
        for i, filter_size in enumerate(list(map(int, FLAGS.filter_sizes.split(",")))):
            with tf.variable_scope("conv-maxpool-%s" % filter_size) as scope:
                cnn_shape = [filter_size, FLAGS.embedding_size, 1, FLAGS.num_filters]
                kernel = _variable_with_weight_decay('weights',
                                                     shape=cnn_shape,
                                                     stddev=0.1,
                                                     wd=None,
                                                     trainable=True)
                conv = tf.nn.conv2d(txts, kernel, [1, 1, 1, 1], padding='VALID')
                biases = _variable_on_cpu('biases', [FLAGS.num_filters], tf.constant_initializer(0.0), trainable=True)
                pre_activation = tf.nn.bias_add(conv, biases)
                conv_out = tf.nn.relu(pre_activation, name=scope.name)
                _activation_summary(conv_out)
                # Pool over the full remaining time dimension (max-over-time).
                ksize = [1, FLAGS.sequence_length - filter_size + 1, 1, 1]
                print 'filter_size', filter_size
                print 'ksize', ksize
                print 'conv_out', conv_out
                pooled = tf.nn.max_pool(conv_out, ksize=ksize, strides=[1, 1, 1, 1],
                                        padding='VALID', name='pool1')
                norm_pooled = tf.nn.lrn(pooled, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                                        name='norm1')
                # pooled_outputs.append(pooled)
                pooled_outputs.append(norm_pooled)
        # print 'norm1', norm1
        num_filters_total = FLAGS.num_filters * len(list(map(int, FLAGS.filter_sizes.split(","))))
        h_pool = tf.concat(pooled_outputs, 3)
        # NOTE(review): duplicated statement - the second assignment is redundant.
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
        print 'h_pool', h_pool
        print 'h_pool_flat', h_pool_flat
        # NOTE(review): h_drop is never used below - the softmax layer reads
        # h_pool_flat, so dropout is effectively NOT applied. Verify intent.
        h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
        # num_filters_total = num_filters * 1
        # norm_flat = tf.reshape(norm1, [-1, num_filters_total])
        with tf.variable_scope('softmax_linear') as scope:
            weights = _variable_with_weight_decay_xavier('weights', [num_filters_total, FLAGS.NUM_CLASSES],
                                                         wd=0.2, trainable=True)
            biases = _variable_on_cpu('biases', [FLAGS.NUM_CLASSES],
                                      tf.constant_initializer(0.1), trainable=True)
            softmax_linear = tf.add(tf.matmul(h_pool_flat, weights), biases, name=scope.name)
            _activation_summary(softmax_linear)
        return softmax_linear
    def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.

        Add summary for "Loss" and "Loss/avg".
        Args:
            logits: Logits from inference().
            labels: one-hot labels of shape [batch_size, NUM_CLASSES]
                (softmax_cross_entropy_with_logits expects one-hot, not
                sparse class indices).
        Returns:
            (total loss tensor, accuracy tensor)
        """
        # Calculate the average cross entropy loss across the batch.
        # labels = tf.cast(labels, tf.int64)
        # labels = tf.cast(tf.argmax(labels, 1), tf.int64)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross_entropy_per_example')
        # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        # NOTE(review): 'golds' is only created so the op exists under a known
        # name in the graph; the Python variable itself is unused.
        golds = tf.argmax(labels, 1, name="golds")
        predictions = tf.argmax(logits, 1, name="predictions")
        correct_predictions = tf.equal(predictions, tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        # The total loss is defined as the cross entropy loss plus all of the weight
        # decay terms (L2 loss).
        return tf.add_n(tf.get_collection('losses'), name='total_loss'), accuracy
| bgshin/vddc | src/models/cnn_model.py | Python | apache-2.0 | 16,090 | [
"Gaussian"
] | 7e5289bad12f18a9676c55b1d3eedd8e21362216e360d052f36c03d51f1c72e5 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import django.db.transaction
import tldap.transaction
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from karaage.projects.models import Project
# Python 2/3 compatibility shim: on Python 2 ``raw_input`` exists and is
# aliased to ``input`` so the confirmation prompt below behaves the same on
# both versions; on Python 3 the name lookup raises NameError and the
# builtin ``input`` is used unchanged.
try:
    input = raw_input
except NameError:
    pass
class Command(BaseCommand):
    """Management command that renames a project's pid after confirmation.

    The rename is wrapped in both a database transaction and an LDAP
    transaction so the two stores stay consistent.
    """

    help = 'Change a pid for a project and all accounts for that project'

    def add_arguments(self, parser):
        # Positional arguments: the current pid and its replacement.
        parser.add_argument('old_pid', type=str)
        parser.add_argument('new_pid', type=str)

    @django.db.transaction.atomic
    @tldap.transaction.commit_on_success
    def handle(self, *args, **options):
        """Rename the project identified by old_pid to new_pid.

        Raises CommandError if the old pid does not exist or the new pid
        fails the site-configured validation pattern.  Exits without
        changes if the operator answers "no" at the prompt.
        """
        old = options['old_pid']
        new = options['new_pid']

        try:
            project = Project.objects.get(pid=old)
        except Project.DoesNotExist:
            raise CommandError('project %s does not exist' % old)

        # Anchor the configured pattern so the whole pid must match.
        project_re = re.compile(r'^%s$' % settings.PROJECT_VALIDATION_RE)
        if not project_re.search(new):
            raise CommandError(settings.PROJECT_VALIDATION_ERROR_MSG)

        # Interactive confirmation loop; only an explicit "yes" proceeds.
        # BUG FIX: the prompt was missing the closing quote after the
        # second %s ('... to "%s (yes,no): ').
        while True:
            confirm = input(
                'Change project "%s" to "%s" (yes,no): ' % (old, new))
            if confirm == 'yes':
                break
            elif confirm == 'no':
                # sys.exit raises SystemExit, so no 'return' is needed.
                sys.exit(0)
            else:
                print("Please enter yes or no")

        project.pid = new
        project.save()
        print("Changed pid on project")
        print("Done")
| brianmay/karaage | karaage/management/commands/change_pid.py | Python | gpl-3.0 | 2,212 | [
"Brian"
] | 9c220c921a4b8dd9841dd89e5197bfdef5b082fff7f429416a689931ef6b75ef |
#!/usr/bin/python
# coding=utf-8
# Concept from Jaroslaw Zachwieja <grok!warwick.ac.uk> & TJ <linux!tjworld.net>
# from their work in gegpsd.py included in gpsd project (http://catb.org/gpsd)
# This is a time limited demo for the curious, or those without a gps. If it
# doesn't work, you need to use the 'regular' gegps.py and use another gps device,
# as "host='wadda.ddns.net'" will not be up forever. 20141205 Psst, Line #17.
"""creates Google Earth kml file (/tmp/gps3_live.kml) for realtime (4 second GE default) updates of gps coordinates"""
__author__ = 'Moe'
__copyright__ = "Copyright 2014 Moe"
__license__ = "MIT" # TODO: figure this out and finish requirements
__version__ = "0.1a"
import time
import gps3
the_connection = gps3.GPSDSocket(host='wadda.ddns.net') # A demo address TODO: needs work for commandline host selection
the_fix = gps3.Fix()
the_link = '/tmp/gps3_live.kml' # AFAIK, 'Links' call href on time events or entry/exit Multiple href may be possible.
the_file = '/tmp/gps3_static.kml'
the_history = []
live_link = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<kml xmlns=\"http://www.opengis.net/kml/2.2\" xmlns:gx=\"http://www.google.com/kml/ext/2.2\" xmlns:kml=\"http://www.opengis.net/kml/2.2\" xmlns:atom=\"http://www.w3.org/2005/Atom\">\n"
"<NetworkLink>\n"
" <name>GPS3 Live</name>\n"
" <Link>\n"
" <href>{0}</href>\n"
" <refreshMode>onInterval</refreshMode>\n"
" </Link>\n"
"</NetworkLink>\n"
"</kml>").format(the_file) # inserts 'the file' into a refresh mode default 4 second
f = open(the_link, 'w')
f.write(live_link)
f.close()
# Main acquisition loop: every gpsd sentence refreshes the fix and, once the
# TPV data is numeric (i.e. a valid fix), rewrites the static kml that the
# NetworkLink keeps reloading.  Ctrl-C closes the socket cleanly.
try:
    for new_data in the_connection:
        if new_data:
            the_fix.refresh(new_data)
            if not isinstance(the_fix.TPV['speed'], str):  # lat/lon might be a better determinate of when data is 'valid'
                speed = the_fix.TPV['speed']
                latitude = the_fix.TPV['lat']
                longitude = the_fix.TPV['lon']
                altitude = the_fix.TPV['alt']
                if isinstance(the_fix.TPV['track'], str):  # 'track' frequently is missing and returns as 'n/a'
                    heading = the_fix.TPV['track']
                else:
                    heading = round(the_fix.TPV['track'])  # and heading percision in hundreths is just clutter.
                the_history.append(longitude)
                the_history.append(latitude)
                the_history.append(altitude)
                hist_string = str(the_history).replace(' ', '')  # GE > 7.1.xxxx spits up on spaces in <coordinates>
                static_file = ("<?xml version = \"1.0\" encoding = \"UTF-8\"?>\n"
                               "<kml xmlns = \"http://www.opengis.net/kml/2.2\" xmlns:gx = \"http://www.google.com/kml/ext/2.2\" xmlns:kml = \"http://www.opengis.net/kml/2.2\" xmlns:atom = \"http://www.w3.org/2005/Atom\">\n"
                               "<Folder>\n"
                               "    <description> Frankie likes walking and stopping </description>\n"
                               "    <Placemark id = \"point\">\n"
                               "        <name>{0:.2f} m/s {4}°</name>\n"
                               "        <description>Current gps reading\nAltitude: {3} Metres</description>\n"
                               "        <LookAt>\n"
                               "            <longitude>{1}</longitude>\n"
                               "            <latitude>{2}</latitude>\n"
                               "            <range>600</range>\n"
                               "            <tilt>0</tilt>\n"
                               "            <heading>0</heading>\n"
                               "        </LookAt>\n"
                               "        <Point>\n"
                               "            <coordinates>{1},{2},{3}</coordinates>\n"
                               "        </Point>\n"
                               "    </Placemark>\n"
                               "    <Placemark id = \"path\">\n"
                               "        <name>Pin Scratches</name>\n"
                               "        <description>GPS Trail of Tears</description>\n"
                               "        <LineString>\n"
                               "            <tessellate>1</tessellate>\n"
                               "            <coordinates>{5}</coordinates>\n"
                               "        </LineString>\n"
                               "    </Placemark>\n"
                               "</Folder>\n"
                               "</kml>").format(speed, longitude, latitude, altitude, heading, hist_string.strip('[]'))
                # Overwrite the static kml in place; GE picks it up on its
                # next refresh cycle.
                f = open(the_file, 'w')
                f.write(static_file)
                f.close()
        else:
            pass
        time.sleep(1)  # default GE refresh rate is 4 seconds, therefore no refresh older than 1 second from itself.
except KeyboardInterrupt:
    the_connection.close()
    print("\nTerminated by user\nGood Bye.\n")
# End
| matbor/gps3 | demo_gegps3.py | Python | mit | 4,957 | [
"MOE"
] | a446fec34fbc2d39d39202b6b127d60acf09cac5f2cd990d0e6214e65528b56c |
"""
Acceptance tests for Studio's Setting pages
"""
import re
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
class CertificatesTest(StudioCourseTest):
    """
    Tests for settings/certificates Page.

    Each test drives the Studio certificates UI through page objects; the
    assertions check both the in-page state and (where relevant) the state
    after a page refresh.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        # is_staff=True: certificate settings are staff-only in Studio.
        super(CertificatesTest, self).setUp(is_staff=True)
        self.certificates_page = CertificatesPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.advanced_settings_page = AdvancedSettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Filled per-test with advanced settings to push to the course.
        self.course_advanced_settings = dict()
    def make_signatory_data(self, prefix='First'):
        """
        Makes signatory dict which can be used in the tests to create certificates
        """
        return {
            'name': '{prefix} Signatory Name'.format(prefix=prefix),
            'title': '{prefix} Signatory Title'.format(prefix=prefix),
            'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
        }
    def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
        """
        Creates a new certificate and verifies that it was properly created.

        `existing_certs` is the number of certificates expected before the
        new one is added; it also selects which "add" button is visible.
        Returns the page object for the newly created certificate.
        """
        self.assertEqual(existing_certs, len(self.certificates_page.certificates))
        if existing_certs == 0:
            self.certificates_page.wait_for_first_certificate_button()
            self.certificates_page.click_first_certificate_button()
        else:
            self.certificates_page.wait_for_add_certificate_button()
            self.certificates_page.click_add_certificate_button()
        certificate = self.certificates_page.certificates[existing_certs]
        # Set the certificate properties
        certificate.course_title = course_title_override
        # add signatories
        added_signatories = 0
        for idx, signatory in enumerate(signatories):
            certificate.signatories[idx].name = signatory['name']
            certificate.signatories[idx].title = signatory['title']
            certificate.signatories[idx].organization = signatory['organization']
            certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
            added_signatories += 1
            if len(signatories) > added_signatories:
                certificate.click_add_signatory_button()
        # Save the certificate
        self.assertEqual(certificate.get_text('.action-primary'), "Create")
        certificate.click_create_certificate_button()
        self.assertIn(course_title_override, certificate.course_title)
        return certificate
    def test_no_certificates_by_default(self):
        """
        Scenario: Ensure that message telling me to create a new certificate is
        shown when no certificate exist.
        Given I have a course without certificates
        When I go to the Certificates page in Studio
        Then I see "You have not created any certificates yet." message and
        a link with text "Set up your certificate"
        """
        self.certificates_page.visit()
        self.assertTrue(self.certificates_page.no_certificates_message_shown)
        self.assertIn(
            "You have not created any certificates yet.",
            self.certificates_page.no_certificates_message_text
        )
        self.assertIn(
            "Set up your certificate",
            self.certificates_page.new_certificate_link_text
        )
    def test_can_create_and_edit_certficate(self):
        """
        Scenario: Ensure that the certificates can be created and edited correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set new the course title override and signatory and click the button 'Create'
        Then I see the new certificate is added and has correct data
        When I edit the certificate
        And I change the name and click the button 'Save'
        Then I see the certificate is saved successfully and has the new name
        """
        self.certificates_page.visit()
        self.certificates_page.wait_for_first_certificate_button()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first'), self.make_signatory_data('second')]
        )
        # Edit the certificate
        certificate.click_edit_certificate_button()
        certificate.course_title = "Updated Course Title Override 2"
        self.assertEqual(certificate.get_text('.action-primary'), "Save")
        certificate.click_save_certificate_button()
        self.assertIn("Updated Course Title Override 2", certificate.course_title)
    def test_can_delete_certificate(self):
        """
        Scenario: Ensure that the user can delete certificate.
        Given I have a course with 1 certificate
        And I go to the Certificates page
        When I delete the Certificate with name "New Certificate"
        Then I see that there is no certificate
        When I refresh the page
        Then I see that the certificate has been deleted
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first'), self.make_signatory_data('second')]
        )
        certificate.wait_for_certificate_delete_button()
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # Delete the certificate we just created
        certificate.click_delete_certificate_button()
        self.certificates_page.click_confirmation_prompt_primary_button()
        # Reload the page and confirm there are no certificates
        self.certificates_page.visit()
        self.assertEqual(len(self.certificates_page.certificates), 0)
    def test_can_create_and_edit_signatories_of_certficate(self):
        """
        Scenario: Ensure that the certificates can be created with signatories and edited correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set new the course title override and signatory and click the button 'Create'
        Then I see the new certificate is added and has one signatory inside it
        When I click 'Edit' button of signatory panel
        And I set the name and click the button 'Save' icon
        Then I see the signatory name updated with newly set name
        When I refresh the certificates page
        Then I can see course has one certificate with new signatory name
        When I click 'Edit' button of signatory panel
        And click on 'Close' button
        Then I can see no change in signatory detail
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first')]
        )
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # Edit the signatory in certificate
        signatory = certificate.signatories[0]
        signatory.edit()
        signatory.name = 'Updated signatory name'
        signatory.title = 'Update signatory title'
        signatory.organization = 'Updated signatory organization'
        signatory.save()
        self.assertEqual(len(self.certificates_page.certificates), 1)
        #Refreshing the page, So page have the updated certificate object.
        self.certificates_page.refresh()
        signatory = self.certificates_page.certificates[0].signatories[0]
        self.assertIn("Updated signatory name", signatory.name)
        self.assertIn("Update signatory title", signatory.title)
        self.assertIn("Updated signatory organization", signatory.organization)
        signatory.edit()
        signatory.close()
        self.assertIn("Updated signatory name", signatory.name)
    def test_can_cancel_creation_of_certificate(self):
        """
        Scenario: Ensure that creation of a certificate can be canceled correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set name of certificate and click the button 'Cancel'
        Then I see that there is no certificates in the course
        """
        self.certificates_page.visit()
        self.certificates_page.click_first_certificate_button()
        certificate = self.certificates_page.certificates[0]
        certificate.course_title = "Title Override"
        certificate.click_cancel_edit_certificate()
        self.assertEqual(len(self.certificates_page.certificates), 0)
    def test_line_breaks_in_signatory_title(self):
        """
        Scenario: Ensure that line breaks are properly reflected in certificate
        Given I have a certificate with signatories
        When I add signatory title with new line character
        Then I see line break in certificate title
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [
                {
                    'name': 'Signatory Name',
                    'title': 'Signatory title with new line character \n',
                    'organization': 'Signatory Organization',
                }
            ]
        )
        certificate.wait_for_certificate_delete_button()
        # Make sure certificate is created
        self.assertEqual(len(self.certificates_page.certificates), 1)
        signatory_title = self.certificates_page.get_first_signatory_title()
        # The newline in the title should have been rendered as a <br> tag.
        self.assertNotEqual([], re.findall(r'<br\s*/?>', signatory_title))
    def test_course_number_in_certificate_details_view(self):
        """
        Scenario: Ensure that Course Number is displayed in certificate details view
        Given I have a certificate
        When I visit certificate details page on studio
        Then I see Course Number next to Course Name
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first')]
        )
        certificate.wait_for_certificate_delete_button()
        # Make sure certificate is created
        self.assertEqual(len(self.certificates_page.certificates), 1)
        course_number = self.certificates_page.get_course_number()
        self.assertEqual(self.course_info['number'], course_number)
    def test_course_number_override_in_certificate_details_view(self):
        """
        Scenario: Ensure that Course Number Override is displayed in certificate details view
        Given I have a certificate
        When I visit certificate details page on studio
        Then I see Course Number Override next to Course Name
        """
        self.course_advanced_settings.update(
            {'Course Number Display String': 'Course Number Override String'}
        )
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first')]
        )
        certificate.wait_for_certificate_delete_button()
        # Make sure certificate is created
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # set up course number override in Advanced Settings Page
        self.advanced_settings_page.visit()
        self.advanced_settings_page.set_values(self.course_advanced_settings)
        self.advanced_settings_page.wait_for_ajax()
        self.certificates_page.visit()
        course_number_override = self.certificates_page.get_course_number_override()
        self.assertEqual(self.course_advanced_settings['Course Number Display String'], course_number_override)
| adoosii/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_certificates.py | Python | agpl-3.0 | 12,236 | [
"VisIt"
] | bbbcd44265757fade4b5df17b8dea3384da4b7a18afce9e177e9d74c4bef156a |
"""
Wrappers for calls to Mayavi2's `mlab` module for plotting
:mod:`fatiando.mesher` objects and automating common tasks.
**Objects**
* :func:`~fatiando.vis.myv.prisms`
* :func:`~fatiando.vis.myv.polyprisms`
* :func:`~fatiando.vis.myv.points`
* :func:`~fatiando.vis.myv.tesseroids`
**Misc objects**
* :func:`~fatiando.vis.myv.outline`
* :func:`~fatiando.vis.myv.axes`
* :func:`~fatiando.vis.myv.wall_north`
* :func:`~fatiando.vis.myv.wall_south`
* :func:`~fatiando.vis.myv.wall_east`
* :func:`~fatiando.vis.myv.wall_west`
* :func:`~fatiando.vis.myv.wall_top`
* :func:`~fatiando.vis.myv.wall_bottom`
* :func:`~fatiando.vis.myv.earth`
* :func:`~fatiando.vis.myv.core`
* :func:`~fatiando.vis.myv.continents`
* :func:`~fatiando.vis.myv.meridians`
* :func:`~fatiando.vis.myv.parallels`
**Helpers**
* :func:`~fatiando.vis.myv.figure`
* :func:`~fatiando.vis.myv.title`
* :func:`~fatiando.vis.myv.show`
* :func:`~fatiando.vis.myv.savefig`
----
"""
import numpy
from fatiando import utils
from fatiando.constants import MEAN_EARTH_RADIUS
# Module-level handles for the Mayavi/TVTK modules.  They start as None and
# are filled in on first use by the _lazy_import_* helpers below, so that
# importing this module stays fast when 3D plotting is never actually used.
mlab = None
tvtk = None
BuiltinSurface = None
def _lazy_import_BuiltinSurface():
    """
    Import ``BuiltinSurface`` on first use, caching it in the module-level
    name so subsequent calls are no-ops.
    """
    global BuiltinSurface
    if BuiltinSurface is not None:
        return
    # The ``global`` declaration above makes this import bind module-wide.
    from mayavi.sources.builtin_surface import BuiltinSurface
def _lazy_import_mlab():
    """
    Import ``mlab`` on first use, caching it in the module-level name so
    subsequent calls are no-ops.
    """
    global mlab
    if mlab is not None:
        return
    # Mayavi2 < 4 shipped under the ``enthought`` namespace package.
    try:
        from mayavi import mlab
    except ImportError:
        from enthought.mayavi import mlab
def _lazy_import_tvtk():
    """
    Import ``tvtk`` on first use, caching it in the module-level name so
    subsequent calls are no-ops.
    """
    global tvtk
    if tvtk is not None:
        return
    # Mayavi2 < 4 shipped under the ``enthought`` namespace package.
    try:
        from tvtk.api import tvtk
    except ImportError:
        from enthought.tvtk.api import tvtk
def title(text, color=(0, 0, 0), size=0.3, height=1):
    """
    Add a title to the current Mayavi figure.

    .. warning:: Must be called **after** something has been plotted to the
        figure (e.g., prisms). This is a bug.

    Parameters:

    * text : str
        The title text
    * color : tuple = (r, g, b)
        RGB color of the text
    * size : float
        Text size
    * height : float
        Vertical screen position of the title

    """
    _lazy_import_mlab()
    style = dict(color=color, size=size, height=height)
    mlab.title(text, **style)
def savefig(fname, magnification=None):
    """
    Save a snapshot of the current Mayavi figure to a file.

    Parameters:

    * fname : str
        Output file name; the format is deduced from the extension.
    * magnification : int or None
        If given, the scaling between screen pixels and the pixels written
        to the file.

    """
    _lazy_import_mlab()
    kwargs = {}
    if magnification is not None:
        kwargs['magnification'] = magnification
    mlab.savefig(fname, **kwargs)
def show():
    """
    Display the Mayavi2 3D window and block until it is closed.
    """
    _lazy_import_mlab()
    mlab.show()
def points(points, color=(0, 0, 0), size=200., opacity=1, spherical=False):
    """
    Plot a series of 3D points.

    .. note:: Still doesn't plot points with physical properties.

    Parameters:

    * points : list
        Points to plot, each an [x, y, z] list of coordinates
    * color : tuple = (r, g, b)
        RGB color of the points
    * size : float
        Point size in meters
    * opacity : float
        Decimal percentage of opacity
    * spherical : True or False
        If True, interpret points as [lon, lat, height] (degrees and meters)
        and convert them to Cartesian before plotting

    Returns:

    * glyph
        The Mayavi Glyph object corresponding to the points

    """
    _lazy_import_mlab()
    coords = numpy.transpose(points)
    if spherical:
        x, y, z = utils.sph2cart(coords[0], coords[1], coords[2])
    else:
        x, y, z = coords
    glyph = mlab.points3d(x, y, z, color=color, opacity=opacity)
    # Fixed-size glyphs: turn off scalar scaling and set the radius directly.
    glyph.glyph.glyph.scaling = False
    glyph.glyph.glyph_source.glyph_source.radius = size
    return glyph
def polyprisms(prisms, prop=None, style='surface', opacity=1, edges=True,
               vmin=None, vmax=None, cmap='blue-red', color=None,
               linewidth=1, edgecolor=(0, 0, 0), scale=(1, 1, 1)):
    """
    Plot a list of 3D polygonal prisms using Mayavi2.

    Will not plot a value None in *prisms*.

    Parameters:

    * prisms : list of :class:`fatiando.mesher.PolygonalPrism`
        The prisms
    * prop : str or None
        The physical property of the prisms to use as the color scale. If a
        prism doesn't have *prop*, or if it is None, then it will not be
        plotted If prop is a vector (like magnetization), will use the
        intensity (norm).
    * style : str
        Either ``'surface'`` for solid prisms or ``'wireframe'`` for just the
        contour
    * opacity : float
        Decimal percentage of opacity
    * edges : True or False
        Wether or not to display the edges of the prisms in black lines. Will
        ignore this if ``style='wireframe'``
    * vmin, vmax : float
        Min and max values for the color scale. If *None* will default to
        the min and max of *prop* in the prisms.
    * cmap : Mayavi colormap
        Color map to use. See the 'Colors and Legends' menu on the Mayavi2 GUI
        for valid color maps.
    * color : None or tuple = (r, g, b)
        If not None, then for all prisms to have this RGB color
    * linewidth : float
        The width of the lines (edges) of the prisms.
    * edgecolor : tuple = (r, g, b)
        RGB of the color of the edges. If style='wireframe', then will be
        ignored. Use parameter *color* instead
    * scale : (sx, sy, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    Returns:

    * surface
        the last element on the pipeline

    """
    if style not in ['surface', 'wireframe']:
        raise ValueError("Invalid style '%s'" % (style))
    if opacity > 1. or opacity < 0:
        # BUG FIX: error message used to read "[1,0]" for the valid range.
        raise ValueError("Invalid opacity %g. Must be in range [0,1]"
                         % (opacity))
    # mlab and tvtk are really slow to import
    _lazy_import_mlab()
    _lazy_import_tvtk()
    if prop is None:
        label = 'scalar'
    else:
        label = prop
    # Build one PolyData holding all the prism faces; each vertex carries the
    # prism's scalar so Mayavi can color by physical property.
    points = []
    polygons = []
    scalars = []
    offset = 0
    for prism in prisms:
        if prism is None or (prop is not None and prop not in prism.props):
            continue
        x, y = prism.x, prism.y
        nverts = prism.nverts
        if prop is None:
            scalar = 0.
        else:
            p = prism.props[prop]
            if isinstance(p, int) or isinstance(p, float):
                scalar = p
            else:
                # Vector property (e.g., magnetization): color by intensity.
                scalar = numpy.linalg.norm(p)
        # The top surface
        points.extend(
            reversed(numpy.transpose([x, y, prism.z1 * numpy.ones_like(x)])))
        polygons.append(range(offset, offset + nverts))
        scalars.extend(scalar * numpy.ones(nverts))
        offset += nverts
        # The bottom surface
        points.extend(
            reversed(numpy.transpose([x, y, prism.z2 * numpy.ones_like(x)])))
        polygons.append(range(offset, offset + nverts))
        scalars.extend(scalar * numpy.ones(nverts))
        offset += nverts
        # The sides: one quad per pair of adjacent vertices
        for i in xrange(nverts):
            x1, y1 = x[i], y[i]
            x2, y2 = x[(i + 1) % nverts], y[(i + 1) % nverts]
            points.extend([[x1, y1, prism.z1], [x2, y2, prism.z1],
                           [x2, y2, prism.z2], [x1, y1, prism.z2]])
            polygons.append(range(offset, offset + 4))
            scalars.extend(scalar * numpy.ones(4))
            offset += 4
    mesh = tvtk.PolyData(points=points, polys=polygons)
    mesh.point_data.scalars = numpy.array(scalars)
    mesh.point_data.scalars.name = label
    if vmin is None:
        vmin = min(scalars)
    if vmax is None:
        vmax = max(scalars)
    if style == 'wireframe':
        surf = mlab.pipeline.surface(mlab.pipeline.add_dataset(mesh),
                                     vmax=vmax, vmin=vmin, colormap=cmap)
        surf.actor.property.representation = 'wireframe'
        surf.actor.property.line_width = linewidth
    if style == 'surface':
        # The triangle filter is needed because VTK doesnt seem to handle
        # convex polygons too well
        dataset = mlab.pipeline.triangle_filter(
            mlab.pipeline.add_dataset(mesh))
        surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin,
                                     colormap=cmap)
        surf.actor.property.representation = 'surface'
        surf.actor.property.edge_visibility = 0
        if edges:
            # Overlay a separate wireframe surface to draw the edges.
            edge = mlab.pipeline.surface(mlab.pipeline.add_dataset(mesh))
            edge.actor.property.representation = 'wireframe'
            edge.actor.mapper.scalar_visibility = 0
            edge.actor.property.line_width = linewidth
            edge.actor.property.opacity = opacity
            edge.actor.property.color = edgecolor
            edge.actor.actor.scale = scale
    surf.actor.property.opacity = opacity
    if color is not None:
        surf.actor.mapper.scalar_visibility = 0
        surf.actor.property.color = color
    surf.actor.actor.scale = scale
    return surf
def tesseroids(tesseroids, prop=None, style='surface', opacity=1, edges=True,
               vmin=None, vmax=None, cmap='blue-red', color=None,
               linewidth=1, edgecolor=(0, 0, 0), scale=(1, 1, 1)):
    """
    Plot a list of tesseroids using Mayavi2.

    Will not plot a value None in *tesseroids*

    Parameters:

    * tesseroids : list of :class:`fatiando.mesher.Tesseroid`
        The tesseroids
    * prop : str or None
        The physical property of the tesseroids to use as the color scale. If a
        tesseroid doesn't have *prop*, or if it is None, then it will not be
        plotted. If prop is a vector (like magnetization), will use the
        intensity (norm).
    * style : str
        Either ``'surface'`` for solid tesseroids or ``'wireframe'`` for just
        the contour
    * opacity : float
        Decimal percentage of opacity
    * edges : True or False
        Wether or not to display the edges of the tesseroids in black lines.
        Will ignore this if ``style='wireframe'``
    * vmin, vmax : float
        Min and max values for the color scale. If *None* will default to
        the min and max of *prop*.
    * cmap : Mayavi colormap
        Color map to use. See the 'Colors and Legends' menu on the Mayavi2 GUI
        for valid color maps.
    * color : None or tuple = (r, g, b)
        If not None, then for all tesseroids to have this RGB color
    * linewidth : float
        The width of the lines (edges) of the tesseroids.
    * edgecolor : tuple = (r, g, b)
        RGB of the color of the edges. If style='wireframe', then will be
        ignored. Use parameter *color* instead
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    Returns:

    * surface
        the last element on the pipeline

    """
    if style not in ['surface', 'wireframe']:
        raise ValueError("Invalid style '%s'" % (style))
    if opacity > 1. or opacity < 0:
        # BUG FIX: error message used to read "[1,0]" for the valid range.
        raise ValueError("Invalid opacity %g. Must be in range [0,1]"
                         % (opacity))
    # mlab and tvtk are really slow to import
    _lazy_import_mlab()
    _lazy_import_tvtk()
    if prop is None:
        label = 'scalar'
    else:
        label = prop
    # VTK parameters: each tesseroid becomes one 20-node cell (corners plus
    # edge midpoints) so the curved faces render smoothly.
    points = []
    cells = []
    offsets = []
    offset = 0
    mesh_size = 0
    celldata = []
    # To mark what index in the points the cell starts
    start = 0
    for tess in tesseroids:
        if tess is None or (prop is not None and prop not in tess.props):
            continue
        w, e, s, n, top, bottom = tess.get_bounds()
        w *= scale[0]
        e *= scale[0]
        s *= scale[1]
        n *= scale[1]
        top *= scale[2]
        bottom *= scale[2]
        if prop is None:
            scalar = 0.
        else:
            p = tess.props[prop]
            if isinstance(p, int) or isinstance(p, float):
                scalar = p
            else:
                # Vector property (e.g., magnetization): color by intensity.
                scalar = numpy.linalg.norm(p)
        # 8 corners followed by 12 edge midpoints, all in Cartesian.
        points.extend([
            utils.sph2cart(w, s, bottom),
            utils.sph2cart(e, s, bottom),
            utils.sph2cart(e, n, bottom),
            utils.sph2cart(w, n, bottom),
            utils.sph2cart(w, s, top),
            utils.sph2cart(e, s, top),
            utils.sph2cart(e, n, top),
            utils.sph2cart(w, n, top),
            utils.sph2cart(0.5 * (w + e), s, bottom),
            utils.sph2cart(e, 0.5 * (s + n), bottom),
            utils.sph2cart(0.5 * (w + e), n, bottom),
            utils.sph2cart(w, 0.5 * (s + n), bottom),
            utils.sph2cart(0.5 * (w + e), s, top),
            utils.sph2cart(e, 0.5 * (s + n), top),
            utils.sph2cart(0.5 * (w + e), n, top),
            utils.sph2cart(w, 0.5 * (s + n), top),
            utils.sph2cart(w, s, 0.5 * (top + bottom)),
            utils.sph2cart(e, s, 0.5 * (top + bottom)),
            utils.sph2cart(e, n, 0.5 * (top + bottom)),
            utils.sph2cart(w, n, 0.5 * (top + bottom))])
        cells.append(20)
        cells.extend(range(start, start + 20))
        start += 20
        offsets.append(offset)
        offset += 21
        celldata.append(scalar)
        mesh_size += 1
    cell_array = tvtk.CellArray()
    cell_array.set_cells(mesh_size, numpy.array(cells))
    # Cell type 25 is VTK_QUADRATIC_HEXAHEDRON (20-node cell).
    cell_types = numpy.array([25] * mesh_size, 'i')
    vtkmesh = tvtk.UnstructuredGrid(points=numpy.array(points, 'f'))
    vtkmesh.set_cells(cell_types, numpy.array(offsets, 'i'), cell_array)
    vtkmesh.cell_data.scalars = numpy.array(celldata)
    vtkmesh.cell_data.scalars.name = label
    dataset = mlab.pipeline.threshold(mlab.pipeline.add_dataset(vtkmesh))
    if vmin is None:
        vmin = min(vtkmesh.cell_data.scalars)
    if vmax is None:
        vmax = max(vtkmesh.cell_data.scalars)
    if style == 'wireframe':
        surf = mlab.pipeline.surface(mlab.pipeline.extract_edges(dataset),
                                     vmax=vmax, vmin=vmin, colormap=cmap)
        surf.actor.property.representation = 'wireframe'
        surf.actor.property.line_width = linewidth
    if style == 'surface':
        surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin,
                                     colormap=cmap)
        surf.actor.property.representation = 'surface'
        if edges:
            # Overlay a separate wireframe surface to draw the edges.
            edge = mlab.pipeline.surface(mlab.pipeline.extract_edges(dataset),
                                         vmax=vmax, vmin=vmin)
            edge.actor.property.representation = 'wireframe'
            edge.actor.mapper.scalar_visibility = 0
            edge.actor.property.line_width = linewidth
            edge.actor.property.opacity = opacity
            edge.actor.property.color = edgecolor
    surf.actor.property.opacity = opacity
    surf.actor.property.backface_culling = False
    if color is not None:
        surf.actor.mapper.scalar_visibility = 0
        surf.actor.property.color = color
    return surf
def prisms(prisms, prop=None, style='surface', opacity=1, edges=True,
           vmin=None, vmax=None, cmap='blue-red', color=None, linewidth=1,
           edgecolor=(0, 0, 0), scale=(1, 1, 1)):
    """
    Plot a list of 3D right rectangular prisms using Mayavi2.

    Will not plot a value None in *prisms*

    Parameters:

    * prisms : list of :class:`fatiando.mesher.Prism`
        The prisms
    * prop : str or None
        The physical property of the prisms to use as the color scale. If a
        prism doesn't have *prop*, or if it is None, then it will not be
        plotted If prop is a vector (like magnetization), will use the
        intensity (norm).
    * style : str
        Either ``'surface'`` for solid prisms or ``'wireframe'`` for just the
        contour
    * opacity : float
        Decimal percentage of opacity
    * edges : True or False
        Wether or not to display the edges of the prisms in black lines. Will
        ignore this if ``style='wireframe'``
    * vmin, vmax : float
        Min and max values for the color scale. If *None* will default to
        the min and max of *prop* in the prisms.
    * cmap : Mayavi colormap
        Color map to use. See the 'Colors and Legends' menu on the Mayavi2 GUI
        for valid color maps.
    * color : None or tuple = (r, g, b)
        If not None, then for all prisms to have this RGB color
    * linewidth : float
        The width of the lines (edges) of the prisms.
    * edgecolor : tuple = (r, g, b)
        RGB of the color of the edges. If style='wireframe', then will be
        ignored. Use parameter *color* instead
    * scale : (sx, sy, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    Returns:

    * surface
        the last element on the pipeline

    """
    if style not in ['surface', 'wireframe']:
        raise ValueError("Invalid style '%s'" % (style))
    if opacity > 1. or opacity < 0:
        # BUG FIX: error message used to read "[1,0]" for the valid range.
        raise ValueError("Invalid opacity %g. Must be in range [0,1]"
                         % (opacity))
    # mlab and tvtk are really slow to import
    _lazy_import_mlab()
    _lazy_import_tvtk()
    if prop is None:
        label = 'scalar'
    else:
        label = prop
    # VTK parameters: each prism becomes one 8-node cell.
    points = []
    cells = []
    offsets = []
    offset = 0
    mesh_size = 0
    celldata = []
    # To mark what index in the points the cell starts
    start = 0
    for prism in prisms:
        if prism is None or (prop is not None and prop not in prism.props):
            continue
        x1, x2, y1, y2, z1, z2 = prism.get_bounds()
        if prop is None:
            scalar = 0.
        else:
            p = prism.props[prop]
            if isinstance(p, int) or isinstance(p, float):
                scalar = p
            else:
                # Vector property (e.g., magnetization): color by intensity.
                scalar = numpy.linalg.norm(p)
        points.extend([[x1, y1, z1], [x2, y1, z1], [x2, y2, z1], [x1, y2, z1],
                       [x1, y1, z2], [x2, y1, z2], [x2, y2, z2], [x1, y2, z2]])
        cells.append(8)
        cells.extend([i for i in xrange(start, start + 8)])
        start += 8
        offsets.append(offset)
        offset += 9
        celldata.append(scalar)
        mesh_size += 1
    cell_array = tvtk.CellArray()
    cell_array.set_cells(mesh_size, numpy.array(cells))
    # Cell type 12 is VTK_HEXAHEDRON (8-node cell).
    cell_types = numpy.array([12] * mesh_size, 'i')
    vtkmesh = tvtk.UnstructuredGrid(points=numpy.array(points, 'f'))
    vtkmesh.set_cells(cell_types, numpy.array(offsets, 'i'), cell_array)
    vtkmesh.cell_data.scalars = numpy.array(celldata)
    vtkmesh.cell_data.scalars.name = label
    dataset = mlab.pipeline.threshold(mlab.pipeline.add_dataset(vtkmesh))
    if vmin is None:
        vmin = min(vtkmesh.cell_data.scalars)
    if vmax is None:
        vmax = max(vtkmesh.cell_data.scalars)
    surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin, colormap=cmap)
    if style == 'wireframe':
        surf.actor.property.representation = 'wireframe'
        surf.actor.property.line_width = linewidth
    if style == 'surface':
        surf.actor.property.representation = 'surface'
        if edges:
            surf.actor.property.edge_visibility = 1
            surf.actor.property.line_width = linewidth
            surf.actor.property.edge_color = edgecolor
    surf.actor.property.opacity = opacity
    if color is not None:
        surf.actor.mapper.scalar_visibility = 0
        surf.actor.property.color = color
    surf.actor.actor.scale = scale
    return surf
def figure(size=None, zdown=True, color=(1, 1, 1)):
    """
    Create a default Mayavi figure.

    Parameters:

    * size : tuple = (dx, dy)
        The size of the figure. If ``None`` will use the default size.
    * zdown : True or False
        If True, will turn the figure upside-down to make the z-axis point down
    * color : tuple = (r, g, b)
        RGB of the color of the background

    Return:

    * fig : Mayavi figure object
        The figure

    """
    _lazy_import_mlab()
    kwargs = {'bgcolor': color}
    if size is not None:
        kwargs['size'] = size
    fig = mlab.figure(**kwargs)
    if zdown:
        # Flip the camera so that z grows downward (geophysics convention)
        camera = fig.scene.camera
        camera.view_up = numpy.array([0., 0., -1.])
        camera.elevation(60.)
        camera.azimuth(180.)
    return fig
def outline(extent=None, color=(0, 0, 0), width=2, scale=(1, 1, 1)):
    """
    Create a default outline in Mayavi2.

    Parameters:

    * extent : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        Default if the objects extent.
    * color : tuple = (r, g, b)
        RGB of the color of the axes and text
    * width : float
        Line width
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    Returns:

    * outline : Mayavi outline instace
        The outline in the pipeline

    """
    _lazy_import_mlab()
    result = mlab.outline(color=color, line_width=width)
    # Only override the bounds when the caller gave an explicit extent
    if extent is not None:
        result.bounds = extent
    result.actor.actor.scale = scale
    return result
def axes(plot, nlabels=5, extent=None, ranges=None, color=(0, 0, 0),
         width=2, fmt="%-#.2f"):
    """
    Add an Axes module to a Mayavi2 plot or dataset.

    Parameters:

    * plot
        Either the plot (as returned by one of the plotting functions of this
        module) or a TVTK dataset.
    * nlabels : int
        Number of labels on the axes
    * extent : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        Default if the objects extent.
    * ranges : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        What will be display in the axes labels. Default is *extent*
    * color : tuple = (r, g, b)
        RGB of the color of the axes and text
    * width : float
        Line width
    * fmt : str
        Label number format

    Returns:

    * axes : Mayavi axes instace
        The axes object in the pipeline

    """
    _lazy_import_mlab()
    ax = mlab.axes(plot, nb_labels=nlabels, color=color)
    # Color the tick labels and titles to match the axes lines
    ax.label_text_property.color = color
    ax.title_text_property.color = color
    if extent is not None:
        ax.axes.bounds = extent
    if ranges is not None:
        ax.axes.ranges = ranges
        ax.axes.use_ranges = True
    ax.property.line_width = width
    ax.axes.label_format = fmt
    # x->North, y->East, z->Down convention of this module
    ax.axes.x_label = "N"
    ax.axes.y_label = "E"
    ax.axes.z_label = "Z"
    return ax
def wall_north(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the North side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both x-bounds collapsed onto the northern edge
    _wall([north, north, west, east, bottom, top], color, opacity, scale)
def wall_south(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the South side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both x-bounds collapsed onto the southern edge
    _wall([south, south, west, east, bottom, top], color, opacity, scale)
def wall_east(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the East side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both y-bounds collapsed onto the eastern edge
    _wall([south, north, east, east, bottom, top], color, opacity, scale)
def wall_west(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the West side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both y-bounds collapsed onto the western edge
    _wall([south, north, west, west, bottom, top], color, opacity, scale)
def wall_top(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the Top side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both z-bounds collapsed onto the top edge
    _wall([south, north, west, east, top, top], color, opacity, scale)
def wall_bottom(bounds, color=(0, 0, 0), opacity=0.1, scale=(1, 1, 1)):
    """
    Draw a 3D wall in Mayavi2 on the Bottom side.

    .. note:: Remember that x->North, y->East and z->Down

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        The extent of the region where the wall is placed
    * color : tuple = (r, g, b)
        RGB of the color of the wall
    * opacity : float
        Decimal percentage of opacity
    * scale : (slon, slat, sz)
        Scale factors used to exaggerate on a particular direction, e.g., if
        scale = (1, 1, 2), the vertical dimension will be 2x larger than the
        others

    """
    south, north, west, east, top, bottom = bounds
    # Degenerate box: both z-bounds collapsed onto the bottom edge
    _wall([south, north, west, east, bottom, bottom], color, opacity, scale)
def _wall(bounds, color, opacity, scale):
    """Add a single flat wall (a filled outline surface) to the pipeline"""
    _lazy_import_mlab()
    source = mlab.pipeline.builtin_surface()
    source.source = 'outline'
    source.data_source.bounds = bounds
    # Fill the outline so it renders as a solid wall instead of a wireframe
    source.data_source.generate_faces = 1
    surface = mlab.pipeline.surface(source)
    surface.actor.property.color = color
    surface.actor.property.opacity = opacity
    surface.actor.actor.scale = scale
def continents(color=(0, 0, 0), linewidth=1, resolution=2, opacity=1,
               radius=MEAN_EARTH_RADIUS):
    """
    Plot the outline of the continents.

    Parameters:

    * color : tuple
        RGB color of the lines. Default = black
    * linewidth : float
        The width of the continent lines
    * resolution : float
        The data_source.on_ratio parameter that controls the resolution of the
        continents
    * opacity : float
        The opacity of the lines. Must be between 0 and 1
    * radius : float
        The radius of the sphere where the continents will be plotted. Defaults
        to the mean Earth radius

    Returns:

    * continents : Mayavi surface
        The Mayavi surface element of the continents

    """
    _lazy_import_mlab()
    _lazy_import_BuiltinSurface()
    continents_src = BuiltinSurface(source='earth', name='Continents')
    continents_src.data_source.on_ratio = resolution
    # BUG FIX: previously hard-coded MEAN_EARTH_RADIUS here, silently
    # ignoring the `radius` argument documented above.
    continents_src.data_source.radius = radius
    surf = mlab.pipeline.surface(continents_src, color=color)
    surf.actor.property.line_width = linewidth
    surf.actor.property.opacity = opacity
    return surf
def earth(color=(0.4, 0.5, 1.0), opacity=1):
    """
    Draw a sphere representing the Earth.

    Parameters:

    * color : tuple
        RGB color of the sphere. Defaults to ocean blue.
    * opacity : float
        The opacity of the sphere. Must be between 0 and 1

    Returns:

    * sphere : Mayavi surface
        The Mayavi surface element of the sphere

    """
    _lazy_import_mlab()
    # points3d draws glyph spheres; scale_factor is the sphere diameter
    diameter = 2 * MEAN_EARTH_RADIUS
    sphere = mlab.points3d(0, 0, 0, scale_mode='none',
                           scale_factor=diameter, color=color,
                           resolution=50, opacity=opacity, name='Earth')
    props = sphere.actor.property
    props.specular = 0.45
    props.specular_power = 5
    props.backface_culling = True
    return sphere
def core(inner=False, color=(1, 0, 0), opacity=1):
    """
    Draw a sphere representing the Earth's core.

    Parameters:

    * inner : True or False
        If True, will use the radius of the inner core, else the outer core.
    * color : tuple
        RGB color of the sphere. Defaults to red.
    * opacity : float
        The opacity of the sphere. Must be between 0 and 1

    Returns:

    * sphere : Mayavi surface
        The Mayavi surface element of the sphere

    """
    _lazy_import_mlab()
    # Radii in meters for the inner and outer core
    if inner:
        radius, name = 1216000., 'Inner core'
    else:
        radius, name = 3486000., 'Core'
    sphere = mlab.points3d(0, 0, 0, scale_mode='none',
                           scale_factor=2. * radius, color=color,
                           resolution=50, opacity=opacity, name=name)
    props = sphere.actor.property
    props.specular = 0.45
    props.specular_power = 5
    props.backface_culling = True
    return sphere
def meridians(longitudes, color=(0, 0, 0), linewidth=1, opacity=1):
    """
    Draw meridians on the Earth.

    Parameters:

    * longitudes : list
        The longitudes where the meridians will be drawn.
    * color : tuple
        RGB color of the lines. Defaults to black.
    * linewidth : float
        The width of the lines
    * opacity : float
        The opacity of the lines. Must be between 0 and 1

    Returns:

    * lines : Mayavi surface
        The Mayavi surface element of the lines

    """
    # Consistency fix: every other public function in this module makes sure
    # mlab is loaded before touching it; this one was missing the call and
    # would fail if invoked before any other plotting function.
    _lazy_import_mlab()
    # Sample latitudes over a full circle so each meridian is a closed ring
    lats = numpy.linspace(-90, 270., 100)
    x, y, z = [], [], []
    for lon in longitudes:
        coords = utils.sph2cart(numpy.ones_like(lats) * lon, lats, 0)
        x.extend(coords[0].tolist())
        y.extend(coords[1].tolist())
        z.extend(coords[2].tolist())
    x, y, z = numpy.array(x), numpy.array(y), numpy.array(z)
    lines = mlab.plot3d(x, y, z, color=color, opacity=opacity,
                        tube_radius=None)
    lines.actor.property.line_width = linewidth
    return lines
def parallels(latitudes, color=(0, 0, 0), linewidth=1, opacity=1):
    """
    Draw parallels on the Earth.

    Parameters:

    * latitudes : list
        The latitudes where the parallels will be drawn.
    * color : tuple
        RGB color of the lines. Defaults to black.
    * linewidth : float
        The width of the lines
    * opacity : float
        The opacity of the lines. Must be between 0 and 1

    Returns:

    * lines : list
        List of the Mayavi surface elements of each line

    """
    # Consistency fix: every other public function in this module makes sure
    # mlab is loaded before touching it; this one was missing the call and
    # would fail if invoked before any other plotting function.
    _lazy_import_mlab()
    lons = numpy.linspace(0, 360., 100)
    parallels = []
    for lat in latitudes:
        x, y, z = utils.sph2cart(lons, numpy.ones_like(lons) * lat, 0)
        lines = mlab.plot3d(x, y, z, color=color, opacity=opacity,
                            tube_radius=None)
        lines.actor.property.line_width = linewidth
        parallels.append(lines)
    return parallels
| eusoubrasileiro/fatiando | fatiando/vis/myv.py | Python | bsd-3-clause | 32,932 | [
"Mayavi",
"VTK"
] | ccdc4de09fae59ce51792de421324c0131fb82af2c405bf884d1da1217de7c84 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import warnings
import inspect
from contextlib import contextmanager
from datetime import datetime
from itertools import product
import logging
from pint import UnitRegistry, UndefinedUnitError
from pathlib import Path
import numpy as np
from scipy import integrate
from scipy import signal as sp_signal
import dask.array as da
from matplotlib import pyplot as plt
import traits.api as t
import numbers
from hyperspy.axes import AxesManager
from hyperspy import io
from hyperspy.drawing import mpl_hie, mpl_hse, mpl_he
from hyperspy.learn.mva import MVA, LearningResults
import hyperspy.misc.utils
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.drawing import signal as sigdraw
from hyperspy.defaults_parser import preferences
from hyperspy.misc.io.tools import ensure_directory
from hyperspy.misc.utils import iterable_not_string
from hyperspy.external.progressbar import progressbar
from hyperspy.exceptions import SignalDimensionError, DataDimensionError
from hyperspy.misc import rgb_tools
from hyperspy.misc.utils import underline, isiterable
from hyperspy.misc.hist_tools import histogram
from hyperspy.drawing.utils import animate_legend
from hyperspy.drawing.marker import markers_metadata_dict_to_markers
from hyperspy.misc.slicing import SpecialSlicers, FancySlicing
from hyperspy.misc.utils import slugify
from hyperspy.misc.utils import is_binned # remove in v2.0
from hyperspy.docstrings.signal import (
ONE_AXIS_PARAMETER, MANY_AXIS_PARAMETER, OUT_ARG, NAN_FUNC, OPTIMIZE_ARG,
RECHUNK_ARG, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
CLUSTER_SIGNALS_ARG, HISTOGRAM_BIN_ARGS, HISTOGRAM_MAX_BIN_ARGS)
from hyperspy.docstrings.plot import (BASE_PLOT_DOCSTRING, PLOT1D_DOCSTRING,
BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT2D_KWARGS_DOCSTRING)
from hyperspy.docstrings.utils import REBIN_ARGS
from hyperspy.events import Events, Event
from hyperspy.interactive import interactive
from hyperspy.misc.signal_tools import (are_signals_aligned,
broadcast_signals)
from hyperspy.misc.math_tools import outer_nd, hann_window_nth_order, check_random_state
from hyperspy.exceptions import VisibleDeprecationWarning
_logger = logging.getLogger(__name__)
class ModelManager(object):
    """Container for models stored in a signal.

    Each stored model lives in ``self._models`` (a
    ``DictionaryTreeBrowser``) and is additionally exposed as an attribute
    of this object through a :class:`ModelStub`, so attribute-style access
    such as ``signal.models.a.restore()`` works.
    """

    class ModelStub(object):
        """Lightweight handle to one stored model.

        Binds the parent manager's ``restore``/``remove``/``pop``
        operations to a fixed model name.
        """

        def __init__(self, mm, name):
            self._name = name
            self._mm = mm
            # Bind the manager operations to this model's name
            self.restore = lambda: mm.restore(self._name)
            self.remove = lambda: mm.remove(self._name)
            self.pop = lambda: mm.pop(self._name)
            self.restore.__doc__ = "Returns the stored model"
            self.remove.__doc__ = "Removes the stored model"
            self.pop.__doc__ = \
                "Returns the stored model and removes it from storage"

        def __repr__(self):
            # Delegate to the stored dictionary node for a readable display
            return repr(self._mm._models[self._name])

    def __init__(self, signal, dictionary=None):
        # The signal this manager belongs to; models can only be stored
        # here if they were created from this same signal (see `store`).
        self._signal = signal
        self._models = DictionaryTreeBrowser()
        self._add_dictionary(dictionary)

    def _add_dictionary(self, dictionary=None):
        """Load previously stored models from a plain dictionary."""
        if dictionary is not None:
            for k, v in dictionary.items():
                # Keys that would clash with attributes of this object
                # (private names, method names) are rejected outright.
                if k.startswith('_') or k in ['restore', 'remove']:
                    raise KeyError("Can't add dictionary with key '%s'" % k)
                k = slugify(k, True)
                self._models.set_item(k, v)
                setattr(self, k, self.ModelStub(self, k))

    def _set_nice_description(self, node, names):
        """Attach human-readable metadata (date, dimensions, components)."""
        ans = {'date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
               'dimensions': self._signal.axes_manager._get_dimension_str(),
               }
        node.add_dictionary(ans)
        for n in names:
            node.add_node('components.' + n)

    def _save(self, name, dictionary):
        """Store a model dictionary under `name` (auto-named if None)."""

        _abc = 'abcdefghijklmnopqrstuvwxyz'

        def get_letter(models):
            """Return the first unused name: 'a'..'z', then 'aa', 'ab', ..."""
            howmany = len(models)
            if not howmany:
                return 'a'
            # Number of letters needed to enumerate `howmany` names base 26
            order = int(np.log(howmany) / np.log(26)) + 1
            letters = [_abc, ] * order
            for comb in product(*letters):
                guess = "".join(comb)
                if guess not in models.keys():
                    return guess

        if name is None:
            name = get_letter(self._models)
        else:
            name = self._check_name(name)
        if name in self._models:
            # Overwrite silently: drop any old model with the same name
            self.remove(name)
        self._models.add_node(name)
        node = self._models.get_item(name)
        names = [c['name'] for c in dictionary['components']]
        self._set_nice_description(node, names)
        node.set_item('_dict', dictionary)
        setattr(self, name, self.ModelStub(self, name))

    def store(self, model, name=None):
        """If the given model was created from this signal, stores it

        Parameters
        ----------
        model : :py:class:`~hyperspy.model.BaseModel` (or subclass)
            The model to store in the signal
        name : str or None
            The name for the model to be stored with

        See also
        --------
        remove
        restore
        pop
        """
        if model.signal is self._signal:
            self._save(name, model.as_dictionary())
        else:
            raise ValueError("The model is created from a different signal, "
                             "you should store it there")

    def _check_name(self, name, existing=False):
        """Slugify `name` and validate it; optionally require it to exist."""
        if not isinstance(name, str):
            raise KeyError('Name has to be a string')
        if name.startswith('_'):
            raise KeyError('Name cannot start with "_" symbol')
        if '.' in name:
            raise KeyError('Name cannot contain dots (".")')
        name = slugify(name, True)
        if existing:
            if name not in self._models:
                raise KeyError(
                    "Model named '%s' is not currently stored" %
                    name)
        return name

    def remove(self, name):
        """Removes the given model

        Parameters
        ----------
        name : str
            The name of the model to remove

        See also
        --------
        restore
        store
        pop
        """
        name = self._check_name(name, True)
        delattr(self, name)
        self._models.__delattr__(name)

    def pop(self, name):
        """Returns the restored model and removes it from storage

        Parameters
        ----------
        name : str
            The name of the model to restore and remove

        See also
        --------
        restore
        store
        remove
        """
        name = self._check_name(name, True)
        model = self.restore(name)
        self.remove(name)
        return model

    def restore(self, name):
        """Returns the restored model

        Parameters
        ----------
        name : str
            The name of the model to restore

        See also
        --------
        remove
        store
        pop
        """
        name = self._check_name(name, True)
        # Deep-copy so the restored model cannot mutate the stored dict
        d = self._models.get_item(name + '._dict').as_dictionary()
        return self._signal.create_model(dictionary=copy.deepcopy(d))

    def __repr__(self):
        return repr(self._models)

    def __len__(self):
        return len(self._models)

    def __getitem__(self, name):
        name = self._check_name(name, True)
        return getattr(self, name)
class MVATools(object):
# TODO: All of the plotting methods here should move to drawing
    def _plot_factors_or_pchars(self, factors, comp_ids=None,
                                calibrate=True, avg_char=False,
                                same_window=True, comp_label='PC',
                                img_data=None,
                                plot_shifts=True, plot_char=4,
                                cmap=plt.cm.gray, quiver_color='white',
                                vector_scale=1,
                                per_row=3, ax=None):
        """Plot components from PCA or ICA, or peak characteristics.

        Parameters
        ----------
        comp_ids : None, int, or list of ints
            If None, returns maps of all components.
            If int, returns maps of components with ids from 0 to given
            int.
            if list of ints, returns maps of components with ids in
            given list.
        calibrate : bool
            If True, plots are calibrated according to the data in the
            axes manager.
        same_window : bool
            If True, plots each factor to the same window. They are not scaled.
            Default True.
        comp_label : str
            Title of the plot
        cmap : a matplotlib colormap
            The colormap used for factor images or any peak characteristic
            scatter map overlay. Default is the matplotlib gray colormap
            (``plt.cm.gray``).

        Other Parameters
        ----------------
        img_data : 2D numpy array,
            The array to overlay peak characteristics onto. If None,
            defaults to the average image of your stack.
        plot_shifts : bool, default is True
            If true, plots a quiver (arrow) plot showing the shifts for
            each
            peak present in the component being plotted.
        plot_char : None or int
            If int, the id of the characteristic to plot as the colored
            scatter plot.
            Possible components are:

               * 4: peak height
               * 5: peak orientation
               * 6: peak eccentricity

        quiver_color : any color recognized by matplotlib
            Determines the color of vectors drawn for
            plotting peak shifts.
        vector_scale : integer or None
            Scales the quiver plot arrows. The vector is defined as one data
            unit along the X axis. If shifts are small, set vector_scale so
            that when they are multiplied by vector_scale, they are on the
            scale of the image plot. If None, uses matplotlib's autoscaling.

        Returns
        -------
        matplotlib figure or list of figure if same_window=False
        """
        if same_window is None:
            same_window = True
        # Normalize comp_ids to an iterable of component indices
        if comp_ids is None:
            comp_ids = range(factors.shape[1])
        elif not hasattr(comp_ids, '__iter__'):
            comp_ids = range(comp_ids)
        n = len(comp_ids)
        # `rows` only used when plotting into a shared grid of subplots
        if same_window:
            rows = int(np.ceil(n / float(per_row)))
        fig_list = []
        if n < per_row:
            per_row = n
        if same_window and self.axes_manager.signal_dimension == 2:
            # One wide figure holding a rows x per_row grid of images
            f = plt.figure(figsize=(4 * per_row, 3 * rows))
        else:
            f = plt.figure()
        for i in range(len(comp_ids)):
            if self.axes_manager.signal_dimension == 1:
                # 1D factors: overlaid lines (same window) or one fig each
                if same_window:
                    ax = plt.gca()
                else:
                    if i > 0:
                        f = plt.figure()
                    plt.title('%s' % comp_label)
                    ax = f.add_subplot(111)
                ax = sigdraw._plot_1D_component(
                    factors=factors,
                    idx=comp_ids[i],
                    axes_manager=self.axes_manager,
                    ax=ax,
                    calibrate=calibrate,
                    comp_label=comp_label,
                    same_window=same_window)
                if same_window:
                    plt.legend(ncol=factors.shape[1] // 2, loc='best')
            elif self.axes_manager.signal_dimension == 2:
                # 2D factors: image per component
                if same_window:
                    ax = f.add_subplot(rows, per_row, i + 1)
                else:
                    if i > 0:
                        f = plt.figure()
                    plt.title('%s' % comp_label)
                    ax = f.add_subplot(111)
                sigdraw._plot_2D_component(factors=factors,
                                           idx=comp_ids[i],
                                           axes_manager=self.axes_manager,
                                           calibrate=calibrate, ax=ax,
                                           cmap=cmap, comp_label=comp_label)
            if not same_window:
                fig_list.append(f)
        if same_window:  # Main title for same window
            title = '%s' % comp_label
            if self.axes_manager.signal_dimension == 1:
                plt.title(title)
            else:
                plt.suptitle(title)
            # tight_layout can fail for some backends/figure states; the
            # plot is still usable, so failures are deliberately ignored
            try:
                plt.tight_layout()
            except BaseException:
                pass
        if not same_window:
            return fig_list
        else:
            return f
    def _plot_loadings(self, loadings, comp_ids, calibrate=True,
                       same_window=True, comp_label=None,
                       with_factors=False, factors=None,
                       cmap=plt.cm.gray, no_nans=False, per_row=3,
                       axes_decor='all'):
        """Plot component loadings over the navigation space.

        Mirrors `_plot_factors_or_pchars` but draws the loadings (rows of
        `loadings`) against the navigation axes.  If `with_factors` is
        True, the corresponding factors are plotted as well and both
        results are returned.
        """
        if same_window is None:
            same_window = True
        # Normalize comp_ids to an iterable of component indices
        if comp_ids is None:
            comp_ids = range(loadings.shape[0])
        elif not hasattr(comp_ids, '__iter__'):
            comp_ids = range(comp_ids)
        n = len(comp_ids)
        # `rows` only used when plotting into a shared grid of subplots
        if same_window:
            rows = int(np.ceil(n / float(per_row)))
        fig_list = []
        if n < per_row:
            per_row = n
        if same_window and self.axes_manager.signal_dimension == 2:
            f = plt.figure(figsize=(4 * per_row, 3 * rows))
        else:
            f = plt.figure()
        for i in range(n):
            if self.axes_manager.navigation_dimension == 1:
                # 1D navigation: overlaid line plots (or one fig each)
                if same_window:
                    ax = plt.gca()
                else:
                    if i > 0:
                        f = plt.figure()
                    plt.title('%s' % comp_label)
                    ax = f.add_subplot(111)
            elif self.axes_manager.navigation_dimension == 2:
                # 2D navigation: image per component
                if same_window:
                    ax = f.add_subplot(rows, per_row, i + 1)
                else:
                    if i > 0:
                        f = plt.figure()
                    plt.title('%s' % comp_label)
                    ax = f.add_subplot(111)
            sigdraw._plot_loading(
                loadings, idx=comp_ids[i], axes_manager=self.axes_manager,
                no_nans=no_nans, calibrate=calibrate, cmap=cmap,
                comp_label=comp_label, ax=ax, same_window=same_window,
                axes_decor=axes_decor)
            if not same_window:
                fig_list.append(f)
        if same_window:  # Main title for same window
            title = '%s' % comp_label
            if self.axes_manager.navigation_dimension == 1:
                plt.title(title)
            else:
                plt.suptitle(title)
        # tight_layout can fail for some backends/figure states; the plot
        # is still usable, so failures are deliberately ignored
        try:
            plt.tight_layout()
        except BaseException:
            pass
        if not same_window:
            if with_factors:
                return fig_list, self._plot_factors_or_pchars(
                    factors, comp_ids=comp_ids, calibrate=calibrate,
                    same_window=same_window, comp_label=comp_label,
                    per_row=per_row)
            else:
                return fig_list
        else:
            if self.axes_manager.navigation_dimension == 1:
                # Clickable legend: toggles individual loading lines
                plt.legend(ncol=loadings.shape[0] // 2, loc='best')
                animate_legend(f)
            if with_factors:
                return f, self._plot_factors_or_pchars(factors,
                                                       comp_ids=comp_ids,
                                                       calibrate=calibrate,
                                                       same_window=same_window,
                                                       comp_label=comp_label,
                                                       per_row=per_row)
            else:
                return f
def _export_factors(self,
factors,
folder=None,
comp_ids=None,
multiple_files=True,
save_figures=False,
save_figures_format='png',
factor_prefix=None,
factor_format=None,
comp_label=None,
cmap=plt.cm.gray,
plot_shifts=True,
plot_char=4,
img_data=None,
same_window=False,
calibrate=True,
quiver_color='white',
vector_scale=1,
no_nans=True, per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if factor_format is None:
factor_format = 'hspy'
# Select the desired factors
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
mask = np.zeros(factors.shape[1], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
factors = factors[:, mask]
if save_figures is True:
plt.ioff()
fac_plots = self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
same_window=same_window,
comp_label=comp_label,
img_data=img_data,
plot_shifts=plot_shifts,
plot_char=plot_char,
cmap=cmap,
per_row=per_row,
quiver_color=quiver_color,
vector_scale=vector_scale)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (factor_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = Path(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
fac_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.signal_dimension == 2:
# factor images
axes_dicts = []
axes = self.axes_manager.signal_axes[::-1]
shape = (axes[1].size, axes[0].size)
factor_data = np.rollaxis(
factors.reshape((shape[0], shape[1], -1)), 2)
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts.append({'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(factor_data,
axes=axes_dicts,
metadata={
'General': {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.signal_dimension == 1:
axes = [self.axes_manager.signal_axes[0].get_axis_dictionary(),
{'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0,
}]
axes[0]['index_in_array'] = 1
s = Signal1D(
factors.T, axes=axes, metadata={
"General": {
'title': '%s from %s' %
(factor_prefix, self.metadata.General.title), }})
filename = '%ss.%s' % (factor_prefix, factor_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.signal_dimension == 1:
axis_dict = self.axes_manager.signal_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(factors[:, index],
axes=[axis_dict, ],
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
if self.axes_manager.signal_dimension == 2:
axes = self.axes_manager.signal_axes
axes_dicts = [axes[0].get_axis_dictionary(),
axes[1].get_axis_dictionary()]
axes_dicts[0]['index_in_array'] = 0
axes_dicts[1]['index_in_array'] = 1
factor_data = factors.reshape(
self.axes_manager._signal_shape_in_array + [-1, ])
for dim, index in zip(comp_ids, range(len(comp_ids))):
im = Signal2D(factor_data[..., index],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = Path(folder, filename)
im.save(filename)
def _export_loadings(self,
loadings,
folder=None,
comp_ids=None,
multiple_files=True,
loading_prefix=None,
loading_format="hspy",
save_figures_format='png',
comp_label=None,
cmap=plt.cm.gray,
save_figures=False,
same_window=False,
calibrate=True,
no_nans=True,
per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if loading_format is None:
loading_format = 'hspy'
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
mask = np.zeros(loadings.shape[0], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
loadings = loadings[mask]
if save_figures is True:
plt.ioff()
sc_plots = self._plot_loadings(loadings, comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
cmap=cmap, no_nans=no_nans,
per_row=per_row)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (loading_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = Path(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
sc_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[1].size, axes[0].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 1
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 2
axes_dicts.append({'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(loading_data,
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.navigation_dimension == 1:
cal_axis = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
cal_axis['index_in_array'] = 1
axes = [{'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'comp_id',
'index_in_array': 0, },
cal_axis]
s = Signal2D(loadings,
axes=axes,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%ss.%s' % (loading_prefix, loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.navigation_dimension == 1:
axis_dict = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(loadings[index],
axes=[axis_dict, ])
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
elif self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[0].size, axes[1].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 0
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 1
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal2D(loading_data[index, ...],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = Path(folder, filename)
s.save(filename)
def plot_decomposition_factors(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
cmap=plt.cm.gray,
per_row=3,
**kwargs,
):
"""Plot factors from a decomposition. In case of 1D signal axis, each
factors line can be toggled on and off by clicking on their
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`. Otherwise it
raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_decomposition_loadings, plot_decomposition_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.factors is None:
raise RuntimeError("No learning results found. A 'decomposition' "
"needs to be performed first.")
if same_window is None:
same_window = True
if self.learning_results.factors is None:
raise RuntimeError("Run a decomposition first.")
factors = self.learning_results.factors
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument")
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('Decomposition factors of',
same_window=same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
cmap=cmap,
per_row=per_row)
def plot_bss_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
cmap=plt.cm.gray,
per_row=3,
**kwargs,
):
"""Plot factors from blind source separation results. In case of 1D
signal axis, each factors line can be toggled on and off by clicking
on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_bss_loadings, plot_bss_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.bss_factors is None:
raise RuntimeError("No learning results found. A "
"'blind_source_separation' needs to be "
"performed first.")
if same_window is None:
same_window = True
factors = self.learning_results.bss_factors
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('BSS factors of',
same_window=same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
per_row=per_row)
def plot_decomposition_loadings(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
**kwargs,
):
"""Plot loadings from a decomposition. In case of 1D navigation axis,
each loading line can be toggled on and off by clicking on the legended
line.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`.
Otherwise it raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
with_factors : bool
If ``True``, also returns figure(s) with the factors for the
given comp_ids.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the loadings images, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image; default is
``'all'``
If ``'all'``, both ticks and axis labels will be shown.
If ``'ticks'``, no axis labels will be shown, but ticks/labels will.
If ``'off'``, all decorations and frame will be disabled.
If ``None``, no axis decorations will be shown, but ticks/frame
will.
See also
--------
plot_decomposition_factors, plot_decomposition_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead.")
if self.learning_results.loadings is None:
raise RuntimeError("No learning results found. A 'decomposition' "
"needs to be performed first.")
if same_window is None:
same_window = True
if self.learning_results.loadings is None:
raise RuntimeError("Run a decomposition first.")
loadings = self.learning_results.loadings.T
if with_factors:
factors = self.learning_results.factors
else:
factors = None
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument")
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'Decomposition loadings of', same_window=same_window)
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def plot_bss_loadings(self,
comp_ids=None,
calibrate=True,
same_window=True,
title=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
**kwargs,
):
"""Plot loadings from blind source separation results. In case of 1D
navigation axis, each loading line can be toggled on and off by
clicking on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
with_factors : bool
If `True`, also returns figure(s) with the factors for the
given `comp_ids`.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the loading image, or for peak
characteristics,. Default is the matplotlib gray colormap
(``plt.cm.gray``).
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image;
default is ``'all'``
If ``'all'``, both ticks and axis labels will be shown
If ``'ticks'``, no axis labels will be shown, but ticks/labels will
If ``'off'``, all decorations and frame will be disabled
If ``None``, no axis decorations will be shown, but ticks/frame will
See also
--------
plot_bss_factors, plot_bss_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_bss_results` instead.")
if self.learning_results.bss_loadings is None:
raise RuntimeError("No learning results found. A "
"'blind_source_separation' needs to be "
"performed first.")
if same_window is None:
same_window = True
comp_label = kwargs.get("comp_label", None)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title(
'BSS loadings of', same_window=same_window)
loadings = self.learning_results.bss_loadings.T
if with_factors:
factors = self.learning_results.bss_factors
else:
factors = None
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def _get_plot_title(self, base_title='Loadings', same_window=True):
title_md = self.metadata.General.title
title = "%s %s" % (base_title, title_md)
if title_md == '': # remove the 'of' if 'title' is a empty string
title = title.replace(' of ', '')
if not same_window:
title = title.replace('loadings', 'loading')
return title
def export_decomposition_results(self, comp_ids=None,
folder=None,
calibrate=True,
factor_prefix='factor',
factor_format="hspy",
loading_prefix='loading',
loading_format="hspy",
comp_label=None,
cmap=plt.cm.gray,
same_window=False,
multiple_files=True,
no_nans=True,
per_row=3,
save_figures=False,
save_figures_format='png'):
"""Export results from a decomposition to any of the supported
formats.
Parameters
----------
comp_ids : None, int, or list (of ints)
If None, returns all components/loadings.
If an int, returns components/loadings with ids from 0 to the
given value.
If a list of ints, returns components/loadings with ids provided in
the given list.
folder : str or None
The path to the folder where the file will be saved.
If ``None``, the current folder is used by default.
factor_prefix : str
The prefix that any exported filenames for factors/components
begin with
factor_format : str
The extension of the format that you wish to save the factors to.
Default is ``'hspy'``. See `loading_format` for more details.
loading_prefix : str
The prefix that any exported filenames for factors/components
begin with
loading_format : str
The extension of the format that you wish to save to. default
is ``'hspy'``. The format determines the kind of output:
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If ``True``, one file will be created for each factor and loading.
Otherwise, only two files will be created, one for
the factors and another for the loadings. The default value can
be chosen in the preferences.
save_figures : bool
If ``True`` the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Note
----
The following parameters are only used when ``save_figures = True``:
Other Parameters
----------------
calibrate : :py:class:`bool`
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : :py:class:`bool`
If ``True``, plots each factor to the same window.
comp_label : :py:class:`str`
the label that is either the plot title (if plotting in separate
windows) or the label in the legend (if plotting in the same window)
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for images, such as factors, loadings, or for peak
characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : :py:class:`int`
The number of plots in each row, when the `same_window`
parameter is ``True``.
save_figures_format : :py:class:`str`
The image format extension.
See also
--------
get_decomposition_factors, get_decomposition_loadings
"""
factors = self.learning_results.factors
loadings = self.learning_results.loadings.T
self._export_factors(factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=comp_ids, folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row)
def export_cluster_results(self,
cluster_ids=None,
folder=None,
calibrate=True,
center_prefix='cluster_center',
center_format="hspy",
membership_prefix='cluster_label',
membership_format="hspy",
comp_label=None,
cmap=plt.cm.gray,
same_window=False,
multiple_files=True,
no_nans=True,
per_row=3,
save_figures=False,
save_figures_format='png'):
"""Export results from a cluster analysis to any of the supported
formats.
Parameters
----------
cluster_ids : None, int, or list of ints
if None, returns all clusters/centers.
if int, returns clusters/centers with ids from 0 to
given int.
if list of ints, returnsclusters/centers with ids in
given list.
folder : str or None
The path to the folder where the file will be saved.
If `None` the
current folder is used by default.
center_prefix : string
The prefix that any exported filenames for
cluster centers
begin with
center_format : string
The extension of the format that you wish to save to. Default is
"hspy". See `loading format` for more details.
label_prefix : string
The prefix that any exported filenames for
cluster labels
begin with
label_format : string
The extension of the format that you wish to save to. default
is "hspy". The format determines the kind of output.
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If True, on exporting a file per center will
be created. Otherwise only two files will be created, one for
the centers and another for the membership. The default value can
be chosen in the preferences.
save_figures : bool
If True the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Plotting options (for save_figures = True ONLY)
----------------------------------------------
calibrate : bool
if True, calibrates plots where calibration is available
from
the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window.
comp_label : string, the label that is either the plot title
(if plotting in separate windows) or the label in the legend
(if plotting in the same window)
cmap : The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int, the number of plots in each row, when the
same_window
parameter is True.
save_figures_format : str
The image format extension.
See Also
--------
get_cluster_signals,
get_cluster_labels.
"""
factors = self.learning_results.cluster_centers.T
loadings = self.learning_results.cluster_labels
self._export_factors(factors,
folder=folder,
comp_ids=cluster_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=center_prefix,
factor_format=center_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=cluster_ids,
folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=membership_prefix,
loading_format=membership_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row)
def export_bss_results(self,
comp_ids=None,
folder=None,
calibrate=True,
multiple_files=True,
save_figures=False,
factor_prefix='bss_factor',
factor_format="hspy",
loading_prefix='bss_loading',
loading_format="hspy",
comp_label=None, cmap=plt.cm.gray,
same_window=False,
no_nans=True,
per_row=3,
save_figures_format='png'):
"""Export results from ICA to any of the supported formats.
Parameters
----------
comp_ids : None, int, or list (of ints)
If None, returns all components/loadings.
If an int, returns components/loadings with ids from 0 to the
given value.
If a list of ints, returns components/loadings with ids provided in
the given list.
folder : str or None
The path to the folder where the file will be saved.
If ``None`` the current folder is used by default.
factor_prefix : str
The prefix that any exported filenames for factors/components
begin with
factor_format : str
The extension of the format that you wish to save the factors to.
Default is ``'hspy'``. See `loading_format` for more details.
loading_prefix : str
The prefix that any exported filenames for factors/components
begin with
loading_format : str
The extension of the format that you wish to save to. default
is ``'hspy'``. The format determines the kind of output:
* For image formats (``'tif'``, ``'png'``, ``'jpg'``, etc.),
plots are created using the plotting flags as below, and saved
at 600 dpi. One plot is saved per loading.
* For multidimensional formats (``'rpl'``, ``'hspy'``), arrays
are saved in single files. All loadings are contained in the
one file.
* For spectral formats (``'msa'``), each loading is saved to a
separate file.
multiple_files : bool
If ``True``, one file will be created for each factor and loading.
Otherwise, only two files will be created, one for
the factors and another for the loadings. The default value can
be chosen in the preferences.
save_figures : bool
If ``True``, the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Note
----
The following parameters are only used when ``save_figures = True``:
Other Parameters
----------------
calibrate : :py:class:`bool`
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : :py:class:`bool`
If ``True``, plots each factor to the same window.
comp_label : :py:class:`str`
the label that is either the plot title (if plotting in separate
windows) or the label in the legend (if plotting in the same window)
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for images, such as factors, loadings, or
for peak characteristics. Default is the matplotlib gray colormap
(``plt.cm.gray``).
per_row : :py:class:`int`
The number of plots in each row, when the `same_window`
parameter is ``True``.
save_figures_format : :py:class:`str`
The image format extension.
See also
--------
get_bss_factors, get_bss_loadings
"""
factors = self.learning_results.bss_factors
loadings = self.learning_results.bss_loadings.T
self._export_factors(factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=comp_ids,
folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row,
save_figures_format=save_figures_format)
def _get_loadings(self, loadings):
if loadings is None:
raise RuntimeError("No learning results found.")
from hyperspy.api import signals
data = loadings.T.reshape(
(-1,) + self.axes_manager.navigation_shape[::-1])
if data.shape[0] > 1:
signal = signals.BaseSignal(
data,
axes=(
[{"size": data.shape[0], "navigate": True}] +
self.axes_manager._get_navigation_axes_dicts()))
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
else:
signal = self._get_navigation_signal(data.squeeze())
return signal
def _get_factors(self, factors):
if factors is None:
raise RuntimeError("No learning results found.")
signal = self.__class__(
factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
axes=[{"size": factors.shape[-1], "navigate": True}] +
self.axes_manager._get_signal_axes_dicts())
signal.set_signal_type(self.metadata.Signal.signal_type)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
def get_decomposition_loadings(self):
"""Return the decomposition loadings.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_decomposition_factors, export_decomposition_results
"""
if self.learning_results.loadings is None:
raise RuntimeError("Run a decomposition first.")
signal = self._get_loadings(self.learning_results.loadings)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = "Decomposition loadings of " + \
self.metadata.General.title
return signal
def get_decomposition_factors(self):
"""Return the decomposition factors.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_decomposition_loadings, export_decomposition_results
"""
if self.learning_results.factors is None:
raise RuntimeError("Run a decomposition first.")
signal = self._get_factors(self.learning_results.factors)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = ("Decomposition factors of " +
self.metadata.General.title)
return signal
def get_bss_loadings(self):
"""Return the blind source separation loadings.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_bss_factors, export_bss_results
"""
signal = self._get_loadings(
self.learning_results.bss_loadings)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS loadings of " +
self.metadata.General.title)
return signal
def get_bss_factors(self):
"""Return the blind source separation factors.
Returns
-------
signal : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See also
--------
get_bss_loadings, export_bss_results
"""
signal = self._get_factors(self.learning_results.bss_factors)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS factors of " +
self.metadata.General.title)
return signal
def plot_bss_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2,):
"""Plot the blind source separation factors and loadings.
Unlike :py:meth:`~hyperspy.signal.MVATools.plot_bss_factors` and
:py:meth:`~hyperspy.signal.MVATools.plot_bss_loadings`,
this method displays one component at a time. Therefore it provides a
more compact visualization than then other two methods.
The loadings and factors are displayed in different windows and each
has its own navigator/sliders to navigate them if they are
multidimensional. The component index axis is synchronized between
the two.
Parameters
----------
factors_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
One of: ``'smart_auto'``, ``'auto'``, ``None``, ``'spectrum'`` or a
:py:class:`~hyperspy.signal.BaseSignal` object.
``'smart_auto'`` (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see the :py:meth:`~hyperspy.signal.BaseSignal.plot` documentation
for details.
loadings_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See the `factors_navigator` parameter
factors_dim : int
Currently HyperSpy cannot plot a signal when the signal dimension is
higher than two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2,
the data can be viewed as spectra (or images) by setting this
parameter to 1 (or 2). (The default is 2)
loadings_dim : int
See the ``factors_dim`` parameter
See also
--------
plot_bss_factors, plot_bss_loadings, plot_decomposition_results
"""
factors = self.get_bss_factors()
loadings = self.get_bss_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def plot_decomposition_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2):
"""Plot the decomposition factors and loadings.
Unlike :py:meth:`~hyperspy.signal.MVATools.plot_decomposition_factors`
and :py:meth:`~hyperspy.signal.MVATools.plot_decomposition_loadings`,
this method displays one component at a time. Therefore it provides a
more compact visualization than then other two methods. The loadings
and factors are displayed in different windows and each has its own
navigator/sliders to navigate them if they are multidimensional. The
component index axis is synchronized between the two.
Parameters
----------
factors_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
One of: ``'smart_auto'``, ``'auto'``, ``None``, ``'spectrum'`` or a
:py:class:`~hyperspy.signal.BaseSignal` object.
``'smart_auto'`` (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see the :py:meth:`~hyperspy.signal.BaseSignal.plot` documentation
for details.
loadings_navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
See the `factors_navigator` parameter
factors_dim : int
Currently HyperSpy cannot plot a signal when the signal dimension is
higher than two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2,
the data can be viewed as spectra (or images) by setting this
parameter to 1 (or 2). (The default is 2)
loadings_dim : int
See the ``factors_dim`` parameter
See also
--------
plot_decomposition_factors, plot_decomposition_loadings,
plot_bss_results
"""
factors = self.get_decomposition_factors()
loadings = self.get_decomposition_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def get_cluster_labels(self, merged=False):
"""Return cluster labels as a Signal.
Parameters
----------
merged : bool
If False the cluster label signal has a navigation axes of length
number_of_clusters and the signal along the the navigation
direction is binary - 0 the point is not in the cluster, 1 it is
included. If True, the cluster labels are merged (no navigation
axes). The value of the signal at any point will be between -1 and
the number of clusters. -1 represents the points that
were masked for cluster analysis if any.
See Also
--------
get_cluster_signals
Returns
-------
signal Hyperspy signal of cluster labels
"""
if self.learning_results.cluster_labels is None:
raise RuntimeError(
"Cluster analysis needs to be performed first.")
if merged:
data = (np.arange(1, self.learning_results.number_of_clusters + 1)
[:, np.newaxis] *
self.learning_results.cluster_labels ).sum(0) - 1
label_signal = self._get_loadings(data)
else:
label_signal = self._get_loadings(
self.learning_results.cluster_labels.T)
label_signal.axes_manager._axes[0].name = "Cluster index"
label_signal.metadata.General.title = (
"Cluster labels of " + self.metadata.General.title)
return label_signal
def _get_cluster_signals_factors(self, signal):
if self.learning_results.cluster_centroid_signals is None:
raise RuntimeError("Cluster analysis needs to be performed first.")
if signal == "mean":
members = self.learning_results.cluster_labels.sum(1, keepdims=True)
cs = self.learning_results.cluster_sum_signals / members
elif signal == "sum":
cs=self.learning_results.cluster_sum_signals
elif signal == "centroid":
cs=self.learning_results.cluster_centroid_signals
return cs
def get_cluster_signals(self, signal="mean"):
"""Return the cluster centers as a Signal.
Parameters
----------
%s
See Also
--------
get_cluster_labels
"""
cs = self._get_cluster_signals_factors(signal=signal)
signal = self._get_factors(cs.T)
signal.axes_manager._axes[0].name="Cluster index"
signal.metadata.General.title = (
f"Cluster {signal} signals of {self.metadata.General.title}")
return signal
get_cluster_signals.__doc__ %= (CLUSTER_SIGNALS_ARG)
def get_cluster_distances(self):
"""Euclidian distances to the centroid of each cluster
See Also
--------
get_cluster_signals
Returns
-------
signal
Hyperspy signal of cluster distances
"""
if self.learning_results.cluster_distances is None:
raise RuntimeError("Cluster analysis needs to be performed first.")
distance_signal = self._get_loadings(self.learning_results.cluster_distances.T)
distance_signal.axes_manager._axes[0].name = "Cluster index"
distance_signal.metadata.General.title = \
"Cluster distances of " + self.metadata.General.title
return distance_signal
def plot_cluster_signals(
self,
signal="mean",
cluster_ids=None,
calibrate=True,
same_window=True,
comp_label="Cluster centers",
per_row=3):
"""Plot centers from a cluster analysis.
Parameters
----------
%s
cluster_ids : None, int, or list of ints
if None, returns maps of all clusters.
if int, returns maps of clusters with ids from 0 to given
int.
if list of ints, returns maps of clusters with ids in
given list.
calibrate :
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each center to the same window. They are
not scaled.
comp_label : string
the label that is either the plot title (if plotting in
separate windows) or the label in the legend (if plotting
in the same window)
per_row : int
the number of plots in each row, when the same_window parameter is
True.
See Also
--------
plot_cluster_labels
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2.")
cs = self._get_cluster_signals_factors(signal=signal)
if same_window is None:
same_window = True
factors = cs.T
if cluster_ids is None:
cluster_ids = range(factors.shape[1])
return self._plot_factors_or_pchars(factors,
comp_ids=cluster_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
per_row=per_row)
plot_cluster_signals.__doc__ %= (CLUSTER_SIGNALS_ARG)
def plot_cluster_labels(
        self,
        cluster_ids=None,
        calibrate=True,
        same_window=True,
        with_centers=False,
        cmap=plt.cm.gray,
        no_nans=False,
        per_row=3,
        axes_decor='all',
        title=None,
        **kwargs):
    """Plot cluster labels from a cluster analysis. In case of 1D navigation axis,
    each loading line can be toggled on and off by clicking on the legended
    line.

    Parameters
    ----------
    cluster_ids : None, int, or list of ints
        if None (default), returns maps of all components using the
        number_of_cluster was defined when
        executing ``cluster``. Otherwise it raises a ValueError.
        if int, returns maps of cluster labels with ids from 0 to
        given int.
        if list of ints, returns maps of cluster labels with ids in
        given list.
    calibrate : bool
        if True, calibrates plots where calibration is available
        from the axes_manager. If False, plots are in pixels/channels.
    same_window : bool
        if True, plots each factor to the same window. They are
        not scaled. Default is True.
    title : string
        Title of the plot.
    with_centers : bool
        If True, also returns figure(s) with the cluster centers for the
        given cluster_ids.
    cmap : matplotlib colormap
        The colormap used for the factor image, or for peak
        characteristics, the colormap used for the scatter plot of
        some peak characteristic.
    no_nans : bool
        If True, removes NaN's from the loading plots.
    per_row : int
        the number of plots in each row, when the same_window
        parameter is True.
    axes_decor : {'all', 'ticks', 'off', None}, optional
        Controls how the axes are displayed on each image; default is 'all'
        If 'all', both ticks and axis labels will be shown
        If 'ticks', no axis labels will be shown, but ticks/labels will
        If 'off', all decorations and frame will be disabled
        If None, no axis decorations will be shown, but ticks/frame will

    See Also
    --------
    plot_cluster_signals, plot_cluster_results.
    """
    # NOTE(review): the `calibrate` parameter is accepted but never used
    # in this method — confirm whether it should be forwarded.
    if self.axes_manager.navigation_dimension > 2:
        raise NotImplementedError("This method cannot plot labels of "
                                  "dimension higher than 2."
                                  "You can use "
                                  "`plot_cluster_results` instead.")
    if same_window is None:
        same_window = True
    # Cast boolean/label membership array to unsigned ints for plotting.
    labels = self.learning_results.cluster_labels.astype("uint")
    if with_centers:
        centers = self.learning_results.cluster_centers.T
    else:
        centers = None
    if cluster_ids is None:
        cluster_ids = range(labels.shape[0])
    # Deprecated `comp_label` kwarg: only takes effect when `title` is unset.
    comp_label = kwargs.get("comp_label", None)
    title = _change_API_comp_label(title, comp_label)
    if title is None:
        title = self._get_plot_title(
            'Cluster labels of', same_window=same_window)
    return self._plot_loadings(labels,
                               comp_ids=cluster_ids,
                               with_factors=with_centers,
                               factors=centers,
                               same_window=same_window,
                               comp_label=title,
                               cmap=cmap,
                               no_nans=no_nans,
                               per_row=per_row,
                               axes_decor=axes_decor)
def plot_cluster_distances(
        self,
        cluster_ids=None,
        calibrate=True,
        same_window=True,
        with_centers=False,
        cmap=plt.cm.gray,
        no_nans=False,
        per_row=3,
        axes_decor='all',
        title=None,
        **kwargs):
    """Plot the euclidian distances to the centroid of each cluster.

    In case of 1D navigation axis,
    each line can be toggled on and off by clicking on the legended
    line.

    Parameters
    ----------
    cluster_ids : None, int, or list of ints
        if None (default), returns maps of all components using the
        number_of_cluster was defined when
        executing ``cluster``. Otherwise it raises a ValueError.
        if int, returns maps of cluster labels with ids from 0 to
        given int.
        if list of ints, returns maps of cluster labels with ids in
        given list.
    calibrate : bool
        if True, calibrates plots where calibration is available
        from the axes_manager. If False, plots are in pixels/channels.
    same_window : bool
        if True, plots each factor to the same window. They are
        not scaled. Default is True.
    title : string
        Title of the plot.
    with_centers : bool
        If True, also returns figure(s) with the cluster centers for the
        given cluster_ids.
    cmap : matplotlib colormap
        The colormap used for the factor image, or for peak
        characteristics, the colormap used for the scatter plot of
        some peak characteristic.
    no_nans : bool
        If True, removes NaN's from the loading plots.
    per_row : int
        the number of plots in each row, when the same_window
        parameter is True.
    axes_decor : {'all', 'ticks', 'off', None}, optional
        Controls how the axes are displayed on each image; default is 'all'
        If 'all', both ticks and axis labels will be shown
        If 'ticks', no axis labels will be shown, but ticks/labels will
        If 'off', all decorations and frame will be disabled
        If None, no axis decorations will be shown, but ticks/frame will

    See Also
    --------
    plot_cluster_signals, plot_cluster_results, plot_cluster_labels
    """
    # NOTE(review): the `calibrate` parameter is accepted but never used
    # in this method — confirm whether it should be forwarded.
    if self.axes_manager.navigation_dimension > 2:
        raise NotImplementedError("This method cannot plot labels of "
                                  "dimension higher than 2."
                                  "You can use "
                                  "`plot_cluster_results` instead.")
    if same_window is None:
        same_window = True
    distances = self.learning_results.cluster_distances
    if with_centers:
        centers = self.learning_results.cluster_centers.T
    else:
        centers = None
    if cluster_ids is None:
        cluster_ids = range(distances.shape[0])
    # Deprecated `comp_label` kwarg: only takes effect when `title` is unset.
    comp_label = kwargs.get("comp_label", None)
    title = _change_API_comp_label(title, comp_label)
    if title is None:
        title = self._get_plot_title(
            'Cluster distances of', same_window=same_window)
    return self._plot_loadings(distances,
                               comp_ids=cluster_ids,
                               with_factors=with_centers,
                               factors=centers,
                               same_window=same_window,
                               comp_label=title,
                               cmap=cmap,
                               no_nans=no_nans,
                               per_row=per_row,
                               axes_decor=axes_decor)
def plot_cluster_results(self,
                         centers_navigator="smart_auto",
                         labels_navigator="smart_auto",
                         centers_dim=2,
                         labels_dim=2,
                         ):
    """Plot the cluster labels and centers.

    Unlike `plot_cluster_labels` and `plot_cluster_signals`, this
    method displays one component at a time, giving a more compact
    visualization than the other two methods. The labels and centers
    are shown in separate windows, each with its own
    navigator/sliders when multidimensional; the component index axis
    is synchronized between the two.

    Parameters
    ----------
    centers_navigator, labels_navigator : {"smart_auto",
        "auto", None, "spectrum", Signal}
        "smart_auto" (default) displays sliders if the navigation
        dimension is less than 3. For a description of the other options
        see `plot` documentation for details.
    labels_dim, centers_dims : int
        Currently HyperSpy cannot plot signals of dimension higher than
        two. Therefore, to visualize the clustering results when the
        centers or the labels have signal dimension greater than 2
        we can view the data as spectra(images) by setting this parameter
        to 1(2). (Default 2)

    See Also
    --------
    plot_cluster_signals, plot_cluster_labels.
    """
    cluster_centers = self.get_cluster_signals()
    cluster_distances = self.get_cluster_distances()
    # The merged label map is shown as its own plot.
    self.get_cluster_labels(merged=True).plot()
    _plot_x_results(factors=cluster_centers,
                    loadings=cluster_distances,
                    factors_navigator=centers_navigator,
                    loadings_navigator=labels_navigator,
                    factors_dim=centers_dim,
                    loadings_dim=labels_dim)
def _plot_x_results(factors, loadings, factors_navigator, loadings_navigator,
                    factors_dim, loadings_dim):
    """Plot factor/loading pairs with synchronized component axes.

    Parameters
    ----------
    factors, loadings : BaseSignal
        The factor and loading signals to display; their first axis
        (component index) is shared so navigation stays in sync.
    factors_navigator, loadings_navigator : {"smart_auto", "auto", None,
        "spectrum", Signal}
        Navigator choice for each plot. "smart_auto" resolves to sliders
        for low navigation dimension, otherwise "auto". Any other value
        is passed through to ``plot`` unchanged.
    factors_dim, loadings_dim : int
        Signal dimension used to view data whose signal dimension
        exceeds 2.
    """
    # Share the component-index axis so both plots navigate together.
    factors.axes_manager._axes[0] = loadings.axes_manager._axes[0]
    if loadings.axes_manager.signal_dimension > 2:
        loadings.axes_manager.set_signal_dimension(loadings_dim)
    if factors.axes_manager.signal_dimension > 2:
        factors.axes_manager.set_signal_dimension(factors_dim)
    # Bug fix: only remap the "smart_auto" placeholder. Previously the
    # unconditional `else` branches clobbered explicit user choices
    # (None, "spectrum", a Signal) with "auto"/None.
    if loadings_navigator == "smart_auto":
        if loadings.axes_manager.navigation_dimension < 3:
            loadings_navigator = "slider"
        else:
            loadings_navigator = "auto"
    if factors_navigator == "smart_auto":
        if (factors.axes_manager.navigation_dimension < 3 or
                loadings_navigator is not None):
            # The loadings plot already provides navigation controls.
            factors_navigator = None
        else:
            factors_navigator = "auto"
    loadings.plot(navigator=loadings_navigator)
    factors.plot(navigator=factors_navigator)
def _change_API_comp_label(title, comp_label):
    """Resolve the deprecated `comp_label` argument against `title`.

    Parameters
    ----------
    title : str or None
        The new-style plot title argument.
    comp_label : str or None
        The deprecated argument. When `title` is None it is used as the
        title (with a deprecation warning); otherwise it is ignored
        (with a warning).

    Returns
    -------
    str or None
        The title to use.
    """
    if comp_label is not None:
        if title is None:
            title = comp_label
            warnings.warn("The 'comp_label' argument will be deprecated "
                          "in 2.0, please use 'title' instead",
                          VisibleDeprecationWarning)
        else:
            # Bug fix: the message was split by a stray comma, so the
            # second string was passed as the `category` argument of
            # warnings.warn, raising TypeError at runtime.
            warnings.warn("The 'comp_label' argument will be deprecated "
                          "in 2.0. Since you are already using the 'title' "
                          "argument, 'comp_label' is ignored.",
                          VisibleDeprecationWarning)
    return title
class SpecialSlicersSignal(SpecialSlicers):
    """Slicer that also supports assignment into the sliced region."""

    def __setitem__(self, i, j):
        """x.__setitem__(i, y) <==> x[i]=y
        """
        # Assigning a signal writes its raw data into the slice.
        value = j.data if isinstance(j, BaseSignal) else j
        slices = self.obj._get_array_slices(i, self.isNavigation)
        self.obj.data[slices] = value

    def __len__(self):
        return self.obj.axes_manager.signal_shape[0]
class BaseSetMetadataItems(t.HasTraits):
    """Base GUI helper that maps metadata items to traits.

    Subclasses are expected to define ``mapping``, a dict of
    metadata-item path -> trait name (assumption — confirm in subclasses).
    """

    def __init__(self, signal):
        metadata = signal.metadata
        for item_path, trait_name in self.mapping.items():
            if metadata.has_item(item_path):
                setattr(self, trait_name, metadata.get_item(item_path))
        self.signal = signal

    def store(self, *args, **kwargs):
        """Write the (defined) trait values back into the signal metadata."""
        for item_path, trait_name in self.mapping.items():
            value = getattr(self, trait_name)
            if value != t.Undefined:
                self.signal.metadata.set_item(item_path, value)
class BaseSignal(FancySlicing,
                 MVA,
                 MVATools,):
    # Class-level defaults; subclasses override these to declare their kind.
    _dtype = "real"                # data flavour label ("real" here)
    _signal_dimension = -1         # NOTE(review): -1 appears to mean
                                   # "unspecified" — confirm
    _signal_type = ""              # signal-type string used when assigning
                                   # the appropriate subclass
    _lazy = False                  # True for dask-backed (lazy) signals
    _alias_signal_types = []       # alternative names for _signal_type
    # Metadata items that must be sliced together with the data.
    _additional_slicing_targets = [
        "metadata.Signal.Noise_properties.variance",
    ]
def __init__(self, data, **kwds):
    """Create a Signal from a numpy array.

    Parameters
    ----------
    data : :py:class:`numpy.ndarray`
        The signal data. It can be an array of any dimensions.
    axes : [dict/axes], optional
        List of either dictionaries or axes objects to define the axes (see
        the documentation of the :py:class:`~hyperspy.axes.AxesManager`
        class for more details).
    attributes : dict, optional
        A dictionary whose items are stored as attributes.
    metadata : dict, optional
        A dictionary containing a set of parameters
        that will to stores in the ``metadata`` attribute.
        Some parameters might be mandatory in some cases.
    original_metadata : dict, optional
        A dictionary containing a set of parameters
        that will to stores in the ``original_metadata`` attribute. It
        typically contains all the parameters that has been
        imported from the original data file.
    """
    # the 'full_initialisation' keyword is private API to be used by the
    # _assign_subclass method. Purposely not exposed as public API.
    # Its purpose is to avoid creating new attributes, which breaks events
    # and to reduce overhead when changing 'signal_type'.
    if kwds.get('full_initialisation', True):
        self._create_metadata()
        self.models = ModelManager(self)
        self.learning_results = LearningResults()
        kwds['data'] = data
        self._load_dictionary(kwds)
        self._plot = None
        # Slicers: `inav` slices navigation space, `isig` signal space.
        self.inav = SpecialSlicersSignal(self, True)
        self.isig = SpecialSlicersSignal(self, False)
        self.events = Events()
        self.events.data_changed = Event("""
            Event that triggers when the data has changed

            The event trigger when the data is ready for consumption by any
            process that depend on it as input. Plotted signals automatically
            connect this Event to its `BaseSignal.plot()`.

            Note: The event only fires at certain specific times, not everytime
            that the `BaseSignal.data` array changes values.

            Arguments:
                obj: The signal that owns the data.
            """, arguments=['obj'])
def _create_metadata(self):
    """Initialise empty metadata, original_metadata and tmp_parameters
    trees with the mandatory nodes."""
    md = DictionaryTreeBrowser()
    self.metadata = md
    for node in ("_HyperSpy", "General", "Signal"):
        md.add_node(node)
    md._HyperSpy.add_node("Folding")
    # Folding state defaults: nothing has been (un)folded yet.
    fold = md._HyperSpy.Folding
    fold.unfolded = False
    fold.signal_unfolded = False
    fold.original_shape = None
    fold.original_axes_manager = None
    self.original_metadata = DictionaryTreeBrowser()
    self.tmp_parameters = DictionaryTreeBrowser()
def __repr__(self):
    """Return a short text representation including class, title and
    dimensions (flagging unfolded signals)."""
    unfolded = "unfolded " if self.metadata._HyperSpy.Folding.unfolded else ""
    return "<{}, title: {}, {}dimensions: {}>".format(
        self.__class__.__name__,
        self.metadata.General.title,
        unfolded,
        self.axes_manager._get_dimension_str(),
    )
def _binary_operator_ruler(self, other, op_name):
    """Dispatch the binary operator named `op_name` (e.g. '__add__',
    '__iadd__') between this signal and `other`.

    Handles three cases: signals with identical shapes, broadcastable
    signals with different shapes, and non-signal operands. In-place
    operators mutate and return `self`; others return a new signal.
    """
    exception_message = (
        "Invalid dimensions for this operation")
    if isinstance(other, BaseSignal):
        # Both objects are signals
        oam = other.axes_manager
        sam = self.axes_manager
        if sam.navigation_shape == oam.navigation_shape and \
                sam.signal_shape == oam.signal_shape:
            # They have the same signal shape.
            # The signal axes are aligned but there is
            # no guarantee that data axes area aligned so we make sure that
            # they are aligned for the operation.
            sdata = self._data_aligned_with_axes
            odata = other._data_aligned_with_axes
            if op_name in INPLACE_OPERATORS:
                self.data = getattr(sdata, op_name)(odata)
                self.axes_manager._sort_axes()
                return self
            else:
                ns = self._deepcopy_with_new_data(
                    getattr(sdata, op_name)(odata))
                ns.axes_manager._sort_axes()
                return ns
        else:
            # Different navigation and/or signal shapes
            if not are_signals_aligned(self, other):
                raise ValueError(exception_message)
            else:
                # They are broadcastable but have different number of axes
                ns, no = broadcast_signals(self, other)
                sdata = ns.data
                odata = no.data
                if op_name in INPLACE_OPERATORS:
                    # This should raise a ValueError if the operation
                    # changes the shape of the object on the left.
                    self.data = getattr(sdata, op_name)(odata)
                    self.axes_manager._sort_axes()
                    return self
                else:
                    ns.data = getattr(sdata, op_name)(odata)
                    return ns
    else:
        # Second object is not a Signal
        if op_name in INPLACE_OPERATORS:
            getattr(self.data, op_name)(other)
            return self
        else:
            return self._deepcopy_with_new_data(
                getattr(self.data, op_name)(other))
def _unary_operator_ruler(self, op_name):
    """Apply the unary operator named `op_name` (e.g. '__neg__') to the
    data and return the result as a new signal."""
    result = getattr(self.data, op_name)()
    return self._deepcopy_with_new_data(result)
def _check_signal_dimension_equals_one(self):
    """Raise SignalDimensionError unless the signal dimension is 1."""
    signal_dimension = self.axes_manager.signal_dimension
    if signal_dimension != 1:
        raise SignalDimensionError(signal_dimension, 1)
def _check_signal_dimension_equals_two(self):
    """Raise SignalDimensionError unless the signal dimension is 2."""
    signal_dimension = self.axes_manager.signal_dimension
    if signal_dimension != 2:
        raise SignalDimensionError(signal_dimension, 2)
def _deepcopy_with_new_data(self, data=None, copy_variance=False,
                            copy_navigator=False,
                            copy_learning_results=False):
    """Returns a deepcopy of itself replacing the data.

    This method has an advantage over the default :py:func:`copy.deepcopy`
    in that it does not copy the data, which can save memory.

    Parameters
    ----------
    data : None or :py:class:`numpy.ndarray`
    copy_variance : bool
        Whether to copy the variance of the signal to the new copy
    copy_navigator : bool
        Whether to copy the navigator of the signal to the new copy
    copy_learning_results : bool
        Whether to copy the learning_results of the signal to the new copy

    Returns
    -------
    ns : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
        The newly copied signal
    """
    # Strategy: temporarily detach the heavy/unwanted attributes (data,
    # plot, models, and optionally variance/navigator/learning results),
    # deepcopy what remains, then restore everything in `finally` so the
    # original signal is unchanged even if deepcopy raises.
    old_np = None
    old_navigator = None
    old_learning_results = None
    try:
        old_data = self.data
        self.data = None
        old_plot = self._plot
        self._plot = None
        old_models = self.models._models
        if not copy_variance and "Noise_properties" in self.metadata.Signal:
            old_np = self.metadata.Signal.Noise_properties
            del self.metadata.Signal.Noise_properties
        if not copy_navigator and self.metadata.has_item('_HyperSpy.navigator'):
            old_navigator = self.metadata._HyperSpy.navigator
            del self.metadata._HyperSpy.navigator
        if not copy_learning_results:
            old_learning_results = self.learning_results
            del self.learning_results
        self.models._models = DictionaryTreeBrowser()
        ns = self.deepcopy()
        ns.data = data
        return ns
    finally:
        # Restore the detached attributes on the original signal.
        self.data = old_data
        self._plot = old_plot
        self.models._models = old_models
        if old_np is not None:
            self.metadata.Signal.Noise_properties = old_np
        if old_navigator is not None:
            self.metadata._HyperSpy.navigator = old_navigator
        if old_learning_results is not None:
            self.learning_results = old_learning_results
def as_lazy(self, copy_variance=True, copy_navigator=True,
            copy_learning_results=True):
    """
    Create a copy of the given Signal as a
    :py:class:`~hyperspy._signals.lazy.LazySignal`.

    Parameters
    ----------
    copy_variance : bool
        Whether or not to copy the variance from the original Signal to
        the new lazy version. Default is True.
    copy_navigator : bool
        Whether or not to copy the navigator from the original Signal to
        the new lazy version. Default is True.
    copy_learning_results : bool
        Whether to copy the learning_results from the original signal to
        the new lazy version. Default is True.

    Returns
    -------
    res : :py:class:`~hyperspy._signals.lazy.LazySignal`
        The same signal, converted to be lazy
    """
    # Copy everything but the data array itself, then flip the lazy flag
    # and let _assign_subclass pick the matching LazySignal subclass.
    lazy_signal = self._deepcopy_with_new_data(
        self.data,
        copy_variance=copy_variance,
        copy_navigator=copy_navigator,
        copy_learning_results=copy_learning_results,
    )
    lazy_signal._lazy = True
    lazy_signal._assign_subclass()
    return lazy_signal
def _summary(self):
    """Return a multi-line text summary of title, signal type,
    dimensions and data type."""
    parts = ["\n\tTitle: ", self.metadata.General.title]
    if self.metadata.has_item("Signal.signal_type"):
        parts += ["\n\tSignal type: ", self.metadata.Signal.signal_type]
    parts += [
        "\n\tData dimensions: ", str(self.axes_manager.shape),
        "\n\tData type: ", str(self.data.dtype),
    ]
    return "".join(parts)
def _print_summary(self):
    """Print a human-readable summary of the signal to stdout."""
    print(self._summary())
@property
def data(self):
    """The underlying data structure as a :py:class:`numpy.ndarray` (or
    :py:class:`dask.array.Array`, if the Signal is lazy)."""
    return self._data

@data.setter
def data(self, value):
    # Imported locally to avoid a hard dask dependency at import time.
    from dask.array import Array
    if isinstance(value, Array):
        if not value.ndim:
            # Promote 0-d dask arrays to 1-d so they can be indexed.
            value = value.reshape((1,))
        self._data = value
    else:
        # asanyarray preserves ndarray subclasses; atleast_1d guarantees
        # the data always has at least one dimension.
        self._data = np.atleast_1d(np.asanyarray(value))
def _load_dictionary(self, file_data_dict):
    """Load data from dictionary.

    Parameters
    ----------
    file_data_dict : dict
        A dictionary containing at least a 'data' keyword with an array of
        arbitrary dimensions. Additionally the dictionary can contain the
        following items:

        * data: the signal data. It can be an array of any dimensions.
        * axes: a dictionary to define the axes (see the documentation of
          the :py:class:`~hyperspy.axes.AxesManager` class for more details).
        * attributes: a dictionary whose items are stored as attributes.
        * metadata: a dictionary containing a set of parameters that will
          to stores in the `metadata` attribute. Some parameters might be
          mandatory in some cases.
        * original_metadata: a dictionary containing a set of parameters
          that will to stores in the `original_metadata` attribute. It
          typically contains all the parameters that has been
          imported from the original data file.
    """
    self.data = file_data_dict['data']
    # Setting `data` may flip the lazy flag; remember it to detect that.
    oldlazy = self._lazy
    if 'models' in file_data_dict:
        self.models._add_dictionary(file_data_dict['models'])
    if 'axes' not in file_data_dict:
        # No axes supplied: create bare axes matching the data shape.
        file_data_dict['axes'] = self._get_undefined_axes_list()
    self.axes_manager = AxesManager(
        file_data_dict['axes'])
    if 'metadata' not in file_data_dict:
        file_data_dict['metadata'] = {}
    if 'original_metadata' not in file_data_dict:
        file_data_dict['original_metadata'] = {}
    if 'attributes' in file_data_dict:
        # Attributes may be nested one level deep (dict of dicts).
        for key, value in file_data_dict['attributes'].items():
            if hasattr(self, key):
                if isinstance(value, dict):
                    for k, v in value.items():
                        setattr(getattr(self, key), k, v)
                else:
                    setattr(self, key, value)
    self.original_metadata.add_dictionary(
        file_data_dict['original_metadata'])
    self.metadata.add_dictionary(
        file_data_dict['metadata'])
    if "title" not in self.metadata.General:
        self.metadata.General.title = ''
    if (self._signal_type or not self.metadata.has_item("Signal.signal_type")):
        self.metadata.Signal.signal_type = self._signal_type
    if "learning_results" in file_data_dict:
        self.learning_results.__dict__.update(
            file_data_dict["learning_results"])
    if self._lazy is not oldlazy:
        # Laziness changed while loading: pick the matching subclass.
        self._assign_subclass()
# TODO: try to find a way to use dask ufuncs when called with lazy data (e.g.
# np.log(s) -> da.log(s.data) wrapped.
def __array__(self, dtype=None):
    """Return the data as an array, cast to `dtype` when requested."""
    data = self.data
    if dtype:
        data = data.astype(dtype)
    return data
def __array_wrap__(self, array, context=None):
    """Wrap the output of a numpy ufunc back into a signal, composing a
    descriptive title from the ufunc name and the operand titles."""
    signal = self._deepcopy_with_new_data(array)
    if context is not None:
        # ufunc, argument of the ufunc, domain of the ufunc
        # In ufuncs with multiple outputs, domain indicates which output
        # is currently being prepared (eg. see modf).
        # In ufuncs with a single output, domain is 0
        uf, objs, huh = context

        def get_title(signal, i=0):
            # Fall back to a numbered placeholder for untitled operands.
            g = signal.metadata.General
            if g.title:
                return g.title
            else:
                return "Untitled Signal %s" % (i + 1)

        title_strs = []
        i = 0
        for obj in objs:
            if isinstance(obj, BaseSignal):
                title_strs.append(get_title(obj, i))
                i += 1
            else:
                # Non-signal operands (scalars, arrays) use their repr.
                title_strs.append(str(obj))

        signal.metadata.General.title = "%s(%s)" % (
            uf.__name__, ", ".join(title_strs))
    return signal
def squeeze(self):
    """Remove single-dimensional entries from the shape of an array
    and the axes. See :py:func:`numpy.squeeze` for more details.

    Returns
    -------
    s : signal
        A new signal object with single-entry dimensions removed

    Examples
    --------
    >>> s = hs.signals.Signal2D(np.random.random((2,1,1,6,8,8)))
    <Signal2D, title: , dimensions: (6, 1, 1, 2|8, 8)>
    >>> s = s.squeeze()
    >>> s
    <Signal2D, title: , dimensions: (6, 2|8, 8)>
    """
    # We deepcopy everything but data
    # Rebinding `self` to the copy leaves the caller's signal untouched.
    self = self._deepcopy_with_new_data(self.data)
    for ax in (self.axes_manager.signal_axes, self.axes_manager.navigation_axes):
        # Iterate in reverse so removing an axis does not shift the
        # indices of axes not yet visited.
        for axis in reversed(ax):
            if axis.size == 1:
                self._remove_axis(axis.index_in_axes_manager)
    self.data = self.data.squeeze()
    return self
def _to_dictionary(self, add_learning_results=True, add_models=False,
                   add_original_metadata=True):
    """Returns a dictionary that can be used to recreate the signal.

    All items but `data` are copies.

    Parameters
    ----------
    add_learning_results : bool, optional
        Whether or not to include any multivariate learning results in
        the outputted dictionary. Default is True.
    add_models : bool, optional
        Whether or not to include any models in the outputted dictionary.
        Default is False
    add_original_metadata : bool
        Whether or not to include the original_medata in the outputted
        dictionary. Default is True.

    Returns
    -------
    dic : dict
        The dictionary that can be used to recreate the signal
    """
    # `data` is shared (not copied); everything else is deep-copied.
    signal_dict = {
        'data': self.data,
        'axes': self.axes_manager._get_axes_dicts(),
        'metadata': copy.deepcopy(self.metadata.as_dictionary()),
        'tmp_parameters': self.tmp_parameters.as_dictionary(),
        'attributes': {'_lazy': self._lazy},
    }
    if add_original_metadata:
        signal_dict['original_metadata'] = copy.deepcopy(
            self.original_metadata.as_dictionary())
    if add_learning_results and hasattr(self, 'learning_results'):
        signal_dict['learning_results'] = copy.deepcopy(
            self.learning_results.__dict__)
    if add_models:
        signal_dict['models'] = self.models._models.as_dictionary()
    return signal_dict
def _get_undefined_axes_list(self):
    """Return a minimal axes description (sizes only) matching the
    shape of the data."""
    return [{'size': int(size)} for size in self.data.shape]
def __call__(self, axes_manager=None, fft_shift=False):
    """Return the signal data at the current navigation position of
    `axes_manager` (defaulting to the signal's own), optionally
    fft-shifted. Lazy (dask) slices are computed to numpy arrays."""
    am = self.axes_manager if axes_manager is None else axes_manager
    value = np.atleast_1d(self.data[am._getitem_tuple])
    if isinstance(value, da.Array):
        value = np.asarray(value)
    if fft_shift:
        value = np.fft.fftshift(value)
    return value
@property
def navigator(self):
    """The navigator signal used when plotting, stored in metadata under
    '_HyperSpy.navigator' (None when unset)."""
    return self.metadata.get_item('_HyperSpy.navigator')

@navigator.setter
def navigator(self, navigator):
    self.metadata.set_item('_HyperSpy.navigator', navigator)
def plot(self, navigator="auto", axes_manager=None, plot_markers=True,
         **kwargs):
    """%s
    %s
    %s
    %s
    """
    # Close any previous figure before creating a new one.
    if self._plot is not None:
        self._plot.close()

    if 'power_spectrum' in kwargs:
        if not np.issubdtype(self.data.dtype, np.complexfloating):
            raise ValueError('The parameter `power_spectrum` required a '
                             'signal with complex data type.')
            # NOTE(review): unreachable after the raise above — confirm
            # whether this `del` was meant for a different branch.
            del kwargs['power_spectrum']

    if axes_manager is None:
        axes_manager = self.axes_manager
    if self.is_rgbx is True:
        # RGB(A) images have no meaningful summed navigator.
        if axes_manager.navigation_size < 2:
            navigator = None
        else:
            navigator = "slider"
    # Select the explorer matching the signal dimension.
    if axes_manager.signal_dimension == 0:
        self._plot = mpl_he.MPL_HyperExplorer()
    elif axes_manager.signal_dimension == 1:
        # Hyperspectrum
        self._plot = mpl_hse.MPL_HyperSignal1D_Explorer()
    elif axes_manager.signal_dimension == 2:
        self._plot = mpl_hie.MPL_HyperImage_Explorer()
    else:
        raise ValueError(
            "Plotting is not supported for this view. "
            "Try e.g. 's.transpose(signal_axes=1).plot()' for "
            "plotting as a 1D signal, or "
            "'s.transpose(signal_axes=(1,2)).plot()' "
            "for plotting as a 2D signal.")

    self._plot.axes_manager = axes_manager
    self._plot.signal_data_function = self.__call__
    if self.metadata.has_item("Signal.quantity"):
        self._plot.quantity_label = self.metadata.Signal.quantity

    if self.metadata.General.title:
        title = self.metadata.General.title
        self._plot.signal_title = title
    elif self.tmp_parameters.has_item('filename'):
        self._plot.signal_title = self.tmp_parameters.filename

    def get_static_explorer_wrapper(*args, **kwargs):
        # Complex navigators are displayed through their modulus.
        if np.issubdtype(navigator.data.dtype, np.complexfloating):
            return np.abs(navigator())
        else:
            return navigator()

    def get_1D_sum_explorer_wrapper(*args, **kwargs):
        navigator = self
        # Sum over all but the first navigation axis.
        am = navigator.axes_manager
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning,
                                    module='hyperspy'
                                    )
            navigator = navigator.sum(
                am.signal_axes + am.navigation_axes[1:]
            )
        return np.nan_to_num(navigator.data).squeeze()

    def get_dynamic_explorer_wrapper(*args, **kwargs):
        # Keep the navigator's indices in sync with the signal's.
        navigator.axes_manager.indices = self.axes_manager.indices[
            navigator.axes_manager.signal_dimension:]
        navigator.axes_manager._update_attributes()
        if np.issubdtype(navigator().dtype, np.complexfloating):
            return np.abs(navigator())
        else:
            return navigator()

    # Resolve the "auto" navigator choice from the signal's geometry.
    if not isinstance(navigator, BaseSignal) and navigator == "auto":
        if self.navigator is not None:
            navigator = self.navigator
        elif (self.axes_manager.navigation_dimension > 1 and
              np.any(np.array([not axis.is_uniform for axis in
                               self.axes_manager.navigation_axes]))):
            navigator = "slider"
        elif (self.axes_manager.navigation_dimension == 1 and
              self.axes_manager.signal_dimension == 1):
            if (self.axes_manager.navigation_axes[0].is_uniform and
                    self.axes_manager.signal_axes[0].is_uniform):
                navigator = "data"
            else:
                navigator = "spectrum"
        elif self.axes_manager.navigation_dimension > 0:
            if self.axes_manager.signal_dimension == 0:
                navigator = self.deepcopy()
            else:
                # Navigator recomputed interactively when data changes.
                navigator = interactive(
                    self.sum,
                    self.events.data_changed,
                    self.axes_manager.events.any_axis_changed,
                    self.axes_manager.signal_axes)
            if navigator.axes_manager.navigation_dimension == 1:
                navigator = interactive(
                    navigator.as_signal1D,
                    navigator.events.data_changed,
                    navigator.axes_manager.events.any_axis_changed, 0)
            else:
                navigator = interactive(
                    navigator.as_signal2D,
                    navigator.events.data_changed,
                    navigator.axes_manager.events.any_axis_changed,
                    (0, 1))
        else:
            navigator = None
    # Navigator properties
    if axes_manager.navigation_axes:
        # check first if we have a signal to avoid comparion of signal with
        # string
        if isinstance(navigator, BaseSignal):
            def is_shape_compatible(navigation_shape, shape):
                return (navigation_shape == shape or
                        navigation_shape[:2] == shape or
                        (navigation_shape[0],) == shape
                        )
            # Static navigator
            if is_shape_compatible(axes_manager.navigation_shape,
                                   navigator.axes_manager.signal_shape):
                self._plot.navigator_data_function = get_static_explorer_wrapper
            # Static transposed navigator
            elif is_shape_compatible(axes_manager.navigation_shape,
                                     navigator.axes_manager.navigation_shape):
                navigator = navigator.T
                self._plot.navigator_data_function = get_static_explorer_wrapper
            # Dynamic navigator
            elif (axes_manager.navigation_shape ==
                  navigator.axes_manager.signal_shape +
                  navigator.axes_manager.navigation_shape):
                self._plot.navigator_data_function = get_dynamic_explorer_wrapper
            else:
                raise ValueError(
                    "The dimensions of the provided (or stored) navigator "
                    "are not compatible with this signal.")
        elif navigator == "slider":
            self._plot.navigator_data_function = "slider"
        elif navigator is None:
            self._plot.navigator_data_function = None
        elif navigator == "data":
            if np.issubdtype(self.data.dtype, np.complexfloating):
                self._plot.navigator_data_function = lambda axes_manager=None: np.abs(
                    self.data)
            else:
                self._plot.navigator_data_function = lambda axes_manager=None: self.data
        elif navigator == "spectrum":
            self._plot.navigator_data_function = get_1D_sum_explorer_wrapper
        else:
            raise ValueError(
                'navigator must be one of "spectrum","auto", '
                '"slider", None, a Signal instance')

    self._plot.plot(**kwargs)
    # Keep the figures updated when the data changes, and disconnect
    # the handler again when the plot window is closed.
    self.events.data_changed.connect(self.update_plot, [])
    p = self._plot.signal_plot if self._plot.signal_plot else self._plot.navigator_plot
    p.events.closed.connect(
        lambda: self.events.data_changed.disconnect(self.update_plot),
        [])
    if plot_markers:
        if self.metadata.has_item('Markers'):
            self._plot_permanent_markers()

plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
                 PLOT1D_DOCSTRING, PLOT2D_KWARGS_DOCSTRING)
def save(self, filename=None, overwrite=None, extension=None,
         **kwds):
    """Saves the signal in the specified format.

    The function gets the format from the specified extension (see
    :ref:`supported-formats` in the User Guide for more information):

    * ``'hspy'`` for HyperSpy's HDF5 specification
    * ``'rpl'`` for Ripple (useful to export to Digital Micrograph)
    * ``'msa'`` for EMSA/MSA single spectrum saving.
    * ``'unf'`` for SEMPER unf binary format.
    * ``'blo'`` for Blockfile diffraction stack saving.
    * Many image formats such as ``'png'``, ``'tiff'``, ``'jpeg'``...

    If no extension is provided the default file format as defined
    in the `preferences` is used.
    Please note that not all the formats supports saving datasets of
    arbitrary dimensions, e.g. ``'msa'`` only supports 1D data, and
    blockfiles only supports image stacks with a `navigation_dimension` < 2.

    Each format accepts a different set of parameters. For details
    see the specific format documentation.

    Parameters
    ----------
    filename : str or None
        If None (default) and `tmp_parameters.filename` and
        `tmp_parameters.folder` are defined, the
        filename and path will be taken from there. A valid
        extension can be provided e.g. ``'my_file.rpl'``
        (see `extension` parameter).
    overwrite : None or bool
        If None, if the file exists it will query the user. If
        True(False) it does(not) overwrite the file if it exists.
    extension : None or str
        The extension of the file that defines the file format.
        Allowable string values are: {``'hspy'``, ``'hdf5'``, ``'rpl'``,
        ``'msa'``, ``'unf'``, ``'blo'``, ``'emd'``, and common image
        extensions e.g. ``'tiff'``, ``'png'``, etc.}
        ``'hspy'`` and ``'hdf5'`` are equivalent. Use ``'hdf5'`` if
        compatibility with HyperSpy versions older than 1.2 is required.
        If ``None``, the extension is determined from the following list in
        this order:

        i) the filename
        ii) `Signal.tmp_parameters.extension`
        iii) ``'hspy'`` (the default extension)
    chunks : tuple or True or None (default)
        HyperSpy, Nexus and EMD NCEM format only. Define chunks used when
        saving. The chunk shape should follow the order of the array
        (``s.data.shape``), not the shape of the ``axes_manager``.
        If None and lazy signal, the dask array chunking is used.
        If None and non-lazy signal, the chunks are estimated automatically
        to have at least one chunk per signal space.
        If True, the chunking is determined by the the h5py ``guess_chunk``
        function.
    save_original_metadata : bool , default : False
        Nexus file only. Option to save hyperspy.original_metadata with
        the signal. A loaded Nexus file may have a large amount of data
        when loaded which you may wish to omit on saving
    use_default : bool , default : False
        Nexus file only. Define the default dataset in the file.
        If set to True the signal or first signal in the list of signals
        will be defined as the default (following Nexus v3 data rules).
    """
    if filename is None:
        # No filename given: fall back to where the signal came from.
        if (self.tmp_parameters.has_item('filename') and
                self.tmp_parameters.has_item('folder')):
            filename = Path(
                self.tmp_parameters.folder,
                self.tmp_parameters.filename)
            extension = (self.tmp_parameters.extension
                         if not extension
                         else extension)
        elif self.metadata.has_item('General.original_filename'):
            filename = self.metadata.General.original_filename
        else:
            raise ValueError('File name not defined')
    filename = Path(filename)
    if extension is not None:
        # An explicit extension overrides the one in the filename.
        filename = filename.with_suffix(f".{extension}")
    io.save(filename, self, overwrite=overwrite, **kwds)
def _replot(self):
    """Redraw the signal if it is currently being displayed."""
    if self._plot is not None and self._plot.is_active:
        self.plot()
def update_plot(self):
    """
    If this Signal has been plotted, update the signal and navigator
    plots, as appropriate.
    """
    plot = self._plot
    if plot is None or not plot.is_active:
        return
    for figure in (plot.signal_plot, plot.navigator_plot):
        if figure is not None:
            figure.update()
def get_dimensions_from_data(self):
    """Get the dimension parameters from the Signal's underlying data.

    Useful when the data structure was externally modified, or when the
    spectrum image was not loaded from a file.
    """
    shape = self.data.shape
    # Resynchronise every axis size with the current array shape.
    for axis in self.axes_manager._axes:
        axis.size = int(shape[axis.index_in_array])
def crop(self, axis, start=None, end=None, convert_units=False):
    """Crops the data in a given axis. The range is given in pixels.

    Parameters
    ----------
    axis : int or str
        Specify the data axis in which to perform the cropping
        operation. The axis can be specified using the index of the
        axis in `axes_manager` or the axis name.
    start : int, float, or None
        The beginning of the cropping interval. If type is ``int``,
        the value is taken as the axis index. If type is ``float`` the index
        is calculated using the axis calibration. If `start`/`end` is
        ``None`` the method crops from/to the low/high end of the axis.
    end : int, float, or None
        The end of the cropping interval. If type is ``int``,
        the value is taken as the axis index. If type is ``float`` the index
        is calculated using the axis calibration. If `start`/`end` is
        ``None`` the method crops from/to the low/high end of the axis.
    convert_units : bool
        Default is ``False``. If ``True``, convert the units using the
        :py:meth:`~hyperspy.axes.AxesManager.convert_units` method
        of the :py:class:`~hyperspy.axes.AxesManager`. If ``False``,
        does nothing.
    """
    axis = self.axes_manager[axis]
    # Resolve `start`/`end` (indices or calibrated coordinates) into
    # integer array indices; None passes through, meaning "axis end".
    i1, i2 = axis._get_index(start), axis._get_index(end)
    # To prevent an axis error, which may confuse users
    # (`not i1 != i2` is simply `i1 == i2`, i.e. an empty interval).
    if i1 is not None and i2 is not None and not i1 != i2:
        raise ValueError("The `start` and `end` values need to be "
                         "different.")
    # We take a copy to guarantee the continuity of the data
    self.data = self.data[
        (slice(None),) * axis.index_in_array + (slice(i1, i2),
                                                Ellipsis)]
    # Update the axis calibration, resync every axis size with the new
    # array shape, drop any singleton axes and notify listeners.
    axis.crop(i1, i2)
    self.get_dimensions_from_data()
    self.squeeze()
    self.events.data_changed.trigger(obj=self)
    if convert_units:
        self.axes_manager.convert_units(axis)
def swap_axes(self, axis1, axis2, optimize=False):
    """Swap two axes in the signal.

    Parameters
    ----------
    axis1%s
    axis2%s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
        A copy of the object with the axes swapped.

    See also
    --------
    rollaxis
    """
    axis1 = self.axes_manager[axis1].index_in_array
    axis2 = self.axes_manager[axis2].index_in_array
    s = self._deepcopy_with_new_data(self.data.swapaxes(axis1, axis2))
    am = s.axes_manager
    # Detach trait notification handlers while the axes list is being
    # rearranged; they are re-attached once it is consistent again.
    am._update_trait_handlers(remove=True)
    c1 = am._axes[axis1]
    c2 = am._axes[axis2]
    # slice/navigate/is_binned belong to the *slot* (position), not to
    # the axis object itself, so they are swapped back explicitly
    # before the axis objects change places.
    c1.slice, c2.slice = c2.slice, c1.slice
    c1.navigate, c2.navigate = c2.navigate, c1.navigate
    c1.is_binned, c2.is_binned = c2.is_binned, c1.is_binned
    am._axes[axis1] = c2
    am._axes[axis2] = c1
    am._update_attributes()
    am._update_trait_handlers(remove=False)
    if optimize:
        # Make the transposed data C-contiguous (copies if necessary).
        s._make_sure_data_is_contiguous()
    return s
swap_axes.__doc__ %= (ONE_AXIS_PARAMETER, ONE_AXIS_PARAMETER, OPTIMIZE_ARG)
def rollaxis(self, axis, to_axis, optimize=False):
    """Roll the specified axis backwards, until it lies in a given position.

    Parameters
    ----------
    axis %s The axis to roll backwards.
        The positions of the other axes do not change relative to one
        another.
    to_axis %s The axis is rolled until it lies before this other axis.
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
        Output signal.

    See also
    --------
    :py:func:`numpy.roll`, swap_axes

    Examples
    --------
    >>> s = hs.signals.Signal1D(np.ones((5,4,3,6)))
    >>> s
    <Signal1D, title: , dimensions: (3, 4, 5, 6)>
    >>> s.rollaxis(3, 1)
    <Signal1D, title: , dimensions: (3, 4, 5, 6)>
    >>> s.rollaxis(2,0)
    <Signal1D, title: , dimensions: (5, 3, 4, 6)>
    """
    axis = self.axes_manager[axis].index_in_array
    to_index = self.axes_manager[to_axis].index_in_array
    if axis == to_index:
        # Nothing to roll, but still return a copy for API consistency.
        return self.deepcopy()
    # Compute the permutation of array indices that performs the roll...
    new_axes_indices = hyperspy.misc.utils.rollelem(
        [axis_.index_in_array for axis_ in self.axes_manager._axes],
        index=axis,
        to_index=to_index)
    s = self._deepcopy_with_new_data(self.data.transpose(new_axes_indices))
    # ...and apply the same permutation to the AxesManager so axes stay
    # aligned with the transposed data.
    s.axes_manager._axes = hyperspy.misc.utils.rollelem(
        s.axes_manager._axes,
        index=axis,
        to_index=to_index)
    s.axes_manager._update_attributes()
    if optimize:
        s._make_sure_data_is_contiguous()
    return s
rollaxis.__doc__ %= (ONE_AXIS_PARAMETER, ONE_AXIS_PARAMETER, OPTIMIZE_ARG)
@property
def _data_aligned_with_axes(self):
    """Return a view of ``data`` whose axes are aligned with the Signal
    axes (reversed navigation indices first, then reversed signal
    indices). Returns ``data`` itself when it is already aligned.
    """
    am = self.axes_manager
    if am.axes_are_aligned_with_data:
        return self.data
    order = (am.navigation_indices_in_array[::-1] +
             am.signal_indices_in_array[::-1])
    return self.data.transpose(order)
def _validate_rebin_args_and_get_factors(self, new_shape=None, scale=None):
if new_shape is None and scale is None:
raise ValueError("One of new_shape, or scale must be specified")
elif new_shape is None and scale is None:
raise ValueError(
"Only one out of new_shape or scale should be specified. "
"Not both.")
elif new_shape:
if len(new_shape) != len(self.data.shape):
raise ValueError("Wrong new_shape size")
for axis in self.axes_manager._axes:
if axis.is_uniform is False:
raise NotImplementedError(
"Rebinning of non-uniform axes is not yet implemented.")
new_shape_in_array = np.array([new_shape[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
factors = np.array(self.data.shape) / new_shape_in_array
else:
if len(scale) != len(self.data.shape):
raise ValueError("Wrong scale size")
for axis in self.axes_manager._axes:
if axis.is_uniform is False:
raise NotImplementedError(
"Rebinning of non-uniform axes is not yet implemented.")
factors = np.array([scale[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
return factors # Factors are in array order
def rebin(self, new_shape=None, scale=None, crop=True, dtype=None,
          out=None):
    """
    Rebin the signal into a smaller or larger shape, based on linear
    interpolation. Specify **either** `new_shape` or `scale`. Scale of 1
    means no binning and scale less than one results in up-sampling.

    Parameters
    ----------
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
        The resulting cropped signal.

    Raises
    ------
    NotImplementedError
        If trying to rebin over a non-uniform axis.

    Examples
    --------
    >>> spectrum = hs.signals.EDSTEMSpectrum(np.ones([4, 4, 10]))
    >>> spectrum.data[1, 2, 9] = 5
    >>> print(spectrum)
    <EDSTEMSpectrum, title: dimensions: (4, 4|10)>
    >>> print ('Sum = ', sum(sum(sum(spectrum.data))))
    Sum = 164.0
    >>> scale = [2, 2, 5]
    >>> test = spectrum.rebin(scale)
    >>> print(test)
    <EDSTEMSpectrum, title: dimensions (2, 2|2)>
    >>> print('Sum = ', sum(sum(sum(test.data))))
    Sum = 164.0
    >>> s = hs.signals.Signal1D(np.ones((2, 5, 10), dtype=np.uint8))
    >>> print(s)
    <Signal1D, title: , dimensions: (5, 2|10)>
    >>> print(s.data.dtype)
    uint8

    Use dtype=np.uint16 to specify a dtype

    >>> s2 = s.rebin(scale=(5, 2, 1), dtype=np.uint16)
    >>> print(s2.data.dtype)
    uint16

    Use dtype="same" to keep the same dtype

    >>> s3 = s.rebin(scale=(5, 2, 1), dtype="same")
    >>> print(s3.data.dtype)
    uint8

    By default `dtype=None`, the dtype is determined by the behaviour of
    numpy.sum, in this case, unsigned integer of the same precision as
    the platform integer

    >>> s4 = s.rebin(scale=(5, 2, 1))
    >>> print(s4.data.dtype)
    uint64
    """
    # TODO: Adapt so that it works if a non_uniform_axis exists, but is not
    # changed; for new_shape, a non_uniform_axis should be interpolated to a
    # linear grid
    factors = self._validate_rebin_args_and_get_factors(
        new_shape=new_shape,
        scale=scale,)
    s = out or self._deepcopy_with_new_data(None, copy_variance=True)
    data = hyperspy.misc.array_tools.rebin(
        self.data, scale=factors, crop=crop, dtype=dtype)
    if out:
        if out._lazy:
            out.data = data
        else:
            out.data[:] = data
    else:
        s.data = data
    s.get_dimensions_from_data()
    # Rescale the axis calibration: each merged bin is `factor` times
    # wider and its centre shifts by (factor - 1) / 2 source bins.
    for axis, axis_src in zip(s.axes_manager._axes,
                              self.axes_manager._axes):
        factor = factors[axis.index_in_array]
        axis.scale = axis_src.scale * factor
        axis.offset = axis_src.offset + (factor - 1) * axis_src.scale / 2
    if s.metadata.has_item('Signal.Noise_properties.variance'):
        if isinstance(s.metadata.Signal.Noise_properties.variance,
                      BaseSignal):
            var = s.metadata.Signal.Noise_properties.variance
            # NOTE(review): `out=out` is forwarded here, which would make
            # the variance rebin write into the same `out` signal as the
            # data — looks suspicious; confirm intended behaviour.
            s.metadata.Signal.Noise_properties.variance = var.rebin(
                new_shape=new_shape, scale=scale, crop=crop, out=out,
                dtype=dtype)
    if out is None:
        return s
    else:
        out.events.data_changed.trigger(obj=out)
rebin.__doc__ %= (REBIN_ARGS, OUT_ARG)
def split(self,
          axis='auto',
          number_of_parts='auto',
          step_sizes='auto'):
    """Splits the data into several signals.

    The split can be defined by giving the `number_of_parts`, a homogeneous
    step size, or a list of customized step sizes. By default (``'auto'``),
    the function is the reverse of :py:func:`~hyperspy.misc.utils.stack`.

    Parameters
    ----------
    axis %s
        If ``'auto'`` and if the object has been created with
        :py:func:`~hyperspy.misc.utils.stack` (and ``stack_metadata=True``),
        this method will return the former list of signals (information
        stored in `metadata._HyperSpy.Stacking_history`).
        If it was not created with :py:func:`~hyperspy.misc.utils.stack`,
        the last navigation axis will be used.
    number_of_parts : str or int
        Number of parts in which the spectrum image will be split. The
        splitting is homogeneous. When the axis size is not divisible
        by the `number_of_parts` the remainder data is lost without
        warning. If `number_of_parts` and `step_sizes` is ``'auto'``,
        `number_of_parts` equals the length of the axis,
        `step_sizes` equals one, and the axis is suppressed from each
        sub-spectrum.
    step_sizes : str, list (of ints), or int
        Size of the split parts. If ``'auto'``, the `step_sizes` equals one.
        If an int is given, the splitting is homogeneous.

    Examples
    --------
    >>> s = hs.signals.Signal1D(np.random.random([4,3,2]))
    >>> s
    <Signal1D, title: , dimensions: (3, 4|2)>
    >>> s.split()
    [<Signal1D, title: , dimensions: (3 |2)>,
    <Signal1D, title: , dimensions: (3 |2)>,
    <Signal1D, title: , dimensions: (3 |2)>,
    <Signal1D, title: , dimensions: (3 |2)>]
    >>> s.split(step_sizes=2)
    [<Signal1D, title: , dimensions: (3, 2|2)>,
    <Signal1D, title: , dimensions: (3, 2|2)>]
    >>> s.split(step_sizes=[1,2])
    [<Signal1D, title: , dimensions: (3, 1|2)>,
    <Signal1D, title: , dimensions: (3, 2|2)>]

    Raises
    ------
    NotImplementedError
        If trying to split along a non-uniform axis.

    Returns
    -------
    splitted : list
        A list of the split signals
    """
    if number_of_parts != 'auto' and step_sizes != 'auto':
        raise ValueError(
            "You can define step_sizes or number_of_parts but not both."
        )
    shape = self.data.shape
    signal_dict = self._to_dictionary(add_learning_results=False)
    if axis == 'auto':
        mode = 'auto'
        # If the signal was produced by ``stack``, undo it along the
        # recorded axis with the recorded step sizes.
        if hasattr(self.metadata._HyperSpy, 'Stacking_history'):
            stack_history = self.metadata._HyperSpy.Stacking_history
            axis_in_manager = stack_history.axis
            step_sizes = stack_history.step_sizes
        else:
            # Fall back to the last navigation axis (the ``-1 + 1j``
            # complex index selects it in the axes_manager convention).
            axis_in_manager = self.axes_manager[-1 +
                                                1j].index_in_axes_manager
    else:
        mode = 'manual'
        axis_in_manager = self.axes_manager[axis].index_in_axes_manager
    axis = self.axes_manager[axis_in_manager].index_in_array
    len_axis = self.axes_manager[axis_in_manager].size
    if self.axes_manager[axis].is_uniform is False:
        raise NotImplementedError(
            "Splitting of signals over a non-uniform axis is not implemented")
    if number_of_parts == 'auto' and step_sizes == 'auto':
        step_sizes = 1
        number_of_parts = len_axis
    elif step_sizes == 'auto':
        if number_of_parts > shape[axis]:
            raise ValueError(
                "The number of parts is greater than the axis size."
            )
        step_sizes = ([shape[axis] // number_of_parts, ] * number_of_parts)
    if isinstance(step_sizes, numbers.Integral):
        step_sizes = [step_sizes] * int(len_axis / step_sizes)
    splitted = []
    # Cut points along the split axis, e.g. [0, s0, s0+s1, ...].
    cut_index = np.array([0] + step_sizes).cumsum()
    axes_dict = signal_dict['axes']
    for i in range(len(cut_index) - 1):
        # Recalibrate the split axis of each part before rebuilding it.
        axes_dict[axis]['offset'] = self.axes_manager._axes[
            axis].index2value(cut_index[i])
        axes_dict[axis]['size'] = cut_index[i + 1] - cut_index[i]
        data = self.data[
            (slice(None), ) * axis +
            (slice(cut_index[i], cut_index[i + 1]), Ellipsis)]
        signal_dict['data'] = data
        # Trailing-comma tuple: appends one new signal built from the dict.
        splitted += self.__class__(**signal_dict),
    if number_of_parts == len_axis \
            or step_sizes == [1] * len_axis:
        # Single-slice parts: drop the (now size-1) split axis entirely.
        for i, signal1D in enumerate(splitted):
            signal1D.data = signal1D.data[
                signal1D.axes_manager._get_data_slice([(axis, 0)])]
            signal1D._remove_axis(axis_in_manager)
    if mode == 'auto' and hasattr(
            self.original_metadata, 'stack_elements'):
        # Restore each element's pre-stack metadata.
        for i, spectrum in enumerate(splitted):
            se = self.original_metadata.stack_elements['element' + str(i)]
            spectrum.metadata = copy.deepcopy(
                se['metadata'])
            spectrum.original_metadata = copy.deepcopy(
                se['original_metadata'])
            spectrum.metadata.General.title = se.metadata.General.title
    return splitted
split.__doc__ %= (ONE_AXIS_PARAMETER)
def _unfold(self, steady_axes, unfolded_axis):
    """Modify the shape of the data by specifying the axes whose
    dimension do not change and the axis over which the remaining axes will
    be unfolded

    Parameters
    ----------
    steady_axes : list
        The indices of the axes which dimensions do not change
    unfolded_axis : int
        The index of the axis over which all the rest of the axes (except
        the steady axes) will be unfolded

    See also
    --------
    fold

    Notes
    -----
    WARNING: this private function does not modify the signal subclass
    and it is intended for internal use only. To unfold use the public
    :py:meth:`~hyperspy.signal.BaseSignal.unfold`,
    :py:meth:`~hyperspy.signal.BaseSignal.unfold_navigation_space`,
    :py:meth:`~hyperspy.signal.BaseSignal.unfold_signal_space` instead.
    It doesn't make sense to perform an unfolding when `dim` < 2
    """
    if self.data.squeeze().ndim < 2:
        return
    # We need to store the original shape and coordinates to be used
    # by the fold function only if it has not been already stored by a
    # previous unfold
    folding = self.metadata._HyperSpy.Folding
    if folding.unfolded is False:
        folding.original_shape = self.data.shape
        folding.original_axes_manager = self.axes_manager
        folding.unfolded = True
    # Collapse every non-steady axis into `unfolded_axis` (-1 lets
    # reshape infer its size); steady axes keep their size.
    new_shape = [1] * len(self.data.shape)
    for index in steady_axes:
        new_shape[index] = self.data.shape[index]
    new_shape[unfolded_axis] = -1
    self.data = self.data.reshape(new_shape)
    self.axes_manager = self.axes_manager.deepcopy()
    uname = ''
    uunits = ''
    to_remove = []
    for axis, dim in zip(self.axes_manager._axes, new_shape):
        if dim == 1:
            uname += ',' + str(axis)
            # BUG FIX: accumulate the units of *all* removed axes in
            # parallel with `uname`; previously `=` was used, so only
            # the last removed axis' units survived.
            uunits += ',' + str(axis.units)
            to_remove.append(axis)
    # Rename the unfolded axis to record which axes were merged into it.
    ua = self.axes_manager._axes[unfolded_axis]
    ua.name = str(ua) + uname
    ua.units = str(ua.units) + uunits
    ua.size = self.data.shape[unfolded_axis]
    for axis in to_remove:
        self.axes_manager.remove(axis.index_in_axes_manager)
    self.data = self.data.squeeze()
    self._assign_subclass()
def unfold(self, unfold_navigation=True, unfold_signal=True):
    """Modifies the shape of the data by unfolding the signal and
    navigation dimensions separately

    Parameters
    ----------
    unfold_navigation : bool
        Whether or not to unfold the navigation dimension(s) (default:
        ``True``)
    unfold_signal : bool
        Whether or not to unfold the signal dimension(s) (default:
        ``True``)

    Returns
    -------
    needed_unfolding : bool
        Whether or not one of the axes needed unfolding (and that
        unfolding was performed)

    Note
    ----
    It doesn't make sense to perform an unfolding when the total number
    of dimensions is < 2.
    """
    # Short-circuit keeps the disabled space untouched.
    did_nav = unfold_navigation and self.unfold_navigation_space()
    did_sig = unfold_signal and self.unfold_signal_space()
    return bool(did_nav or did_sig)
@contextmanager
def unfolded(self, unfold_navigation=True, unfold_signal=True):
    """Context manager that unfolds the signal for the duration of the
    ``with`` block and automatically folds it back on exit.

    See also
    --------
    unfold, fold

    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> with s.unfolded():
            # Do whatever needs doing while unfolded here
            pass
    """
    did_unfold = self.unfold(unfold_navigation, unfold_signal)
    try:
        yield did_unfold
    finally:
        # Only fold back if the unfold actually changed anything.
        if did_unfold is not False:
            self.fold()
def unfold_navigation_space(self):
    """Modify the shape of the data to obtain a navigation space of
    dimension 1

    Returns
    -------
    needed_unfolding : bool
        Whether or not the navigation space needed unfolding (and whether
        it was performed)
    """
    needed = self.axes_manager.navigation_dimension >= 2
    if needed:
        # Keep the signal axes fixed; merge all navigation axes into
        # the first navigation axis.
        fixed = [axis.index_in_array
                 for axis in self.axes_manager.signal_axes]
        target = self.axes_manager.navigation_axes[0].index_in_array
        self._unfold(fixed, target)
    if self.metadata.has_item('Signal.Noise_properties.variance'):
        variance = self.metadata.Signal.Noise_properties.variance
        if isinstance(variance, BaseSignal):
            variance.unfold_navigation_space()
    return needed
def unfold_signal_space(self):
    """Modify the shape of the data to obtain a signal space of
    dimension 1

    Returns
    -------
    needed_unfolding : bool
        Whether or not the signal space needed unfolding (and whether
        it was performed)
    """
    needed = self.axes_manager.signal_dimension >= 2
    if needed:
        # Keep the navigation axes fixed; merge all signal axes into
        # the first signal axis.
        fixed = [axis.index_in_array
                 for axis in self.axes_manager.navigation_axes]
        target = self.axes_manager.signal_axes[0].index_in_array
        self._unfold(fixed, target)
        self.metadata._HyperSpy.Folding.signal_unfolded = True
    if self.metadata.has_item('Signal.Noise_properties.variance'):
        variance = self.metadata.Signal.Noise_properties.variance
        if isinstance(variance, BaseSignal):
            variance.unfold_signal_space()
    return needed
def fold(self):
    """If the signal was previously unfolded, fold it back."""
    folding = self.metadata._HyperSpy.Folding
    # BUG FIX: `==` must be used instead of `is True` because if the
    # value was loaded from a file its type can be np.bool_, and
    # `np.bool_(True) is True` evaluates to False — the original
    # `is True` test silently skipped the fold in that case.
    if folding.unfolded == True:  # noqa: E712
        self.data = self.data.reshape(folding.original_shape)
        self.axes_manager = folding.original_axes_manager
        folding.original_shape = None
        folding.original_axes_manager = None
        folding.unfolded = False
        folding.signal_unfolded = False
        self._assign_subclass()
        if self.metadata.has_item('Signal.Noise_properties.variance'):
            variance = self.metadata.Signal.Noise_properties.variance
            if isinstance(variance, BaseSignal):
                variance.fold()
def _make_sure_data_is_contiguous(self):
    # No-op when the array is already C-contiguous; otherwise replace
    # the data with a contiguous copy and log the replacement.
    if self.data.flags['C_CONTIGUOUS']:
        return
    _logger.info("{0!r} data is replaced by its optimized copy, see "
                 "optimize parameter of ``Basesignal.transpose`` "
                 "for more information.".format(self))
    self.data = np.ascontiguousarray(self.data)
def _iterate_signal(self, iterpath=None):
    """Iterates over the signal data. It is faster than using the signal
    iterator, because it avoids making deepcopy of metadata and other
    attributes.

    Parameters
    ----------
    iterpath : None or str or iterable
        Any valid iterpath supported by the axes_manager.

    Returns
    -------
    numpy array when iterating over the navigation space
    """
    # Remember where the user left the navigator so it can be restored
    # after iteration finishes.
    original_index = self.axes_manager.indices
    if iterpath is None:
        _logger.warning('The default iterpath will change in HyperSpy 2.0.')
    with self.axes_manager.switch_iterpath(iterpath):
        # Rewind to the first navigation position before iterating.
        self.axes_manager.indices = tuple(
            [0 for _ in self.axes_manager.navigation_axes]
        )
        for _ in self.axes_manager:
            # ``self()`` returns the data at the current navigation index.
            yield self()
        # restore original index
        self.axes_manager.indices = original_index
def _cycle_signal(self):
    """Cycles over the signal data.

    It is faster than using the signal iterator.

    Warning! could produce an infinite loop.
    """
    if self.axes_manager.navigation_size < 2:
        # Nothing to cycle over: yield the single navigation position
        # forever.
        while True:
            yield self()
        return  # pragma: no cover
    self._make_sure_data_is_contiguous()
    axes = [axis.index_in_array for
            axis in self.axes_manager.signal_axes]
    if axes:
        # Collapse all navigation axes into the first one (set to -1 so
        # reshape infers its size) while keeping the signal axes intact.
        unfolded_axis = (
            self.axes_manager.navigation_axes[0].index_in_array)
        new_shape = [1] * len(self.data.shape)
        for axis in axes:
            new_shape[axis] = self.data.shape[axis]
        new_shape[unfolded_axis] = -1
    else:  # signal_dimension == 0
        new_shape = (-1, 1)
        axes = [1]
        unfolded_axis = 0
    # Warning! if the data is not contiguous it will make a copy!!
    data = self.data.reshape(new_shape)
    # Build an index template that keeps full signal slices and selects
    # one position along the unfolded navigation axis.
    getitem = [0] * len(data.shape)
    for axis in axes:
        getitem[axis] = slice(None)
    i = 0
    Ni = data.shape[unfolded_axis]
    while True:
        getitem[unfolded_axis] = i
        yield(data[tuple(getitem)])
        # Wrap around at the end so the cycle never stops.
        i += 1
        i = 0 if i == Ni else i
def _remove_axis(self, axes):
    """Remove the given axes from the ``axes_manager``, re-assigning
    the signal subclass or adding a scalar axis as required."""
    am = self.axes_manager
    axes = am[axes]
    if not np.iterable(axes):
        axes = (axes,)
    if am.navigation_dimension + am.signal_dimension > len(axes):
        old_signal_dimension = am.signal_dimension
        am.remove(axes)
        # A change of signal dimension changes which BaseSignal
        # subclass is appropriate.
        if old_signal_dimension != am.signal_dimension:
            self._assign_subclass()
    else:
        # Create a "Scalar" axis because the axis is the last one left
        # and HyperSpy does not support 0 dimensions
        from hyperspy.misc.utils import add_scalar_axis
        add_scalar_axis(self)
def _ma_workaround(self, s, function, axes, ar_axes, out):
    # TODO: Remove if and when numpy.ma accepts tuple `axis`
    # Basically perform unfolding, but only on data. We don't care about
    # the axes since the function will consume it/them.
    if not np.iterable(ar_axes):
        ar_axes = (ar_axes,)
    ar_axes = sorted(ar_axes)
    # Collapse all reduced axes into the first of them (-1 lets reshape
    # infer its size); the others become length 1 and are squeezed away,
    # so a single scalar `axis` can be passed to the ma function.
    new_shape = list(self.data.shape)
    for index in ar_axes[1:]:
        new_shape[index] = 1
    new_shape[ar_axes[0]] = -1
    data = self.data.reshape(new_shape).squeeze()
    if out:
        data = np.atleast_1d(function(data, axis=ar_axes[0],))
        if data.shape == out.data.shape:
            out.data[:] = data
            out.events.data_changed.trigger(obj=out)
        else:
            raise ValueError(
                "The output shape %s does not match the shape of "
                "`out` %s" % (data.shape, out.data.shape))
    else:
        s.data = function(data, axis=ar_axes[0],)
        s._remove_axis([ax.index_in_axes_manager for ax in axes])
        return s
def _apply_function_on_data_and_remove_axis(self, function, axes,
                                            out=None, **kwargs):
    """Apply a numpy-style reduction `function` over `axes` and remove
    the reduced axes from the result's ``axes_manager``.

    Parameters
    ----------
    function : callable
        A reduction accepting ``axis`` (and optionally ``out``) keyword
        arguments, e.g. ``np.sum``.
    axes : axis specification
        Anything ``self.axes_manager[...]`` accepts.
    out : BaseSignal or None
        If given, the result is written into ``out.data`` in place and
        ``None`` is returned.
    **kwargs : dict
        Not used here — presumably accepted for compatibility with lazy
        subclass overrides (e.g. ``rechunk``). TODO confirm.
    """
    axes = self.axes_manager[axes]
    if not np.iterable(axes):
        axes = (axes,)
    # Use out argument in numpy function when available for operations that
    # do not return scalars in numpy.
    np_out = not len(self.axes_manager._axes) == len(axes)
    ar_axes = tuple(ax.index_in_array for ax in axes)
    if len(ar_axes) == 0:
        # no axes is provided, so no operation needs to be done but we
        # still need to finish the execution of the function properly.
        if out:
            out.data[:] = self.data
            out.events.data_changed.trigger(obj=out)
            return
        else:
            return self
    elif len(ar_axes) == 1:
        # Some reductions (and numpy.ma) prefer a scalar axis.
        ar_axes = ar_axes[0]
    s = out or self._deepcopy_with_new_data(None)
    if np.ma.is_masked(self.data):
        # numpy.ma reductions do not accept a tuple `axis`; work around.
        return self._ma_workaround(s=s, function=function, axes=axes,
                                   ar_axes=ar_axes, out=out)
    if out:
        if np_out:
            function(self.data, axis=ar_axes, out=out.data,)
        else:
            data = np.atleast_1d(function(self.data, axis=ar_axes,))
            if data.shape == out.data.shape:
                out.data[:] = data
            else:
                raise ValueError(
                    "The output shape %s does not match the shape of "
                    "`out` %s" % (data.shape, out.data.shape))
        out.events.data_changed.trigger(obj=out)
    else:
        s.data = np.atleast_1d(
            function(self.data, axis=ar_axes,))
        s._remove_axis([ax.index_in_axes_manager for ax in axes])
        return s
def sum(self, axis=None, out=None, rechunk=True):
    """Sum the data over the given axes.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the sum of the provided Signal along the
        specified axes.

    Note
    ----
    If you intend to calculate the numerical integral of an unbinned signal,
    please use the :py:meth:`integrate1D` function instead. To avoid
    erroneous misuse of the `sum` function as integral, it raises a warning
    when working with an unbinned, non-uniform axis.

    See also
    --------
    max, min, mean, std, var, indexmax, indexmin, valuemax, valuemin

    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.sum(-1).data.shape
    (64,64)
    """
    if axis is None:
        axis = self.axes_manager.navigation_axes
    axes = self.axes_manager[axis]
    if not np.iterable(axes):
        axes = (axes,)
    # Summing over an unbinned, non-uniform axis is not an integral
    # approximation, hence the warning.
    if any([not ax.is_uniform and not is_binned(self, ax) for ax in axes]):
        warnings.warn("You are summing over an unbinned, non-uniform axis. "
                      "The result can not be used as an approximation of "
                      "the integral of the signal. For this functionality, "
                      "use integrate1D instead.")
    return self._apply_function_on_data_and_remove_axis(
        np.sum, axis, out=out, rechunk=rechunk)
sum.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def max(self, axis=None, out=None, rechunk=True):
    """Return a new signal holding the maximum of the data over at least
    one axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the maximum of the provided Signal over the
        specified axes

    See also
    --------
    min, sum, mean, std, var, indexmax, indexmin, valuemax, valuemin
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.max, target, out=out, rechunk=rechunk)
max.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def min(self, axis=None, out=None, rechunk=True):
    """Return a new signal holding the minimum of the data over at least
    one axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the minimum of the provided Signal over the
        specified axes

    See also
    --------
    max, sum, mean, std, var, indexmax, indexmin, valuemax, valuemin
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.min, target, out=out, rechunk=rechunk)
min.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def mean(self, axis=None, out=None, rechunk=True):
    """Return a new signal holding the average of the data over at least
    one axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the mean of the provided Signal over the
        specified axes

    See also
    --------
    max, min, sum, std, var, indexmax, indexmin, valuemax, valuemin
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.mean, target, out=out, rechunk=rechunk)
mean.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def std(self, axis=None, out=None, rechunk=True):
    """Return a new signal holding the standard deviation of the data
    over at least one axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the standard deviation of the provided
        Signal over the specified axes

    See also
    --------
    max, min, sum, mean, var, indexmax, indexmin, valuemax, valuemin
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.std, target, out=out, rechunk=rechunk)
std.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def var(self, axis=None, out=None, rechunk=True):
    """Return a new signal holding the variance of the data over at
    least one axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the variance of the provided Signal over the
        specified axes

    See also
    --------
    max, min, sum, mean, std, indexmax, indexmin, valuemax, valuemin
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.var, target, out=out, rechunk=rechunk)
var.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def nansum(self, axis=None, out=None, rechunk=True):
    """%s
    """
    if axis is None:
        axis = self.axes_manager.navigation_axes
    axes = self.axes_manager[axis]
    if not np.iterable(axes):
        axes = (axes,)
    # CONSISTENCY FIX: match `sum` — warn only for *unbinned*
    # non-uniform axes (a binned non-uniform axis is a valid integral
    # approximation), and fix the "functionaliy" typo in the message.
    if any([not ax.is_uniform and not is_binned(self, ax) for ax in axes]):
        warnings.warn("You are summing over an unbinned, non-uniform axis. "
                      "The result can not be used as an approximation of "
                      "the integral of the signal. For this functionality, "
                      "use integrate1D instead.")
    return self._apply_function_on_data_and_remove_axis(
        np.nansum, axis, out=out, rechunk=rechunk)
nansum.__doc__ %= (NAN_FUNC.format('sum'))
def nanmax(self, axis=None, out=None, rechunk=True):
    """%s
    """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.nanmax, target, out=out, rechunk=rechunk)
nanmax.__doc__ %= (NAN_FUNC.format('max'))
def nanmin(self, axis=None, out=None, rechunk=True):
    """%s"""
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.nanmin, target, out=out, rechunk=rechunk)
nanmin.__doc__ %= (NAN_FUNC.format('min'))
def nanmean(self, axis=None, out=None, rechunk=True):
    """%s """
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.nanmean, target, out=out, rechunk=rechunk)
nanmean.__doc__ %= (NAN_FUNC.format('mean'))
def nanstd(self, axis=None, out=None, rechunk=True):
    """%s"""
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.nanstd, target, out=out, rechunk=rechunk)
nanstd.__doc__ %= (NAN_FUNC.format('std'))
def nanvar(self, axis=None, out=None, rechunk=True):
    """%s"""
    target = self.axes_manager.navigation_axes if axis is None else axis
    return self._apply_function_on_data_and_remove_axis(
        np.nanvar, target, out=out, rechunk=rechunk)
nanvar.__doc__ %= (NAN_FUNC.format('var'))
def diff(self, axis, order=1, out=None, rechunk=True):
    """Returns a signal with the `n`-th order discrete difference along
    given axis. `i.e.` it calculates the difference between consecutive
    values in the given axis: `out[n] = a[n+1] - a[n]`. See
    :py:func:`numpy.diff` for more details.

    Parameters
    ----------
    axis %s
    order : int
        The order of the discrete difference.
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses) or None
        Note that the size of the data on the given ``axis`` decreases by
        the given ``order``. `i.e.` if ``axis`` is ``"x"`` and ``order`` is
        2, the `x` dimension is N, ``der``'s `x` dimension is N - 2.

    Note
    ----
    If you intend to calculate the numerical derivative, please use the
    proper :py:meth:`derivative` function instead. To avoid erroneous
    misuse of the `diff` function as derivative, it raises an error
    when working with a non-uniform axis.

    See also
    --------
    derivative, integrate1D, integrate_simpson

    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.diff(-1).data.shape
    (64,64,1023)
    """
    if not self.axes_manager[axis].is_uniform:
        raise NotImplementedError(
            "Performing a numerical difference on a non-uniform axis "
            "is not implemented. Consider using `derivative` instead."
        )
    s = out or self._deepcopy_with_new_data(None)
    data = np.diff(self.data, n=order,
                   axis=self.axes_manager[axis].index_in_array)
    if out is not None:
        out.data[:] = data
    else:
        s.data = data
    # Each first-order difference is centred between the two samples it
    # was computed from, so shift the axis offset by scale/2 per order.
    axis2 = s.axes_manager[axis]
    new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
    axis2.offset = new_offset
    s.get_dimensions_from_data()
    if out is None:
        return s
    else:
        out.events.data_changed.trigger(obj=out)
diff.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def derivative(self, axis, order=1, out=None, **kwargs):
    r"""Calculate the numerical derivative along the given axis,
    with respect to the calibrated units of that axis.
    For a function :math:`y = f(x)` and two consecutive values :math:`x_1`
    and :math:`x_2`:
    .. math::
       \frac{df(x)}{dx} = \frac{y(x_2)-y(x_1)}{x_2-x_1}
    Parameters
    ----------
    axis %s
    order: int
        The order of the derivative.
    %s
    **kwargs : dict
        All extra keyword arguments are passed to :py:func:`numpy.gradient`
    Returns
    -------
    der : :py:class:`~hyperspy.signal.BaseSignal`
        A signal with the same data shape as this one containing the
        derivative (:py:func:`numpy.gradient` preserves the array size
        along the differentiated axis).
    Notes
    -----
    This function uses numpy.gradient to perform the derivative. See its
    documentation for implementation details.
    See also
    --------
    integrate1D, integrate_simpson
    """
    # rechunk was a valid keyword up to HyperSpy 1.6
    if "rechunk" in kwargs:
        del kwargs["rechunk"]
    n = order
    der_data = self.data
    # Apply np.gradient once per derivative order; passing the axis
    # coordinate vector makes the result respect the axis calibration
    # (including non-uniform spacing).
    while n:
        der_data = np.gradient(
            der_data, self.axes_manager[axis].axis,
            axis=self.axes_manager[axis].index_in_array, **kwargs)
        n -= 1
    if out:
        out.data = der_data
        out.events.data_changed.trigger(obj=out)
    else:
        return self._deepcopy_with_new_data(der_data)
derivative.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def integrate_simpson(self, axis, out=None):
    """Calculate the integral of a Signal along an axis using
    `Simpson's rule <https://en.wikipedia.org/wiki/Simpson%%27s_rule>`_.
    Parameters
    ----------
    axis %s
    %s
    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the integral of the provided Signal along
        the specified axis.
    See also
    --------
    derivative, integrate1D
    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.integrate_simpson(-1).data.shape
    (64,64)
    """
    axis = self.axes_manager[axis]
    s = out or self._deepcopy_with_new_data(None)
    # NOTE(review): scipy.integrate.simps was renamed to ``simpson`` and
    # the old alias is removed in SciPy >= 1.14 -- confirm the pinned
    # SciPy range before upgrading.
    data = integrate.simps(y=self.data, x=axis.axis,
                           axis=axis.index_in_array)
    if out is not None:
        out.data[:] = data
        out.events.data_changed.trigger(obj=out)
    else:
        s.data = data
        # The integrated axis collapses to a scalar; remove it.
        s._remove_axis(axis.index_in_axes_manager)
        return s
integrate_simpson.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def fft(self, shift=False, apodization=False, real_fft_only=False, **kwargs):
    """Compute the discrete Fourier Transform.
    This function computes the discrete Fourier Transform over the signal
    axes by means of the Fast Fourier Transform (FFT) as implemented in
    numpy.
    Parameters
    ----------
    shift : bool, optional
        If ``True``, the origin of FFT will be shifted to the centre
        (default is ``False``).
    apodization : bool or str
        Apply an
        `apodization window <http://mathworld.wolfram.com/ApodizationFunction.html>`_
        before calculating the FFT in order to suppress streaks.
        Valid string values are {``'hann'`` or ``'hamming'`` or ``'tukey'``}
        If ``True`` or ``'hann'``, applies a Hann window.
        If ``'hamming'`` or ``'tukey'``, applies Hamming or Tukey
        windows, respectively (default is ``False``).
    real_fft_only : bool, default False
        If ``True`` and data is real-valued, uses :py:func:`numpy.fft.rfftn`
        instead of :py:func:`numpy.fft.fftn`
    **kwargs : dict
        other keyword arguments are described in :py:func:`numpy.fft.fftn`
    Returns
    -------
    s : :py:class:`~hyperspy._signals.complex_signal.ComplexSignal`
        A Signal containing the result of the FFT algorithm
    Raises
    ------
    NotImplementedError
        If performing FFT along a non-uniform axis.
    Examples
    --------
    >>> im = hs.signals.Signal2D(scipy.misc.ascent())
    >>> im.fft()
    <ComplexSignal2D, title: FFT of , dimensions: (|512, 512)>
    >>> # Use following to plot power spectrum of `im`:
    >>> im.fft(shift=True, apodization=True).plot(power_spectrum=True)
    Note
    ----
    Requires a uniform axis. For further information see the documentation
    of :py:func:`numpy.fft.fftn`
    """
    if self.axes_manager.signal_dimension == 0:
        raise AttributeError("Signal dimension must be at least one.")
    # ``apodization=True`` is shorthand for a Hann window.
    if apodization == True:
        apodization = 'hann'
    if apodization:
        im_fft = self.apply_apodization(window=apodization, inplace=False)
    else:
        im_fft = self
    ax = self.axes_manager
    axes = ax.signal_indices_in_array
    if any(not axs.is_uniform for axs in self.axes_manager[axes]):
        raise NotImplementedError(
            "Not implemented for non-uniform axes.")
    # rfftn is only valid for real input; fall back to fftn otherwise.
    use_real_fft = real_fft_only and (self.data.dtype.kind != 'c')
    if use_real_fft:
        fft_f = np.fft.rfftn
    else:
        fft_f = np.fft.fftn
    # BUGFIX: always transform ``im_fft.data`` (the possibly apodized
    # copy). Previously the ``shift=False`` branch transformed
    # ``self.data``, silently discarding the apodization window.
    if shift:
        im_fft = self._deepcopy_with_new_data(np.fft.fftshift(
            fft_f(im_fft.data, axes=axes, **kwargs), axes=axes))
    else:
        im_fft = self._deepcopy_with_new_data(
            fft_f(im_fft.data, axes=axes, **kwargs))
    im_fft.change_dtype("complex")
    im_fft.metadata.General.title = 'FFT of {}'.format(
        im_fft.metadata.General.title)
    im_fft.metadata.set_item('Signal.FFT.shifted', shift)
    # The frequency-domain values no longer carry the original physical
    # quantity, so drop it from the *result*'s metadata. (Previously this
    # deleted it from ``self``, mutating the input signal.)
    if hasattr(im_fft.metadata.Signal, 'quantity'):
        del im_fft.metadata.Signal.quantity
    ureg = UnitRegistry()
    # Rescale the signal axes to reciprocal space: frequency step is
    # 1 / (N * dx); origin at zero, or centred when ``shift`` is True.
    for axis in im_fft.axes_manager.signal_axes:
        axis.scale = 1. / axis.size / axis.scale
        axis.offset = 0.0
        try:
            units = ureg.parse_expression(str(axis.units))**(-1)
            axis.units = '{:~}'.format(units.units)
        except UndefinedUnitError:
            _logger.warning('Units are not set or cannot be recognized')
        if shift:
            axis.offset = -axis.high_value / 2.
    return im_fft
def ifft(self, shift=None, return_real=True, **kwargs):
    """
    Compute the inverse discrete Fourier Transform.
    This function computes the real part of the inverse of the discrete
    Fourier Transform over the signal axes by means of the Fast Fourier
    Transform (FFT) as implemented in numpy.
    Parameters
    ----------
    shift : bool or None, optional
        If ``None``, the shift option will be set to the original status
        of the FFT using the value in metadata. If no FFT entry is
        present in metadata, the parameter will be set to ``False``.
        If ``True``, the origin of the FFT will be shifted to the centre.
        If ``False``, the origin will be kept at (0, 0)
        (default is ``None``).
    return_real : bool, default True
        If ``True``, returns only the real part of the inverse FFT.
        If ``False``, returns all parts.
    **kwargs : dict
        other keyword arguments are described in :py:func:`numpy.fft.ifftn`
    Return
    ------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A Signal containing the result of the inverse FFT algorithm
    Raises
    ------
    NotImplementedError
        If performing IFFT along a non-uniform axis.
    Examples
    --------
    >>> import scipy
    >>> im = hs.signals.Signal2D(scipy.misc.ascent())
    >>> imfft = im.fft()
    >>> imfft.ifft()
    <Signal2D, title: real(iFFT of FFT of ), dimensions: (|512, 512)>
    Note
    ----
    Requires a uniform axis. For further information see the documentation
    of :py:func:`numpy.fft.ifftn`
    """
    if self.axes_manager.signal_dimension == 0:
        raise AttributeError("Signal dimension must be at least one.")
    ax = self.axes_manager
    axes = ax.signal_indices_in_array
    if any([not axs.is_uniform for axs in self.axes_manager[axes]]):
        raise NotImplementedError(
            "Not implemented for non-uniform axes.")
    # Default to undoing whatever shift the forward FFT recorded.
    if shift is None:
        shift = self.metadata.get_item('Signal.FFT.shifted', False)
    if shift:
        # Undo the centring before the inverse transform.
        im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
            np.fft.ifftshift(self.data, axes=axes), axes=axes, **kwargs))
    else:
        im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
            self.data, axes=axes, **kwargs))
    im_ifft.metadata.General.title = 'iFFT of {}'.format(
        im_ifft.metadata.General.title)
    # Back in direct space: drop the FFT bookkeeping from the result.
    if im_ifft.metadata.has_item('Signal.FFT'):
        del im_ifft.metadata.Signal.FFT
    if return_real:
        im_ifft = im_ifft.real
    ureg = UnitRegistry()
    # Map the signal axes back from reciprocal to direct space.
    for axis in im_ifft.axes_manager.signal_axes:
        axis.scale = 1. / axis.size / axis.scale
        try:
            units = ureg.parse_expression(str(axis.units)) ** (-1)
            axis.units = '{:~}'.format(units.units)
        except UndefinedUnitError:
            _logger.warning('Units are not set or cannot be recognized')
        axis.offset = 0.
    return im_ifft
def integrate1D(self, axis, out=None):
    """Integrate the signal over the given axis.

    A plain summation is used when the axis is binned (a binned detector
    axis already holds integrated counts per bin); otherwise the integral
    is evaluated with
    `Simpson's rule <https://en.wikipedia.org/wiki/Simpson%%27s_rule>`_.

    Parameters
    ----------
    axis %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the integral of the provided Signal along
        the specified axis.

    See also
    --------
    integrate_simpson, derivative

    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.integrate1D(-1).data.shape
    (64,64)
    """
    # in v2 replace ``is_binned(self, axis=axis)`` by
    # ``self.axes_manager[axis].is_binned``
    binned = is_binned(self, axis=axis)
    integrator = self.sum if binned else self.integrate_simpson
    return integrator(axis=axis, out=out)
integrate1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def indexmin(self, axis, out=None, rechunk=True):
    """Returns a signal with the index of the minimum along an axis.
    Parameters
    ----------
    axis %s
    %s
    %s
    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the indices of the minimum along the
        specified axis. Note: the data `dtype` is always ``int``.
    See also
    --------
    max, min, sum, mean, std, var, indexmax, valuemax, valuemin
    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.indexmin(-1).data.shape
    (64,64)
    """
    # np.argmin collapses the chosen axis to integer positions.
    return self._apply_function_on_data_and_remove_axis(
        np.argmin, axis, out=out, rechunk=rechunk)
indexmin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def indexmax(self, axis, out=None, rechunk=True):
    """Returns a signal with the index of the maximum along an axis.
    Parameters
    ----------
    axis %s
    %s
    %s
    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the indices of the maximum along the
        specified axis. Note: the data `dtype` is always ``int``.
    See also
    --------
    max, min, sum, mean, std, var, indexmin, valuemax, valuemin
    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.indexmax(-1).data.shape
    (64,64)
    """
    # np.argmax collapses the chosen axis to integer positions.
    return self._apply_function_on_data_and_remove_axis(
        np.argmax, axis, out=out, rechunk=rechunk)
indexmax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def valuemax(self, axis, out=None, rechunk=True):
    """Returns a signal with the value of coordinates of the maximum along
    an axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the calibrated coordinate values of the
        maximum along the specified axis.

    See also
    --------
    max, min, sum, mean, std, var, indexmax, indexmin, valuemin

    Examples
    --------
    >>> import numpy as np
    >>> s = BaseSignal(np.random.random((64,64,1024)))
    >>> s.data.shape
    (64,64,1024)
    >>> s.valuemax(-1).data.shape
    (64,64)
    """
    # Locate the maxima as indices, then translate them into calibrated
    # axis values.
    indices = self.indexmax(axis)
    values = self.axes_manager[axis].index2value(indices.data)
    if out is not None:
        out.data[:] = values
        out.events.data_changed.trigger(obj=out)
        return
    indices.data = values
    return indices
valuemax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def valuemin(self, axis, out=None, rechunk=True):
    """Returns a signal with the value of coordinates of the minimum along
    an axis.

    Parameters
    ----------
    axis %s
    %s
    %s

    Returns
    -------
    s : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        A new Signal containing the calibrated coordinate values of the
        minimum along the specified axis.

    See also
    --------
    max, min, sum, mean, std, var, indexmax, indexmin, valuemax
    """
    # Locate the minima as indices, then translate them into calibrated
    # axis values.
    indices = self.indexmin(axis)
    values = self.axes_manager[axis].index2value(indices.data)
    if out is not None:
        out.data[:] = values
        out.events.data_changed.trigger(obj=out)
        return
    indices.data = values
    return indices
valuemin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG, RECHUNK_ARG)
def get_histogram(self, bins='fd', range_bins=None, max_num_bins=250, out=None,
                  **kwargs):
    """Return a histogram of the signal data.
    More sophisticated algorithms for determining the bins can be used
    by passing a string as the ``bins`` argument. Other than the ``'blocks'``
    and ``'knuth'`` methods, the available algorithms are the same as
    :py:func:`numpy.histogram`.
    Note: The lazy version of the algorithm only supports ``"scott"``
    and ``"fd"`` as a string argument for ``bins``.
    Parameters
    ----------
    %s
    range_bins : tuple or None, optional
        the minimum and maximum range for the histogram. If
        `range_bins` is ``None``, (``x.min()``, ``x.max()``) will be used.
    %s
    %s
    %s
    **kwargs
        other keyword arguments (weight and density) are described in
        :py:func:`numpy.histogram`.
    Returns
    -------
    hist_spec : :py:class:`~hyperspy._signals.signal1d.Signal1D`
        A 1D spectrum instance containing the histogram.
    See also
    --------
    * print_summary_statistics
    * :py:func:`numpy.histogram`
    * :py:func:`dask.histogram`
    Examples
    --------
    >>> s = hs.signals.Signal1D(np.random.normal(size=(10, 100)))
    >>> # Plot the data histogram
    >>> s.get_histogram().plot()
    >>> # Plot the histogram of the signal at the current coordinates
    >>> s.get_current_signal().get_histogram().plot()
    """
    from hyperspy import signals
    # The histogram is over every value in the signal; NaNs are dropped.
    data = self.data[~np.isnan(self.data)].flatten()
    hist, bin_edges = histogram(
        data,
        bins=bins,
        max_num_bins=max_num_bins,
        range=range_bins,
        **kwargs
    )
    if out is None:
        hist_spec = signals.Signal1D(hist)
    else:
        hist_spec = out
        # Reuse the output buffer in place when the shape matches;
        # otherwise rebind the data array.
        if hist_spec.data.shape == hist.shape:
            hist_spec.data[:] = hist
        else:
            hist_spec.data = hist
    if isinstance(bins, str) and bins == 'blocks':
        # Bayesian blocks produce variable-width bins, so the edge values
        # themselves become the (non-uniform) axis.
        hist_spec.axes_manager.signal_axes[0].axis = bin_edges[:-1]
        warnings.warn(
            "The option `bins='blocks'` is not fully supported in this "
            "version of HyperSpy. It should be used for plotting purposes "
            "only.",
            UserWarning,
        )
    else:
        # Uniform bins: encode them as a linear axis calibration.
        hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
        hist_spec.axes_manager[0].offset = bin_edges[0]
        hist_spec.axes_manager[0].size = hist.shape[-1]
    hist_spec.axes_manager[0].name = 'value'
    hist_spec.axes_manager[0].is_binned = True
    hist_spec.metadata.General.title = (self.metadata.General.title +
                                        " histogram")
    if out is None:
        return hist_spec
    else:
        out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ %= (HISTOGRAM_BIN_ARGS, HISTOGRAM_MAX_BIN_ARGS, OUT_ARG, RECHUNK_ARG)
def map(
    self,
    function,
    show_progressbar=None,
    parallel=None,
    max_workers=None,
    inplace=True,
    ragged=None,
    output_signal_size=None,
    output_dtype=None,
    **kwargs
):
    """Apply a function to the signal data at all the navigation
    coordinates.
    The function must operate on numpy arrays. It is applied to the data at
    each navigation coordinate pixel-by-pixel. Any extra keyword arguments
    are passed to the function. The keywords can take different values at
    different coordinates. If the function takes an `axis` or `axes`
    argument, the function is assumed to be vectorized and the signal axes
    are assigned to `axis` or `axes`. Otherwise, the signal is iterated
    over the navigation axes and a progress bar is displayed to monitor the
    progress.
    In general, only navigation axes (order, calibration, and number) are
    guaranteed to be preserved.
    Parameters
    ----------
    function : :std:term:`function`
        Any function that can be applied to the signal.
    %s
    %s
    %s
    inplace : bool, default True
        if ``True``, the data is replaced by the result. Otherwise
        a new Signal with the results is returned.
    ragged : None or bool, default None
        Indicates if the results for each navigation pixel are of identical
        shape (and/or numpy arrays to begin with). If ``None``,
        the appropriate choice is made while processing. If True in case
        of lazy signal, the signal will be computed at the end of the
        mapping. Note: ``None`` is not allowed for Lazy signals!
    **kwargs : dict
        All extra keyword arguments are passed to the provided function
    Notes
    -----
    If the function results do not have identical shapes, the result is an
    array of navigation shape, where each element corresponds to the result
    of the function (of arbitrary object type), called a "ragged array". As
    such, most functions are not able to operate on the result and the data
    should be used directly.
    This method is similar to Python's :py:func:`python:map` that can
    also be utilized with a :py:class:`~hyperspy.signal.BaseSignal`
    instance for similar purposes. However, this method has the advantage of
    being faster because it iterates the underlying numpy data array
    instead of the :py:class:`~hyperspy.signal.BaseSignal`.
    Examples
    --------
    Apply a Gaussian filter to all the images in the dataset. The sigma
    parameter is constant:
    >>> import scipy.ndimage
    >>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
    >>> im.map(scipy.ndimage.gaussian_filter, sigma=2.5)
    Apply a Gaussian filter to all the images in the dataset. The signal
    parameter is variable:
    >>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
    >>> sigmas = hs.signals.BaseSignal(np.linspace(2,5,10)).T
    >>> im.map(scipy.ndimage.gaussian_filter, sigma=sigmas)
    Note
    ----
    Currently requires a uniform axis.
    """
    if self.axes_manager.navigation_shape == () and self._lazy:
        _logger.info("Converting signal to a non-lazy signal because there are no nav dimensions")
        self.compute()
    # Separate kwargs into those that iterate with the navigation axes
    # (BaseSignal values with a matching navigation shape) and constants.
    ndkwargs = {}
    ndkeys = [key for key in kwargs if isinstance(kwargs[key], BaseSignal)]
    for key in ndkeys:
        if kwargs[key].axes_manager.navigation_shape == self.axes_manager.navigation_shape:
            ndkwargs[key] = kwargs.pop(key)
        elif kwargs[key].axes_manager.navigation_shape == () or kwargs[key].axes_manager.navigation_shape == (1,):
            kwargs[key] = np.squeeze(kwargs[key].data)  # this really isn't an iterating signal.
        else:
            raise ValueError(f'The size of the navigation_shape for the kwarg {key} '
                             f'(<{kwargs[key].axes_manager.navigation_shape}> must be consistent'
                             f'with the size of the mapped signal '
                             f'<{self.axes_manager.navigation_shape}>')
    # TODO: Consider support for non-uniform signal axis
    if any([not ax.is_uniform for ax in self.axes_manager.signal_axes]):
        _logger.warning(
            "At least one axis of the signal is non-uniform. Can your "
            "`function` operate on non-uniform axes?")
    else:
        # Check if the signal axes have inhomogeneous scales and/or units and
        # display in warning if yes.
        scale = set()
        units = set()
        for i in range(len(self.axes_manager.signal_axes)):
            scale.add(self.axes_manager.signal_axes[i].scale)
            units.add(self.axes_manager.signal_axes[i].units)
        if len(units) != 1 or len(scale) != 1:
            _logger.warning(
                "The function you applied does not take into "
                "account the difference of units and of scales in-between"
                " axes.")
    # If the function has an axis argument and the signal dimension is 1,
    # we suppose that it can operate on the full array and we don't
    # iterate over the coordinates.
    fargs = []
    try:
        # numpy ufuncs operate element-wise on the inputs and we don't
        # expect them to have an axis argument
        if not isinstance(function, np.ufunc):
            fargs = inspect.signature(function).parameters.keys()
        else:
            _logger.warning(f"The function `{function.__name__}` can "
                            "direcly operate on hyperspy signals and it "
                            "is not necessary to use `map`.")
    except TypeError as error:
        # This is probably a Cython function that is not supported by
        # inspect.
        _logger.warning(error)
    if not ndkwargs and (self.axes_manager.signal_dimension == 1 and
                         "axis" in fargs):
        kwargs['axis'] = self.axes_manager.signal_axes[-1].index_in_array
        res = self._map_all(function, inplace=inplace, **kwargs)
    # If the function has an axes argument
    # we suppose that it can operate on the full array and we don't
    # iterate over the coordinates.
    elif not ndkwargs and "axes" in fargs and not parallel:
        kwargs['axes'] = tuple([axis.index_in_array for axis in
                                self.axes_manager.signal_axes])
        res = self._map_all(function, inplace=inplace, **kwargs)
    else:
        if self._lazy:
            kwargs["output_signal_size"] = output_signal_size
            kwargs["output_dtype"] = output_dtype
        # Iteration over coordinates.
        res = self._map_iterate(function, iterating_kwargs=ndkwargs,
                                show_progressbar=show_progressbar,
                                parallel=parallel,
                                max_workers=max_workers,
                                ragged=ragged,
                                inplace=inplace,
                                **kwargs)
    if inplace:
        self.events.data_changed.trigger(obj=self)
    return res
map.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def _map_all(self, function, inplace=True, **kwargs):
"""The function has to have either 'axis' or 'axes' keyword argument,
and hence support operating on the full dataset efficiently.
Replaced for lazy signals"""
newdata = function(self.data, **kwargs)
if inplace:
self.data = newdata
return None
return self._deepcopy_with_new_data(newdata)
def _map_iterate(
    self,
    function,
    iterating_kwargs=(),
    show_progressbar=None,
    parallel=None,
    max_workers=None,
    ragged=None,
    inplace=True,
    **kwargs,
):
    """Iterates the signal navigation space applying the function.
    Parameters
    ----------
    function : :std:term:`function`
        the function to apply
    iterating_kwargs : tuple (of tuples)
        A tuple with structure (('key1', value1), ('key2', value2), ..)
        where the key-value pairs will be passed as kwargs for the
        function to be mapped, and the values will be iterated together
        with the signal navigation. The value needs to be a signal
        instance because passing array can be ambiguous and will be removed
        in HyperSpy 2.0.
    %s
    %s
    %s
    inplace : bool, default True
        if ``True``, the data is replaced by the result. Otherwise
        a new signal with the results is returned.
    ragged : None or bool, default None
        Indicates if results for each navigation pixel are of identical
        shape (and/or numpy arrays to begin with). If ``None``,
        an appropriate choice is made while processing. Note: ``None`` is
        not allowed for Lazy signals!
    **kwargs : dict
        Additional keyword arguments passed to :std:term:`function`
    Notes
    -----
    This method is replaced for lazy signals.
    Examples
    --------
    Pass a larger array of different shape
    >>> s = hs.signals.Signal1D(np.arange(20.).reshape((20,1)))
    >>> def func(data, value=0):
    ...     return data + value
    >>> # pay attention that it's a tuple of tuples - need commas
    >>> s._map_iterate(func,
    ...                iterating_kwargs=(('value',
    ...                                   np.random.rand(5,400).flat),))
    >>> s.data.T
    array([[ 0.82869603,  1.04961735,  2.21513949,  3.61329091,
             4.2481755 ,  5.81184375,  6.47696867,  7.07682618,
             8.16850697,  9.37771809, 10.42794054, 11.24362699,
            12.11434077, 13.98654036, 14.72864184, 15.30855499,
            16.96854373, 17.65077064, 18.64925703, 19.16901297]])
    Storing function result to other signal (e.g. calculated shifts)
    >>> s = hs.signals.Signal1D(np.arange(20.).reshape((5,4)))
    >>> def func(data): # the original function
    ...     return data.sum()
    >>> result = s._get_navigation_signal().T
    >>> def wrapped(*args, data=None):
    ...     return func(data)
    >>> result._map_iterate(wrapped,
    ...                     iterating_kwargs=(('data', s),))
    >>> result.data
    array([  6.,  22.,  38.,  54.,  70.])
    """
    from os import cpu_count
    from hyperspy.misc.utils import create_map_objects, map_result_construction
    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar
    if parallel is None:
        parallel = preferences.General.parallel
    # Legacy tuple-of-tuples form is normalised to a dict.
    if isinstance(iterating_kwargs, (tuple, list)):
        iterating_kwargs = dict((k, v) for k, v in iterating_kwargs)
    size = max(1, self.axes_manager.navigation_size)
    # ``func`` wraps ``function`` so each call receives one navigation
    # element from every iterator, zipped together with the signal data.
    func, iterators = create_map_objects(function, size, iterating_kwargs, **kwargs)
    iterators = (self._iterate_signal(),) + iterators
    res_shape = self.axes_manager._navigation_shape_in_array
    # no navigation
    if not len(res_shape):
        res_shape = (1,)
    # pre-allocate some space
    res_data = np.empty(res_shape, dtype="O")
    shapes = set()
    if show_progressbar:
        pbar = progressbar(total=size, leave=True, disable=not show_progressbar)
    # We set this value to equal cpu_count, with a maximum
    # of 32 cores, since the earlier default value was inappropriate
    # for many-core machines.
    if max_workers is None:
        max_workers = min(32, cpu_count())
    # Avoid any overhead of additional threads
    if max_workers < 2:
        parallel = False
    # parallel or sequential mapping
    if parallel:
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            for ind, res in zip(
                range(res_data.size), executor.map(func, zip(*iterators))
            ):
                res = np.asarray(res)
                res_data.flat[ind] = res
                # Track result shapes: with ragged=False a mismatch is an
                # error; otherwise they decide raggedness afterwards.
                if ragged is False:
                    shapes.add(res.shape)
                    if len(shapes) != 1:
                        raise ValueError(
                            "The result shapes are not identical, but ragged=False"
                        )
                else:
                    try:
                        shapes.add(res.shape)
                    except AttributeError:
                        shapes.add(None)
                if show_progressbar:
                    pbar.update(1)
    else:
        from builtins import map
        for ind, res in zip(range(res_data.size), map(func, zip(*iterators))):
            res = np.asarray(res)
            res_data.flat[ind] = res
            if ragged is False:
                shapes.add(res.shape)
                if len(shapes) != 1:
                    raise ValueError(
                        "The result shapes are not identical, but ragged=False"
                    )
            else:
                try:
                    shapes.add(res.shape)
                except AttributeError:
                    shapes.add(None)
            if show_progressbar:
                pbar.update(1)
    # Combine data if required
    shapes = list(shapes)
    suitable_shapes = len(shapes) == 1 and shapes[0] is not None
    ragged = ragged or not suitable_shapes
    sig_shape = None
    if not ragged:
        # Uniform results: stack the object array into a dense array of
        # navigation shape + signal shape.
        sig_shape = () if shapes[0] == (1,) else shapes[0]
        res_data = np.stack(res_data.ravel()).reshape(
            self.axes_manager._navigation_shape_in_array + sig_shape
        )
    res = map_result_construction(self, inplace, res_data, ragged, sig_shape)
    return res
_map_iterate.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def copy(self):
    """
    Return a "shallow copy" of this Signal via the standard library's
    :py:func:`~copy.copy`. The returned Signal shares the underlying data
    array with this one -- nothing is duplicated in memory.

    See Also
    --------
    :py:meth:`~hyperspy.signal.BaseSignal.deepcopy`
    """
    # The active plot holds GUI references that must not travel with the
    # copy; detach it for the duration of the copy, then reattach.
    saved_plot = self._plot
    self._plot = None
    try:
        return copy.copy(self)
    finally:
        self._plot = saved_plot
def __deepcopy__(self, memo):
    # Rebuild the signal from its dictionary representation, then
    # duplicate the data array so the copy owns its memory.
    dc = type(self)(**self._to_dictionary())
    if isinstance(dc.data, np.ndarray):
        dc.data = dc.data.copy()
    # uncomment if we want to deepcopy models as well:
    # dc.models._add_dictionary(
    #     copy.deepcopy(
    #         self.models._models.as_dictionary()))
    # The Signal subclasses might change the view on init
    # The following code just copies the original view
    for oaxis, caxis in zip(self.axes_manager._axes,
                            dc.axes_manager._axes):
        caxis.navigate = oaxis.navigate
    # Markers reference an axes_manager; rebuild them against the copy's
    # axes_manager so they do not point back at the original signal.
    if dc.metadata.has_item('Markers'):
        temp_marker_dict = dc.metadata.Markers.as_dictionary()
        markers_dict = markers_metadata_dict_to_markers(
            temp_marker_dict,
            dc.axes_manager)
        dc.metadata.Markers = markers_dict
    return dc
def deepcopy(self):
    """
    Return a "deep copy" of this Signal using the
    standard library's :py:func:`~copy.deepcopy` function. Note: this means
    the underlying data structure will be duplicated in memory.
    See Also
    --------
    :py:meth:`~hyperspy.signal.BaseSignal.copy`
    """
    # Delegates to __deepcopy__, which also copies the data array.
    return copy.deepcopy(self)
def change_dtype(self, dtype, rechunk=True):
    """Change the data type of a Signal.
    Parameters
    ----------
    dtype : str or :py:class:`numpy.dtype`
        Typecode string or data-type to which the Signal's data array is
        cast. In addition to all the standard numpy :ref:`arrays.dtypes`,
        HyperSpy supports four extra dtypes for RGB images: ``'rgb8'``,
        ``'rgba8'``, ``'rgb16'``, and ``'rgba16'``. Changing from and to
        any ``rgb(a)`` `dtype` is more constrained than most other `dtype`
        conversions. To change to an ``rgb(a)`` `dtype`,
        the `signal_dimension` must be 1, and its size should be 3 (for
        ``rgb``) or 4 (for ``rgba``) `dtypes`. The original `dtype`
        should be ``uint8`` or ``uint16`` if converting to ``rgb(a)8``
        or ``rgb(a)16``, and the `navigation_dimension` should be at
        least 2. After conversion, the `signal_dimension` becomes 2. The
        `dtype` of images with original `dtype` ``rgb(a)8`` or ``rgb(a)16``
        can only be changed to ``uint8`` or ``uint16``, and the
        `signal_dimension` becomes 1.
    %s
    Examples
    --------
    >>> s = hs.signals.Signal1D([1,2,3,4,5])
    >>> s.data
    array([1, 2, 3, 4, 5])
    >>> s.change_dtype('float')
    >>> s.data
    array([ 1.,  2.,  3.,  4.,  5.])
    """
    if not isinstance(dtype, np.dtype):
        if dtype in rgb_tools.rgb_dtypes:
            # Converting *to* an RGB(A) dtype: the last signal axis holds
            # the colour channels, so the signal dimension must be exactly
            # 1 and the element dtype must match the target bit depth.
            if self.axes_manager.signal_dimension != 1:
                raise AttributeError(
                    "Only 1D signals can be converted "
                    "to RGB images.")
            if "8" in dtype and self.data.dtype.name != "uint8":
                raise AttributeError(
                    "Only signals with dtype uint8 can be converted to "
                    "rgb8 images")
            elif "16" in dtype and self.data.dtype.name != "uint16":
                raise AttributeError(
                    "Only signals with dtype uint16 can be converted to "
                    "rgb16 images")
            # Pack the channel axis into a structured rgb(a) array and
            # promote the remaining two axes to a 2D image signal.
            self.data = rgb_tools.regular_array2rgbx(self.data)
            self.axes_manager.remove(-1)
            self.axes_manager.set_signal_dimension(2)
            self._assign_subclass()
            return
        else:
            dtype = np.dtype(dtype)
    if rgb_tools.is_rgbx(self.data):
        # Converting *from* an RGB(A) dtype: only the matching unsigned
        # integer dtype is allowed, and the channel axis is restored.
        ddtype = self.data.dtype.fields["B"][0]
        if ddtype != dtype:
            # BUGFIX: error message previously read "possibile".
            raise ValueError(
                "It is only possible to change to %s." %
                ddtype)
        self.data = rgb_tools.rgbx2regular_array(self.data)
        self.axes_manager._append_axis(
            size=self.data.shape[-1],
            scale=1,
            offset=0,
            name="RGB index",
            navigate=False,)
        self.axes_manager.set_signal_dimension(1)
        self._assign_subclass()
        return
    else:
        self.data = self.data.astype(dtype)
    # The dtype change may move the signal to a different subclass
    # (e.g. complex data), so re-resolve it.
    self._assign_subclass()
change_dtype.__doc__ %= (RECHUNK_ARG)
def estimate_poissonian_noise_variance(self,
                                       expected_value=None,
                                       gain_factor=None,
                                       gain_offset=None,
                                       correlation_factor=None):
    r"""Estimate the Poissonian noise variance of the signal.
    The variance is stored in the
    `metadata.Signal.Noise_properties.variance` attribute.
    The Poissonian noise variance is equal to the expected value. With the
    default arguments, this method simply sets the variance attribute to
    the given `expected_value`. However, more generally (although then the
    noise is not strictly Poissonian), the variance may be proportional to
    the expected value. Moreover, when the noise is a mixture of white
    (Gaussian) and Poissonian noise, the variance is described by the
    following linear model:
    .. math::
        \mathrm{Var}[X] = (a * \mathrm{E}[X] + b) * c
    Where `a` is the `gain_factor`, `b` is the `gain_offset` (the Gaussian
    noise variance) and `c` the `correlation_factor`. The correlation
    factor accounts for correlation of adjacent signal elements that can
    be modeled as a convolution with a Gaussian point spread function.
    Parameters
    ----------
    expected_value : :py:data:`None` or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
        If ``None``, the signal data is taken as the expected value. Note
        that this may be inaccurate where the value of `data` is small.
    gain_factor : None or float
        `a` in the above equation. Must be positive. If ``None``, take the
        value from `metadata.Signal.Noise_properties.Variance_linear_model`
        if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
        ``gain_factor=1``). If not ``None``, the value is stored in
        `metadata.Signal.Noise_properties.Variance_linear_model`.
    gain_offset : None or float
        `b` in the above equation. Must be positive. If ``None``, take the
        value from `metadata.Signal.Noise_properties.Variance_linear_model`
        if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
        ``gain_offset=0``). If not ``None``, the value is stored in
        `metadata.Signal.Noise_properties.Variance_linear_model`.
    correlation_factor : None or float
        `c` in the above equation. Must be positive. If ``None``, take the
        value from `metadata.Signal.Noise_properties.Variance_linear_model`
        if defined. Otherwise, suppose pure Poissonian noise (*i.e.*
        ``correlation_factor=1``). If not ``None``, the value is stored in
        `metadata.Signal.Noise_properties.Variance_linear_model`.
    """
    if expected_value is None:
        expected_value = self
    # Lazy (dask) data is used as-is; numpy data is copied so the model
    # evaluation cannot mutate the input array.
    dc = expected_value.data if expected_value._lazy else expected_value.data.copy()
    if self.metadata.has_item(
            "Signal.Noise_properties.Variance_linear_model"):
        vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
    else:
        self.metadata.add_node(
            "Signal.Noise_properties.Variance_linear_model")
        vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
    # NOTE(review): the docstring claims explicitly passed parameters are
    # stored in the metadata model, but the code below only persists the
    # *defaults* when an item is missing -- confirm intended behavior.
    if gain_factor is None:
        if not vlm.has_item("gain_factor"):
            vlm.gain_factor = 1
        gain_factor = vlm.gain_factor
    if gain_offset is None:
        if not vlm.has_item("gain_offset"):
            vlm.gain_offset = 0
        gain_offset = vlm.gain_offset
    if correlation_factor is None:
        if not vlm.has_item("correlation_factor"):
            vlm.correlation_factor = 1
        correlation_factor = vlm.correlation_factor
    if gain_offset < 0:
        raise ValueError("`gain_offset` must be positive.")
    if gain_factor < 0:
        raise ValueError("`gain_factor` must be positive.")
    if correlation_factor < 0:
        raise ValueError("`correlation_factor` must be positive.")
    variance = self._estimate_poissonian_noise_variance(dc, gain_factor,
                                                        gain_offset,
                                                        correlation_factor)
    variance = BaseSignal(variance, attributes={'_lazy': self._lazy})
    # The variance varies with the same axes as the data it describes.
    variance.axes_manager = self.axes_manager
    variance.metadata.General.title = ("Variance of " + self.metadata.General.title)
    self.set_noise_variance(variance)
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
variance = np.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
def set_noise_variance(self, variance):
"""Set the noise variance of the signal.
Equivalent to ``s.metadata.set_item("Signal.Noise_properties.variance", variance)``.
Parameters
----------
variance : None or float or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
Value or values of the noise variance. A value of None is
equivalent to clearing the variance.
Returns
-------
None
"""
if isinstance(variance, BaseSignal):
if (
variance.axes_manager.navigation_shape
!= self.axes_manager.navigation_shape
):
raise ValueError(
"The navigation shape of the `variance` is "
"not equal to the navigation shape of the signal"
)
elif isinstance(variance, numbers.Number):
pass
elif variance is None:
pass
else:
raise ValueError(
"`variance` must be one of [None, float, "
f"hyperspy.signal.BaseSignal], not {type(variance)}."
)
self.metadata.set_item("Signal.Noise_properties.variance", variance)
def get_noise_variance(self):
"""Get the noise variance of the signal, if set.
Equivalent to ``s.metadata.Signal.Noise_properties.variance``.
Parameters
----------
None
Returns
-------
variance : None or float or :py:class:`~hyperspy.signal.BaseSignal` (or subclasses)
Noise variance of the signal, if set.
Otherwise returns None.
"""
if "Signal.Noise_properties.variance" in self.metadata:
return self.metadata.Signal.Noise_properties.variance
return None
    def get_current_signal(self, auto_title=True, auto_filename=True):
        """Returns the data at the current coordinates as a
        :py:class:`~hyperspy.signal.BaseSignal` subclass.
        The signal subclass is the same as that of the current object. All the
        axes navigation attributes are set to ``False``.
        Parameters
        ----------
        auto_title : bool
            If ``True``, the current indices (in parentheses) are appended to
            the title, separated by a space.
        auto_filename : bool
            If ``True`` and `tmp_parameters.filename` is defined
            (which is always the case when the Signal has been read from a
            file), the filename stored in the metadata is modified by
            appending an underscore and the current indices in parentheses.
        Returns
        -------
        cs : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
            The data at the current coordinates as a Signal
        Examples
        --------
        >>> im = hs.signals.Signal2D(np.zeros((2,3, 32,32)))
        >>> im
        <Signal2D, title: , dimensions: (3, 2, 32, 32)>
        >>> im.axes_manager.indices = 2,1
        >>> im.get_current_signal()
        <Signal2D, title: (2, 1), dimensions: (32, 32)>
        """
        metadata = self.metadata.deepcopy()
        # Check if marker update
        # Refresh auto-updating markers so their stored data reflects the
        # current navigation position before being carried over.
        if metadata.has_item('Markers'):
            marker_name_list = metadata.Markers.keys()
            markers_dict = metadata.Markers.__dict__
            for marker_name in marker_name_list:
                # `_dtb_value_` holds the marker object inside the metadata tree.
                marker = markers_dict[marker_name]['_dtb_value_']
                if marker.auto_update:
                    marker.axes_manager = self.axes_manager
                    key_dict = {}
                    for key in marker.data.dtype.names:
                        key_dict[key] = marker.get_data_position(key)
                    marker.set_data(**key_dict)
        # self() evaluates the signal at the current navigation indices.
        cs = self.__class__(
            self(),
            axes=self.axes_manager._get_signal_axes_dicts(),
            metadata=metadata.as_dictionary(),
            attributes={'_lazy': False})
        # Re-instantiate the markers from their dictionary representation so
        # the extracted signal owns independent marker objects.
        if cs.metadata.has_item('Markers'):
            temp_marker_dict = cs.metadata.Markers.as_dictionary()
            markers_dict = markers_metadata_dict_to_markers(
                temp_marker_dict,
                cs.axes_manager)
            cs.metadata.Markers = markers_dict
        if auto_filename is True and self.tmp_parameters.has_item('filename'):
            cs.tmp_parameters.filename = (self.tmp_parameters.filename +
                                          '_' +
                                          str(self.axes_manager.indices))
            cs.tmp_parameters.extension = self.tmp_parameters.extension
            cs.tmp_parameters.folder = self.tmp_parameters.folder
        if auto_title is True:
            cs.metadata.General.title = (cs.metadata.General.title +
                                         ' ' + str(self.axes_manager.indices))
        # The extracted signal has no navigation axes left.
        cs.axes_manager._set_axis_attribute_values("navigate", False)
        return cs
    def _get_navigation_signal(self, data=None, dtype=None):
        """Return a signal with the same axes as the navigation space.
        Parameters
        ----------
        data : None or :py:class:`numpy.ndarray`, optional
            If ``None``, the resulting Signal data is an array of the same
            `dtype` as the current one filled with zeros. If a numpy array,
            the array must have the correct dimensions.
        dtype : :py:class:`numpy.dtype`, optional
            The desired data-type for the data array when `data` is ``None``,
            e.g., ``numpy.int8``. The default is the data type of the current
            signal data.
        """
        from dask.array import Array
        if data is not None:
            # A navigation dimension of 0 is represented by a single-element
            # array.
            ref_shape = (self.axes_manager._navigation_shape_in_array
                         if self.axes_manager.navigation_dimension != 0
                         else (1,))
            if data.shape != ref_shape:
                raise ValueError(
                    ("data.shape %s is not equal to the current navigation "
                     "shape in array which is %s") %
                    (str(data.shape), str(ref_shape)))
        else:
            if dtype is None:
                dtype = self.data.dtype
            if self.axes_manager.navigation_dimension == 0:
                data = np.array([0, ], dtype=dtype)
            else:
                data = np.zeros(
                    self.axes_manager._navigation_shape_in_array,
                    dtype=dtype)
        # Pick the most specific signal class for the navigation
        # dimensionality (Signal1D/Signal2D when possible).
        if self.axes_manager.navigation_dimension == 0:
            s = BaseSignal(data)
        elif self.axes_manager.navigation_dimension == 1:
            from hyperspy._signals.signal1d import Signal1D
            s = Signal1D(data,
                         axes=self.axes_manager._get_navigation_axes_dicts())
        elif self.axes_manager.navigation_dimension == 2:
            from hyperspy._signals.signal2d import Signal2D
            s = Signal2D(data,
                         axes=self.axes_manager._get_navigation_axes_dicts())
        else:
            s = BaseSignal(
                data,
                axes=self.axes_manager._get_navigation_axes_dicts())
            s.axes_manager.set_signal_dimension(
                self.axes_manager.navigation_dimension)
        # Preserve laziness when the provided data is a dask array.
        if isinstance(data, Array):
            s = s.as_lazy()
        return s
    def _get_signal_signal(self, data=None, dtype=None):
        """Return a signal with the same axes as the signal space.
        Parameters
        ----------
        data : None or :py:class:`numpy.ndarray`, optional
            If ``None``, the resulting Signal data is an array of the same
            `dtype` as the current one filled with zeros. If a numpy array,
            the array must have the correct dimensions.
        dtype : :py:class:`numpy.dtype`, optional
            The desired data-type for the data array when `data` is ``None``,
            e.g., ``numpy.int8``. The default is the data type of the current
            signal data.
        """
        from dask.array import Array
        if data is not None:
            # A signal dimension of 0 is represented by a single-element
            # array.
            ref_shape = (self.axes_manager._signal_shape_in_array
                         if self.axes_manager.signal_dimension != 0
                         else (1,))
            if data.shape != ref_shape:
                raise ValueError(
                    "data.shape %s is not equal to the current signal shape in"
                    " array which is %s" % (str(data.shape), str(ref_shape)))
        else:
            if dtype is None:
                dtype = self.data.dtype
            if self.axes_manager.signal_dimension == 0:
                data = np.array([0, ], dtype=dtype)
            else:
                data = np.zeros(
                    self.axes_manager._signal_shape_in_array,
                    dtype=dtype)
        if self.axes_manager.signal_dimension == 0:
            s = BaseSignal(data)
            s.set_signal_type(self.metadata.Signal.signal_type)
        else:
            # Same class as self so subclass-specific behaviour is kept.
            s = self.__class__(data,
                               axes=self.axes_manager._get_signal_axes_dicts())
        # Preserve laziness when the provided data is a dask array.
        if isinstance(data, Array):
            s = s.as_lazy()
        return s
def __iter__(self):
# Reset AxesManager iteration index
self.axes_manager.__iter__()
return self
    def __next__(self):
        # Advance the AxesManager to the next navigation position
        # (StopIteration propagates from it), then return the signal there.
        next(self.axes_manager)
        return self.get_current_signal()
def __len__(self):
nitem = int(self.axes_manager.navigation_size)
nitem = nitem if nitem > 0 else 1
return nitem
    def as_signal1D(self, spectral_axis, out=None, optimize=True):
        """Return the Signal as a spectrum.
        The chosen spectral axis is moved to the last index in the
        array and the data is made contiguous for efficient iteration over
        spectra. By default, the method ensures the data is stored optimally,
        hence often making a copy of the data. See
        :py:meth:`~hyperspy.signal.BaseSignal.transpose` for a more general
        method with more options.
        Parameters
        ----------
        spectral_axis %s
        %s
        %s
        See also
        --------
        as_signal2D, transpose, :py:func:`hyperspy.misc.utils.transpose`
        Examples
        --------
        >>> img = hs.signals.Signal2D(np.ones((3,4,5,6)))
        >>> img
        <Signal2D, title: , dimensions: (4, 3, 6, 5)>
        >>> img.as_signal1D(-1+1j)
        <Signal1D, title: , dimensions: (6, 5, 4, 3)>
        >>> img.as_signal1D(0)
        <Signal1D, title: , dimensions: (6, 5, 3, 4)>
        """
        # `transpose` performs the actual axis reordering (and copies the
        # data when `optimize` requires it).
        sp = self.transpose(signal_axes=[spectral_axis], optimize=optimize)
        if out is None:
            return sp
        else:
            # Write the result into the provided signal in place and notify
            # any listeners that its data changed.
            if out._lazy:
                out.data = sp.data
            else:
                out.data[:] = sp.data
            out.events.data_changed.trigger(obj=out)
    as_signal1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG,
                            OPTIMIZE_ARG.replace('False', 'True'))
    def as_signal2D(self, image_axes, out=None, optimize=True):
        """Convert a signal to image (
        :py:class:`~hyperspy._signals.signal2d.Signal2D`).
        The chosen image axes are moved to the last indices in the
        array and the data is made contiguous for efficient
        iteration over images.
        Parameters
        ----------
        image_axes : tuple (of int, str or :py:class:`~hyperspy.axes.DataAxis`)
            Select the image axes. Note that the order of the axes matters
            and it is given in the "natural" i.e. `X`, `Y`, `Z`... order.
        %s
        %s
        Raises
        ------
        DataDimensionError
            When `data.ndim` < 2
        See also
        --------
        as_signal1D, transpose, :py:func:`hyperspy.misc.utils.transpose`
        Examples
        --------
        >>> s = hs.signals.Signal1D(np.ones((2,3,4,5)))
        >>> s
        <Signal1D, title: , dimensions: (4, 3, 2, 5)>
        >>> s.as_signal2D((0,1))
        <Signal2D, title: , dimensions: (5, 2, 4, 3)>
        >>> s.to_signal2D((1,2))
        <Signal2D, title: , dimensions: (4, 5, 3, 2)>
        """
        if self.data.ndim < 2:
            raise DataDimensionError(
                "A Signal dimension must be >= 2 to be converted to a Signal2D")
        # `transpose` performs the actual axis reordering (and copies the
        # data when `optimize` requires it).
        im = self.transpose(signal_axes=image_axes, optimize=optimize)
        if out is None:
            return im
        else:
            # Write the result into the provided signal in place and notify
            # any listeners that its data changed.
            if out._lazy:
                out.data = im.data
            else:
                out.data[:] = im.data
            out.events.data_changed.trigger(obj=out)
    as_signal2D.__doc__ %= (OUT_ARG, OPTIMIZE_ARG.replace('False', 'True'))
    def _assign_subclass(self):
        """Re-assign ``self.__class__`` to the signal subclass matching the
        current dtype, signal dimension and signal type, then re-initialise
        the instance (without a full initialisation) so the new class takes
        effect."""
        mp = self.metadata
        self.__class__ = hyperspy.io.assign_signal_subclass(
            dtype=self.data.dtype,
            signal_dimension=self.axes_manager.signal_dimension,
            signal_type=mp.Signal.signal_type
            if "Signal.signal_type" in mp
            else self._signal_type,
            lazy=self._lazy)
        if self._alias_signal_types:  # In case legacy types exist:
            mp.Signal.signal_type = self._signal_type  # set to default!
        self.__init__(self.data, full_initialisation=False)
        # Restore laziness lost by the class swap above.
        if self._lazy:
            self._make_lazy()
def set_signal_type(self, signal_type=""):
"""Set the signal type and convert the current signal accordingly.
The ``signal_type`` attribute specifies the type of data that the signal
contains e.g. electron energy-loss spectroscopy data,
photoemission spectroscopy data, etc.
When setting `signal_type` to a "known" type, HyperSpy converts the
current signal to the most appropriate
:py:class:`hyperspy.signal.BaseSignal` subclass. Known signal types are
signal types that have a specialized
:py:class:`hyperspy.signal.BaseSignal` subclass associated, usually
providing specific features for the analysis of that type of signal.
HyperSpy ships with a minimal set of known signal types. External
packages can register extra signal types. To print a list of
registered signal types in the current installation, call
:py:meth:`hyperspy.utils.print_known_signal_types`, and see
the developer guide for details on how to add new signal_types.
A non-exhaustive list of HyperSpy extensions is also maintained
here: https://github.com/hyperspy/hyperspy-extensions-list.
Parameters
----------
signal_type : str, optional
If no arguments are passed, the ``signal_type`` is set to undefined
and the current signal converted to a generic signal subclass.
Otherwise, set the signal_type to the given signal
type or to the signal type corresponding to the given signal type
alias. Setting the signal_type to a known signal type (if exists)
is highly advisable. If none exists, it is good practice
to set signal_type to a value that best describes the data signal
type.
See Also
--------
* :py:meth:`hyperspy.utils.print_known_signal_types`
Examples
--------
Let's first print all known signal types:
>>> s = hs.signals.Signal1D([0, 1, 2, 3])
>>> s
<Signal1D, title: , dimensions: (|4)>
>>> hs.print_known_signal_types()
+--------------------+---------------------+--------------------+----------+
| signal_type | aliases | class name | package |
+--------------------+---------------------+--------------------+----------+
| DielectricFunction | dielectric function | DielectricFunction | hyperspy |
| EDS_SEM | | EDSSEMSpectrum | hyperspy |
| EDS_TEM | | EDSTEMSpectrum | hyperspy |
| EELS | TEM EELS | EELSSpectrum | hyperspy |
| hologram | | HologramImage | hyperspy |
| MySignal | | MySignal | hspy_ext |
+--------------------+---------------------+--------------------+----------+
We can set the `signal_type` using the `signal_type`:
>>> s.set_signal_type("EELS")
>>> s
<EELSSpectrum, title: , dimensions: (|4)>
>>> s.set_signal_type("EDS_SEM")
>>> s
<EDSSEMSpectrum, title: , dimensions: (|4)>
or any of its aliases:
>>> s.set_signal_type("TEM EELS")
>>> s
<EELSSpectrum, title: , dimensions: (|4)>
To set the `signal_type` to `undefined`, simply call the method without arguments:
>>> s.set_signal_type()
>>> s
<Signal1D, title: , dimensions: (|4)>
"""
if signal_type is None:
warnings.warn(
"`s.set_signal_type(signal_type=None)` is deprecated. "
"Use `s.set_signal_type(signal_type='')` instead.",
VisibleDeprecationWarning
)
self.metadata.Signal.signal_type = signal_type
# _assign_subclass takes care of matching aliases with their
# corresponding signal class
self._assign_subclass()
def set_signal_origin(self, origin):
"""Set the `signal_origin` metadata value.
The `signal_origin` attribute specifies if the data was obtained
through experiment or simulation.
Parameters
----------
origin : str
Typically ``'experiment'`` or ``'simulation'``
"""
self.metadata.Signal.signal_origin = origin
    def print_summary_statistics(self, formatter="%.3g", rechunk=True):
        """Prints the five-number summary statistics of the data, the mean, and
        the standard deviation.
        Prints the mean, standard deviation (std), maximum (max), minimum
        (min), first quartile (Q1), median, and third quartile. nans are
        removed from the calculations.
        Parameters
        ----------
        formatter : str
           The number formatter to use for the output
        %s
        See also
        --------
        get_histogram
        """
        # All statistics are computed in a single helper call; NaNs are
        # ignored there.
        _mean, _std, _min, _q1, _q2, _q3, _max = self._calculate_summary_statistics(
            rechunk=rechunk)
        print(underline("Summary statistics"))
        print("mean:\t" + formatter % _mean)
        print("std:\t" + formatter % _std)
        print()
        print("min:\t" + formatter % _min)
        print("Q1:\t" + formatter % _q1)
        print("median:\t" + formatter % _q2)
        print("Q3:\t" + formatter % _q3)
        print("max:\t" + formatter % _max)
    print_summary_statistics.__doc__ %= (RECHUNK_ARG)
def _calculate_summary_statistics(self, **kwargs):
data = self.data
data = data[~np.isnan(data)]
_mean = np.nanmean(data)
_std = np.nanstd(data)
_min = np.nanmin(data)
_q1 = np.percentile(data, 25)
_q2 = np.percentile(data, 50)
_q3 = np.percentile(data, 75)
_max = np.nanmax(data)
return _mean, _std, _min, _q1, _q2, _q3, _max
    @property
    def is_rgba(self):
        """
        Whether or not this signal is an RGB + alpha channel `dtype`.
        """
        # Delegates the dtype check to hyperspy's rgb_tools helper.
        return rgb_tools.is_rgba(self.data)
    @property
    def is_rgb(self):
        """
        Whether or not this signal is an RGB `dtype`.
        """
        # Delegates the dtype check to hyperspy's rgb_tools helper.
        return rgb_tools.is_rgb(self.data)
    @property
    def is_rgbx(self):
        """
        Whether or not this signal is either an RGB or RGB + alpha channel
        `dtype`.
        """
        # Delegates the dtype check to hyperspy's rgb_tools helper.
        return rgb_tools.is_rgbx(self.data)
    def add_marker(
            self, marker, plot_on_signal=True, plot_marker=True,
            permanent=False, plot_signal=True, render_figure=True):
        """
        Add one or several markers to the signal or navigator plot and plot
        the signal, if not yet plotted (by default)
        Parameters
        ----------
        marker : :py:mod:`hyperspy.drawing.marker` object or iterable
            The marker or iterable (list, tuple, ...) of markers to add.
            See the :ref:`plot.markers` section in the User Guide if you want
            to add a large number of markers as an iterable, since this will
            be much faster. For signals with navigation dimensions,
            the markers can be made to change for different navigation
            indices. See the examples for info.
        plot_on_signal : bool
            If ``True`` (default), add the marker to the signal.
            If ``False``, add the marker to the navigator
        plot_marker : bool
            If ``True`` (default), plot the marker.
        permanent : bool
            If ``False`` (default), the marker will only appear in the current
            plot. If ``True``, the marker will be added to the
            `metadata.Markers` list, and be plotted with
            ``plot(plot_markers=True)``. If the signal is saved as a HyperSpy
            HDF5 file, the markers will be stored in the HDF5 signal and be
            restored when the file is loaded.
        Examples
        --------
        >>> import scipy.misc
        >>> im = hs.signals.Signal2D(scipy.misc.ascent())
        >>> m = hs.markers.rectangle(x1=150, y1=100, x2=400,
        >>>                          y2=400, color='red')
        >>> im.add_marker(m)
        Adding to a 1D signal, where the point will change
        when the navigation index is changed:
        >>> s = hs.signals.Signal1D(np.random.random((3, 100)))
        >>> marker = hs.markers.point((19, 10, 60), (0.2, 0.5, 0.9))
        >>> s.add_marker(marker, permanent=True, plot_marker=True)
        Add permanent marker:
        >>> s = hs.signals.Signal2D(np.random.random((100, 100)))
        >>> marker = hs.markers.point(50, 60, color='red')
        >>> s.add_marker(marker, permanent=True, plot_marker=True)
        Add permanent marker to signal with 2 navigation dimensions.
        The signal has navigation dimensions (3, 2), as the dimensions
        gets flipped compared to the output from :py:func:`numpy.random.random`.
        To add a vertical line marker which changes for different navigation
        indices, the list used to make the marker must be a nested list:
        2 lists with 3 elements each (2 x 3):
        >>> s = hs.signals.Signal1D(np.random.random((2, 3, 10)))
        >>> marker = hs.markers.vertical_line([[1, 3, 5], [2, 4, 6]])
        >>> s.add_marker(marker, permanent=True)
        Add permanent marker which changes with navigation position, and
        do not add it to a current plot:
        >>> s = hs.signals.Signal2D(np.random.randint(10, size=(3, 100, 100)))
        >>> marker = hs.markers.point((10, 30, 50), (30, 50, 60), color='red')
        >>> s.add_marker(marker, permanent=True, plot_marker=False)
        >>> s.plot(plot_markers=True) #doctest: +SKIP
        Removing a permanent marker:
        >>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
        >>> marker = hs.markers.point(10, 60, color='red')
        >>> marker.name = "point_marker"
        >>> s.add_marker(marker, permanent=True)
        >>> del s.metadata.Markers.point_marker
        Adding many markers as a list:
        >>> from numpy.random import random
        >>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
        >>> marker_list = []
        >>> for i in range(100):
        >>>     marker = hs.markers.point(random()*100, random()*100, color='red')
        >>>     marker_list.append(marker)
        >>> s.add_marker(marker_list, permanent=True)
        """
        # Normalise the input to a flat list of marker objects.
        if isiterable(marker):
            marker_list = marker
        else:
            marker_list = [marker]
        markers_dict = {}
        if permanent:
            # Collect the already-registered permanent markers so that name
            # clashes and double-adds can be detected below.
            if not self.metadata.has_item('Markers'):
                self.metadata.add_node('Markers')
            marker_object_list = []
            for marker_tuple in list(self.metadata.Markers):
                marker_object_list.append(marker_tuple[1])
            name_list = self.metadata.Markers.keys()
        marker_name_suffix = 1
        for m in marker_list:
            marker_data_shape = m._get_data_shape()[::-1]
            if (not (len(marker_data_shape) == 0)) and (
                    marker_data_shape != self.axes_manager.navigation_shape):
                raise ValueError(
                    "Navigation shape of the marker must be 0 or the "
                    "inverse navigation shape as this signal. If the "
                    "navigation dimensions for the signal is (2, 3), "
                    "the marker dimension must be (3, 2).")
            if (m.signal is not None) and (m.signal is not self):
                raise ValueError("Markers can not be added to several signals")
            m._plot_on_signal = plot_on_signal
            if plot_marker:
                if self._plot is None or not self._plot.is_active:
                    self.plot()
                if m._plot_on_signal:
                    self._plot.signal_plot.add_marker(m)
                else:
                    if self._plot.navigator_plot is None:
                        self.plot()
                    self._plot.navigator_plot.add_marker(m)
                # Defer figure rendering until all markers have been added.
                m.plot(render_figure=False)
            if permanent:
                for marker_object in marker_object_list:
                    if m is marker_object:
                        raise ValueError("Marker already added to signal")
                name = m.name
                temp_name = name
                # Make the name unique by appending an increasing suffix.
                while temp_name in name_list:
                    temp_name = name + str(marker_name_suffix)
                    marker_name_suffix += 1
                m.name = temp_name
                markers_dict[m.name] = m
                m.signal = self
                marker_object_list.append(m)
                name_list.append(m.name)
        if not plot_marker and not permanent:
            _logger.warning(
                "plot_marker=False and permanent=False does nothing")
        if permanent:
            self.metadata.Markers = markers_dict
        if plot_marker and render_figure:
            self._render_figure()
def _render_figure(self, plot=['signal_plot', 'navigation_plot']):
for p in plot:
if hasattr(self._plot, p):
p = getattr(self._plot, p)
p.render_figure()
    def _plot_permanent_markers(self):
        """Add every permanent marker stored in `metadata.Markers` to the
        currently open plot(s) and re-render the figure."""
        marker_name_list = self.metadata.Markers.keys()
        markers_dict = self.metadata.Markers.__dict__
        for marker_name in marker_name_list:
            # `_dtb_value_` holds the marker object inside the metadata tree.
            marker = markers_dict[marker_name]['_dtb_value_']
            if marker.plot_marker:
                if marker._plot_on_signal:
                    self._plot.signal_plot.add_marker(marker)
                else:
                    self._plot.navigator_plot.add_marker(marker)
                marker.plot(render_figure=False)
        # Render once at the end instead of once per marker.
        self._render_figure()
    def add_poissonian_noise(self, keep_dtype=True, random_state=None):
        """Add Poissonian noise to the data.
        This method works in-place. The resulting data type is ``int64``.
        If this is different from the original data type then a warning
        is added to the log.
        Parameters
        ----------
        keep_dtype : bool, default True
            If ``True``, keep the original data type of the signal data. For
            example, if the data type was initially ``'float64'``, the result of
            the operation (usually ``'int64'``) will be converted to
            ``'float64'``.
        random_state : None or int or RandomState instance, default None
            Seed for the random generator.
        Note
        ----
        This method uses :py:func:`numpy.random.poisson`
        (or :py:func:`dask.array.random.poisson` for lazy signals)
        to generate the Poissonian noise.
        """
        kwargs = {}
        # `check_random_state` returns a dask-aware random state for lazy
        # signals so the noise can be generated chunk-wise.
        random_state = check_random_state(random_state, lazy=self._lazy)
        if self._lazy:
            # Preserve the chunk layout of the lazy data.
            kwargs["chunks"] = self.data.chunks
        original_dtype = self.data.dtype
        # The current data serves as the expectation value (lam) of the
        # Poisson distribution; the data is replaced by noisy samples.
        self.data = random_state.poisson(lam=self.data, **kwargs)
        if self.data.dtype != original_dtype:
            if keep_dtype:
                _logger.warning(
                    f"Changing data type from {self.data.dtype} "
                    f"to the original {original_dtype}"
                )
                # Don't change the object if possible
                self.data = self.data.astype(original_dtype, copy=False)
            else:
                _logger.warning(
                    f"The data type changed from {original_dtype} "
                    f"to {self.data.dtype}"
                )
        self.events.data_changed.trigger(obj=self)
    def add_gaussian_noise(self, std, random_state=None):
        """Add Gaussian noise to the data.
        The operation is performed in-place (*i.e.* the data of the signal
        is modified). This method requires the signal to have a float data type,
        otherwise it will raise a :py:exc:`TypeError`.
        Parameters
        ----------
        std : float
            The standard deviation of the Gaussian noise.
        random_state : None or int or RandomState instance, default None
            Seed for the random generator.
        Note
        ----
        This method uses :py:func:`numpy.random.normal` (or
        :py:func:`dask.array.random.normal` for lazy signals)
        to generate the noise.
        """
        if self.data.dtype.char not in np.typecodes["AllFloat"]:
            raise TypeError(
                "`s.add_gaussian_noise()` requires the data to have "
                f"a float datatype, but the current type is '{self.data.dtype}'. "
                "To fix this issue, you can change the type using the "
                "change_dtype method (e.g. s.change_dtype('float64'))."
            )
        kwargs = {}
        # `check_random_state` returns a dask-aware random state for lazy
        # signals so the noise can be generated chunk-wise.
        random_state = check_random_state(random_state, lazy=self._lazy)
        if self._lazy:
            # Preserve the chunk layout of the lazy data.
            kwargs["chunks"] = self.data.chunks
        noise = random_state.normal(loc=0, scale=std, size=self.data.shape, **kwargs)
        if self._lazy:
            # With lazy data we can't keep the same array object
            self.data = self.data + noise
        else:
            # Don't change the object
            self.data += noise
        self.events.data_changed.trigger(obj=self)
    def transpose(self, signal_axes=None,
                  navigation_axes=None, optimize=False):
        """Transposes the signal to have the required signal and navigation
        axes.
        Parameters
        ----------
        signal_axes : None, int, or iterable type
            The number (or indices) of axes to convert to signal axes
        navigation_axes : None, int, or iterable type
            The number (or indices) of axes to convert to navigation axes
        %s
        Note
        ----
        With the exception of both axes parameters (`signal_axes` and
        `navigation_axes` getting iterables, generally one has to be ``None``
        (i.e. "floating"). The other one specifies either the required number
        or explicitly the indices of axes to move to the corresponding space.
        If both are iterables, full control is given as long as all axes
        are assigned to one space only.
        See also
        --------
        T, as_signal2D, as_signal1D, :py:func:`hyperspy.misc.utils.transpose`
        Examples
        --------
        >>> # just create a signal with many distinct dimensions
        >>> s = hs.signals.BaseSignal(np.random.rand(1,2,3,4,5,6,7,8,9))
        >>> s
        <BaseSignal, title: , dimensions: (|9, 8, 7, 6, 5, 4, 3, 2, 1)>
        >>> s.transpose() # swap signal and navigation spaces
        <BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
        >>> s.T # a shortcut for no arguments
        <BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
        >>> # roll to leave 5 axes in navigation space
        >>> s.transpose(signal_axes=5)
        <BaseSignal, title: , dimensions: (4, 3, 2, 1|9, 8, 7, 6, 5)>
        >>> # roll leave 3 axes in navigation space
        >>> s.transpose(navigation_axes=3)
        <BaseSignal, title: , dimensions: (3, 2, 1|9, 8, 7, 6, 5, 4)>
        >>> # 3 explicitly defined axes in signal space
        >>> s.transpose(signal_axes=[0, 2, 6])
        <BaseSignal, title: , dimensions: (8, 6, 5, 4, 2, 1|9, 7, 3)>
        >>> # A mix of two lists, but specifying all axes explicitly
        >>> # The order of axes is preserved in both lists
        >>> s.transpose(navigation_axes=[1, 2, 3, 4, 5, 8], signal_axes=[0, 6, 7])
        <BaseSignal, title: , dimensions: (8, 7, 6, 5, 4, 1|9, 3, 2)>
        """
        am = self.axes_manager
        ax_list = am._axes
        # Resolve the (signal_axes, navigation_axes) pair into two explicit
        # tuples of axis objects, validating the argument combination.
        if isinstance(signal_axes, int):
            if navigation_axes is not None:
                raise ValueError("The navigation_axes are not None, even "
                                 "though just a number was given for "
                                 "signal_axes")
            if len(ax_list) < signal_axes:
                raise ValueError("Too many signal axes requested")
            if signal_axes < 0:
                raise ValueError("Can't have negative number of signal axes")
            elif signal_axes == 0:
                signal_axes = ()
                navigation_axes = ax_list[::-1]
            else:
                navigation_axes = ax_list[:-signal_axes][::-1]
                signal_axes = ax_list[-signal_axes:][::-1]
        elif iterable_not_string(signal_axes):
            signal_axes = tuple(am[ax] for ax in signal_axes)
            if navigation_axes is None:
                navigation_axes = tuple(ax for ax in ax_list
                                        if ax not in signal_axes)[::-1]
            elif iterable_not_string(navigation_axes):
                # want to keep the order
                navigation_axes = tuple(am[ax] for ax in navigation_axes)
                intersection = set(signal_axes).intersection(navigation_axes)
                if len(intersection):
                    raise ValueError("At least one axis found in both spaces:"
                                     " {}".format(intersection))
                if len(am._axes) != (len(signal_axes) + len(navigation_axes)):
                    raise ValueError("Not all current axes were assigned to a "
                                     "space")
            else:
                raise ValueError("navigation_axes has to be None or an iterable"
                                 " when signal_axes is iterable")
        elif signal_axes is None:
            if isinstance(navigation_axes, int):
                if len(ax_list) < navigation_axes:
                    raise ValueError("Too many navigation axes requested")
                if navigation_axes < 0:
                    raise ValueError(
                        "Can't have negative number of navigation axes")
                elif navigation_axes == 0:
                    navigation_axes = ()
                    signal_axes = ax_list[::-1]
                else:
                    signal_axes = ax_list[navigation_axes:][::-1]
                    navigation_axes = ax_list[:navigation_axes][::-1]
            elif iterable_not_string(navigation_axes):
                navigation_axes = tuple(am[ax] for ax in
                                        navigation_axes)
                signal_axes = tuple(ax for ax in ax_list
                                    if ax not in navigation_axes)[::-1]
            elif navigation_axes is None:
                # Default (no arguments): swap the two spaces entirely.
                signal_axes = am.navigation_axes
                navigation_axes = am.signal_axes
            else:
                raise ValueError(
                    "The passed navigation_axes argument is not valid")
        else:
            raise ValueError("The passed signal_axes argument is not valid")
        # translate to axes idx from actual objects for variance
        idx_sig = [ax.index_in_axes_manager for ax in signal_axes]
        idx_nav = [ax.index_in_axes_manager for ax in navigation_axes]
        # From now on we operate with axes in array order
        signal_axes = signal_axes[::-1]
        navigation_axes = navigation_axes[::-1]
        # get data view
        array_order = tuple(
            ax.index_in_array for ax in navigation_axes)
        array_order += tuple(ax.index_in_array for ax in signal_axes)
        newdata = self.data.transpose(array_order)
        res = self._deepcopy_with_new_data(newdata, copy_variance=True,
                                           copy_learning_results=True)
        # reconfigure the axes of the axesmanager:
        ram = res.axes_manager
        ram._update_trait_handlers(remove=True)
        # _axes are ordered in array order
        ram._axes = [ram._axes[i] for i in array_order]
        for i, ax in enumerate(ram._axes):
            if i < len(navigation_axes):
                ax.navigate = True
            else:
                ax.navigate = False
        ram._update_attributes()
        ram._update_trait_handlers(remove=False)
        res._assign_subclass()
        # Transpose an attached variance signal consistently with the data.
        var = res.get_noise_variance()
        if isinstance(var, BaseSignal):
            var = var.transpose(signal_axes=idx_sig,
                                navigation_axes=idx_nav,
                                optimize=optimize)
            res.set_noise_variance(var)
        if optimize:
            # May copy the data to make it contiguous.
            res._make_sure_data_is_contiguous()
        if res.metadata.has_item('Markers'):
            # The markers might fail if the navigation dimensions are changed
            # so the safest is simply to not carry them over from the
            # previous signal.
            del res.metadata.Markers
        return res
    transpose.__doc__ %= (OPTIMIZE_ARG)
@property
def T(self):
"""The transpose of the signal, with signal and navigation spaces
swapped. Enables calling
:py:meth:`~hyperspy.signal.BaseSignal.transpose` with the default
parameters as a property of a Signal.
"""
return self.transpose()
    def apply_apodization(self, window='hann',
                          hann_order=None, tukey_alpha=0.5, inplace=False):
        """
        Apply an `apodization window
        <http://mathworld.wolfram.com/ApodizationFunction.html>`_ to a Signal.
        Parameters
        ----------
        window : str, optional
            Select between {``'hann'`` (default), ``'hamming'``, or ``'tukey'``}
        hann_order : None or int, optional
            Only used if ``window='hann'``
            If integer `n` is provided, a Hann window of `n`-th order will be
            used. If ``None``, a first order Hann window is used.
            Higher orders result in more homogeneous intensity distribution.
        tukey_alpha : float, optional
            Only used if ``window='tukey'`` (default is 0.5). From the
            documentation of
            :py:func:`scipy.signal.windows.tukey`:
            - Shape parameter of the Tukey window, representing the
              fraction of the window inside the cosine tapered region. If
              zero, the Tukey window is equivalent to a rectangular window.
              If one, the Tukey window is equivalent to a Hann window.
        inplace : bool, optional
            If ``True``, the apodization is applied in place, *i.e.* the signal
            data will be substituted by the apodized one (default is
            ``False``).
        Returns
        -------
        out : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses), optional
            If ``inplace=False``, returns the apodized signal of the same
            type as the provided Signal.
        Examples
        --------
        >>> import hyperspy.api as hs
        >>> holo = hs.datasets.example_signals.object_hologram()
        >>> holo.apply_apodization('tukey', tukey_alpha=0.1).plot()
        """
        # Select the 1D window generator according to `window`.
        if window == 'hanning' or window == 'hann':
            if hann_order:
                def window_function(
                    m): return hann_window_nth_order(m, hann_order)
            else:
                def window_function(m): return np.hanning(m)
        elif window == 'hamming':
            def window_function(m): return np.hamming(m)
        elif window == 'tukey':
            # NOTE(review): `scipy.signal.tukey` was removed in SciPy >= 1.13
            # in favour of `scipy.signal.windows.tukey` -- confirm what
            # `sp_signal` is bound to at the top of this module.
            def window_function(m): return sp_signal.tukey(m, tukey_alpha)
        else:
            raise ValueError('Wrong type parameter value.')
        windows_1d = []
        axes = np.array(self.axes_manager.signal_indices_in_array)
        # Build one 1D window per signal axis (as dask arrays with matching
        # chunks when the data is lazy) ...
        for axis, axis_index in zip(self.axes_manager.signal_axes, axes):
            if isinstance(self.data, da.Array):
                chunks = self.data.chunks[axis_index]
                window_da = da.from_array(window_function(axis.size),
                                          chunks=(chunks, ))
                windows_1d.append(window_da)
            else:
                windows_1d.append(window_function(axis.size))
        # ... then combine them into an N-D window with an outer product.
        window_nd = outer_nd(*windows_1d).T
        # Prepare slicing for multiplication window_nd nparray with data with
        # higher dimensionality:
        if inplace:
            slice_w = []
            # Iterate over all dimensions of the data
            for i in range(self.data.ndim):
                if any(
                        i == axes):  # If current dimension represents one of signal axis, all elements in window
                    # along current axis to be subscribed
                    slice_w.append(slice(None))
                else:  # If current dimension is navigation one, new axis is absent in window and should be created
                    slice_w.append(None)
            self.data = self.data * window_nd[tuple(slice_w)]
            self.events.data_changed.trigger(obj=self)
        else:
            return self * window_nd
def _check_navigation_mask(self, mask):
"""
Check the shape of the navigation mask.
Parameters
----------
mask : numpy array or BaseSignal.
Mask to check the shape.
Raises
------
ValueError
If shape doesn't match the shape of the navigation dimension.
Returns
-------
None.
"""
if isinstance(mask, BaseSignal):
if mask.axes_manager.signal_dimension != 0:
raise ValueError("The navigation mask signal must have the "
"`signal_dimension` equal to 0.")
elif (mask.axes_manager.navigation_shape !=
self.axes_manager.navigation_shape):
raise ValueError("The navigation mask signal must have the "
"same `navigation_shape` as the current "
"signal.")
if isinstance(mask, np.ndarray) and (
mask.shape != self.axes_manager.navigation_shape):
raise ValueError("The shape of the navigation mask array must "
"match `navigation_shape`.")
def _check_signal_mask(self, mask):
"""
Check the shape of the signal mask.
Parameters
----------
mask : numpy array or BaseSignal.
Mask to check the shape.
Raises
------
ValueError
If shape doesn't match the shape of the signal dimension.
Returns
-------
None.
"""
if isinstance(mask, BaseSignal):
if mask.axes_manager.navigation_dimension != 0:
raise ValueError("The signal mask signal must have the "
"`navigation_dimension` equal to 0.")
elif (mask.axes_manager.signal_shape !=
self.axes_manager.signal_shape):
raise ValueError("The signal mask signal must have the same "
"`signal_shape` as the current signal.")
if isinstance(mask, np.ndarray) and (
mask.shape != self.axes_manager.signal_shape):
raise ValueError("The shape of signal mask array must match "
"`signal_shape`.")
# Names of the numeric dunder methods that are generated below and attached
# to BaseSignal via exec/setattr metaprogramming.
ARITHMETIC_OPERATORS = (
    "__add__",
    "__sub__",
    "__mul__",
    "__floordiv__",
    "__mod__",
    "__divmod__",
    "__pow__",
    "__lshift__",
    "__rshift__",
    "__and__",
    "__xor__",
    "__or__",
    "__mod__",
    "__truediv__",
)
# NOTE(review): "__mod__" appears twice in ARITHMETIC_OPERATORS above; the
# duplicate is harmless (the generated method is simply defined twice) but
# looks unintentional.
INPLACE_OPERATORS = (
    "__iadd__",
    "__isub__",
    "__imul__",
    "__itruediv__",
    "__ifloordiv__",
    "__imod__",
    "__ipow__",
    "__ilshift__",
    "__irshift__",
    "__iand__",
    "__ixor__",
    "__ior__",
)
COMPARISON_OPERATORS = (
    "__lt__",
    "__le__",
    "__eq__",
    "__ne__",
    "__ge__",
    "__gt__",
)
UNARY_OPERATORS = (
    "__neg__",
    "__pos__",
    "__abs__",
    "__invert__",
)
# For every binary operator, generate a method that defers to
# BaseSignal._binary_operator_ruler, copy the numpy docstring, and attach it
# to BaseSignal.
for name in ARITHMETIC_OPERATORS + INPLACE_OPERATORS + COMPARISON_OPERATORS:
    exec(
        ("def %s(self, other):\n" % name) +
        (" return self._binary_operator_ruler(other, \'%s\')\n" %
         name))
    exec("%s.__doc__ = np.ndarray.%s.__doc__" % (name, name))
    exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
    # The following commented line enables the operators with swapped
    # operands. They should be defined only for commutative operators
    # but for simplicity we don't support this at all atm.
    # exec("setattr(BaseSignal, \'%s\', %s)" % (name[:2] + "r" + name[2:],
    # name))
# Implement unary arithmetic operations
for name in UNARY_OPERATORS:
    exec(
        ("def %s(self):" % name) +
        (" return self._unary_operator_ruler(\'%s\')" % name))
    exec("%s.__doc__ = int.%s.__doc__" % (name, name))
    exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
| thomasaarholt/hyperspy | hyperspy/signal.py | Python | gpl-3.0 | 252,685 | [
"Gaussian"
] | e16e2829820de5d68b646ed23c1bb8c2edff8029ee1390ca10ffef453197ba96 |
"""
=================================================
Deterministic Tracking with EuDX on Tensor Fields
=================================================
In this example we do deterministic fiber tracking on Tensor fields with EuDX
[Garyfallidis12]_.
This example requires to import example `reconst_dti.py` to run. EuDX was
primarily made with cpu efficiency in mind. Therefore, it should be useful to
give you a quick overview of your reconstruction results with the help of
tracking.
"""
import os
import sys
import numpy as np
import nibabel as nib
if not os.path.exists('tensor_fa.nii.gz'):
import reconst_dti
"""
EuDX will use the directions (eigen vectors) of the Tensors to propagate
streamlines from voxel to voxel and fractional anisotropy to stop tracking.
"""
fa_img = nib.load('tensor_fa.nii.gz')
FA = fa_img.get_data()
evecs_img = nib.load('tensor_evecs.nii.gz')
evecs = evecs_img.get_data()
"""
In the background of the image the fitting will not be accurate because there all
measured signal is mostly noise and possibly we will find FA values with nans
(not a number). We can easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
EuDX takes as input discretized voxel directions on a unit sphere. Therefore,
it is necessary to discretize the eigen vectors before feeding them in EuDX.
For the discretization procedure we use an evenly distributed sphere of 724
points which we can access using the get_sphere function.
"""
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
We use quantize_evecs (evecs here stands for eigen vectors) to apply the
discretization.
"""
from dipy.reconst.dti import quantize_evecs
peak_indices = quantize_evecs(evecs, sphere.vertices)
"""
EuDX is the fiber tracking algorithm that we use in this example.
The most important parameters are the first one which represents the
magnitude of the peak of a scalar anisotropic function, the
second which represents the indices of the discretized directions of
the peaks and odf_vertices are the vertices of the input sphere.
"""
from dipy.tracking.eudx import EuDX
eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000, odf_vertices = sphere.vertices, a_low=0.2)
tensor_streamlines = [streamline for streamline in eu]
"""
We can now save the results in the disk. For this purpose we can use the
TrackVis format (``*.trk``). First, we need to create a header.
"""
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = fa_img.header.get_zooms()[:3]
hdr['voxel_order'] = 'LAS'
hdr['dim'] = FA.shape
"""
Then we need to input the streamlines in the way that Trackvis format expects them.
"""
tensor_streamlines_trk = ((sl, None, None) for sl in tensor_streamlines)
ten_sl_fname = 'tensor_streamlines.trk'
"""
Save the streamlines.
"""
nib.trackvis.write(ten_sl_fname, tensor_streamlines_trk, hdr, points_space='voxel')
"""
If you don't want to use Trackvis to visualize the file you can use our
lightweight `fvtk` module.
"""
try:
from dipy.viz import fvtk
except ImportError:
raise ImportError('Python vtk module is not installed')
sys.exit()
"""
Create a scene.
"""
ren = fvtk.ren()
"""
Every streamline will be coloured according to its orientation
"""
from dipy.viz.colormap import line_colors
"""
fvtk.line adds a streamline actor for streamline visualization
and fvtk.add adds this actor in the scene
"""
fvtk.add(ren, fvtk.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))
print('Saving illustration as tensor_tracks.png')
ren.SetBackground(1, 1, 1)
fvtk.record(ren, n_frames=1, out_path='tensor_tracks.png', size=(600, 600))
"""
.. figure:: tensor_tracks.png
:align: center
**Deterministic streamlines with EuDX on a Tensor Field**.
.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography", PhD thesis, University of Cambridge, 2012.
.. include:: ../links_names.inc
"""
| villalonreina/dipy | doc/examples/tracking_eudx_tensor.py | Python | bsd-3-clause | 3,891 | [
"VTK"
] | 3daefe1a0d10380649811857a6c408dc030f9f6dfd34a87a18f2ddd5a8679c4a |
from __future__ import (absolute_import, division, print_function)
"""
This example shows how to plot data on rectangular 2D grids
(grids that are not rectlinear in geographic or native map projection
coordinates).
An example of such a grid is the 'POP' grid which is used in
the ocean component NCAR Community Climate System Model (CCSM).
"POP" stands for "Parallel Ocean Program", which was developed
at Los Alamos.
These grids may be thought of as rectangular arrays wrapped around the
globe in the usual way, with one subscript, call it I, associated with
longitude and the other subscript, call it J, associated with latitude,
and then deformed in such a way as to move the top edge of the array to
a circle centered somewhere other than over the North Pole (typically,
over Greenland or Canada) and the bottom edge of the array to a circle
that is centered on the South Pole, but lies entirely within Antarctica.
The lines defined by the rows and columns of the rectangular arrays are
locally orthogonal to each other.
POP grids are used extensively locally in oceanographic and ice models.
"""
from matplotlib import rcParams
import numpy.ma as ma
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset as NetCDFFile
# read in data from netCDF file.
infile = 'ccsm_popgrid.nc'
fpin = NetCDFFile(infile)
# 2D curvilinear coordinate arrays of the POP grid (one lat/lon per cell).
tlat = fpin.variables['TLAT'][:]
tlon = fpin.variables['TLONG'][:]
# masked array returned, masked where data == _FillValue
temp = fpin.variables['TEMP'][:]
fpin.close()
# make longitudes monotonically increasing.
tlon = np.where(np.greater_equal(tlon,min(tlon[:,0])),tlon-360,tlon)
# stack grids side-by-side (in longitudinal direction), so
# any range of longitudes may be plotted on a world map.
tlon = np.concatenate((tlon,tlon+360),1)
tlat = np.concatenate((tlat,tlat),1)
temp = ma.concatenate((temp,temp),1)
tlon = tlon-360.
plt.figure(figsize=(6,8))
plt.subplot(2,1,1)
# subplot 1 just shows POP grid cells.
m = Basemap(projection='merc', lat_ts=20, llcrnrlon=-180, \
    urcrnrlon=180, llcrnrlat=-84, urcrnrlat=84, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='white')
# Project the curvilinear lat/lon arrays into map coordinates.
x, y = m(tlon,tlat)
im = m.pcolormesh(x,y,ma.masked_array(np.zeros(temp.shape,'f'), temp.mask),
                  shading='faceted', antialiased=True, cmap=plt.cm.cool,
                  vmin=0, vmax=0)
# disclaimer: these are not really the grid cells because of the
# way pcolor interprets the x and y args.
plt.title('(A) CCSM POP Grid Cells')
# subplot 2 is a contour plot of surface temperature from the
# CCSM ocean model.
plt.subplot(2,1,2)
m.drawcoastlines()
m.fillcontinents(color='white')
CS1 = m.contourf(x,y,temp,15)
CS2 = m.contour(x,y,temp,15,colors='black',linewidths=0.5)
plt.title('(B) Surface Temp contours on POP Grid')
plt.show()
#plt.savefig('ccsm_popgrid.ps')
| matplotlib/basemap | examples/ccsm_popgrid.py | Python | mit | 2,884 | [
"NetCDF"
] | 83e2c67ff8542b39e4eeba13b0d4cffe9fec46d1e8547102aad31fe8bd30cc66 |
#!/usr/bin/env python
"""OfficialTiming.py - Asap timing script.
Reports timing of various types of dynamics in usec/atom/timestep.
"""
from numpy import *
from asap3 import *
from asap3.md.verlet import VelocityVerlet
from asap3.md.langevin import Langevin
from asap3.md.npt import NPT
from asap3.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.lattice.cubic import FaceCenteredCubic, BodyCenteredCubic
from ase.lattice.compounds import L1_2
#from Asap.Timing import *
import sys, cPickle, time, commands, os, re
import numpy as np
from asap3.testtools import ReportTest
import asap3.Internal.Threads
from StringIO import StringIO
import sys
import commands
import time
# Command-line switches: "-t" enables Asap threading; "-T" forces two
# threads even when the runtime would not choose them.
nthreads = 1
if len(sys.argv) > 1 and sys.argv[1] == "-t":
    nthreads = AsapThreads()
if len(sys.argv) > 1 and sys.argv[1] == "-T":
    nthreads = AsapThreads(2, force=True)
# Record where/when this benchmark run happens for the log file.
host = commands.getoutput("hostname")
logfilename = "officialtiming.log"
when = time.strftime("%a %d %b %Y %H:%M", time.localtime(time.time()))
asapversion = get_version()
# Fixed seed so the benchmark systems are reproducible between runs.
random.seed([42, 12345])
class Logger(StringIO):
    """In-memory log buffer that echoes every write to stdout and, on
    close(), appends the accumulated text to a log file."""
    def __init__(self, filename):
        StringIO.__init__(self)
        # Path of the file the buffered log is appended to on close().
        self.logfile = filename
    def write(self, x):
        # Echo to the terminal and keep a copy in the internal buffer.
        sys.stdout.write(x)
        StringIO.write(self, x)
    def close(self):
        # Flush the whole buffered log to the log file, then discard it.
        outfile = open(self.logfile, "a")
        outfile.write(self.getvalue())
        outfile.close()
        StringIO.close(self)
logger = Logger(logfilename)
def Timing(name, func, timesteps, natoms):
    """Time `func(timesteps)` and log the cost in usec/atom/timestep.

    A short warm-up call (2 steps) is made first so caches and neighbor
    lists are built before the measured run.  Both CPU time and wall time
    are reported, together with the CPU/wall ratio in percent.
    """
    print "Preparing timing"
    func(2)
    print "Running timing:", name
    startcpu, startwall = time.clock(), time.time()
    func(timesteps)
    cpu, wall = time.clock() - startcpu, time.time() - startwall
    fraction = 100.0 * cpu/wall
    print "Time:", cpu, "CPU seconds ", wall, "Wall seconds"
    # Normalize to microseconds per atom per timestep.
    cpu *= 1e6 / (timesteps * natoms)
    wall *= 1e6 / (timesteps * natoms)
    logger.write("%s: %.2f usec/atom/timestep CPU-time %.2f Wall time (%.1f%%)\n"
                 % (name, cpu, wall, fraction))
def temperature(atoms):
    """Return the instantaneous temperature in Kelvin from equipartition:
    T = (2/3) * Ekin / (N * kB)."""
    return 2.0/3.0 * atoms.get_kinetic_energy() / len(atoms) / units.kB
def MakeCu(T=300, size=(29,29,30)):
    """Build an FCC copper block of `size` unit cells, attach an EMT
    calculator and equilibrate it near temperature `T` (K) with a short
    Langevin run.  Returns the equilibrated Atoms object."""
    print "Preparing", T, "K Copper system."
    atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]],
                              symbol='Cu', size=size)
    atoms.set_calculator(EMT())
    # Start at twice the target temperature; roughly half the kinetic
    # energy flows into potential energy during equilibration.
    MaxwellBoltzmannDistribution(atoms, 2*T * units.kB)
    #dyn = VelocityVerlet(atoms, 5*units.fs)
    dyn = Langevin(atoms, 5*units.fs, T*units.kB, 0.05)
    dyn.run(50)
    print "Done. Temperature =", temperature(atoms), \
          "K. Number of atoms: ", len(atoms)
    return atoms
def MakeCu3Ni(T=300):
    """Build an L1_2-ordered NiCu3 alloy block, attach an EMT calculator
    and equilibrate it near temperature `T` (K) with a short Langevin run.
    Returns the equilibrated Atoms object."""
    print "Preparing", T, "K NiCu3 system."
    atoms = L1_2(directions=[[1,0,0],[0,1,0],[0,0,1]], symbol=('Ni', 'Cu'),
                 latticeconstant=3.61, size=(29,29,30))
    atoms.set_calculator(EMT())
    # Start at twice the target temperature (see MakeCu).
    MaxwellBoltzmannDistribution(atoms, 2*T * units.kB)
    #dyn = VelocityVerlet(atoms, 5*units.fs)
    dyn = Langevin(atoms, 5*units.fs, T*units.kB, 0.05)
    dyn.run(50)
    print "Done. Temperature =", temperature(atoms), \
          "K. Number of atoms: ", len(atoms)
    return atoms
# Make sure that CPU speed is revved up.
dummy = L1_2(directions=[[1,0,0],[0,1,0],[0,0,1]], symbol=('Au', 'Cu'),
             latticeconstant=4.08, size=(10,10,10), debug=0)
dummy.set_calculator(EMT())
dyn = Langevin(dummy, 5*units.fs, 300*units.kB, 0.05)
dyn.run(10)
del dummy
# Log run metadata (host, time, CPU info, Asap version, thread count).
logger.write("\n\nRunning on %s %s\n" % (host, when))
modelname = "Unknown CPU model name"
cpumhz = "Unknown CPU speed"
try:
    lines = open("/proc/cpuinfo").readlines()
    for line in lines:
        if line[:10] == "model name":
            modelname = line
            break
    for line in lines:
        if line[:7] == "cpu MHz":
            cpumhz = line
            break
except:
    print "Cannot get CPU info from /proc/cpuinfo"
logger.write(cpumhz)
logger.write(modelname)
logger.write(asapversion+"\n")
logger.write("Number of threads: " + str(nthreads)+"\n")
# Benchmark 1: plain Velocity-Verlet on 300 K copper.
atoms300 = MakeCu()
atoms = Atoms(atoms300)
atoms.set_calculator(EMT())
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("Ver300", dyn.run, 50, len(atoms))
# Benchmark 2: same dynamics on 1000 K copper (larger neighbor churn).
atoms = MakeCu(1000)
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("Ver1000", dyn.run, 50, len(atoms))
# Benchmark 3: Langevin thermostat.
atoms = Atoms(atoms300)
atoms.set_calculator(EMT())
dyn = Langevin(atoms, 5*units.fs, 300*units.kB, 0.001)
Timing("Langevin", dyn.run, 50, len(atoms))
# Benchmark 4: constant-pressure NPT dynamics.
atoms = Atoms(atoms300)
atoms.set_calculator(EMT())
dyn = NPT(atoms, 5*units.fs, 300*units.kB, 0, 25*units.fs,
          (75*units.fs)**2 * 140*units.GPa)
Timing("NPT", dyn.run, 50, len(atoms))
# Benchmark 5: free (non-periodic) boundary conditions.
atoms = Atoms(atoms300, pbc=(0,0,0))
atoms.set_calculator(EMT())
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("FreeBC", dyn.run, 50, len(atoms))
# Benchmark 6: two-component alloy.
atoms = MakeCu3Ni()
atoms.set_calculator(EMT())
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("Cu3Ni", dyn.run, 50, len(atoms))
# Benchmark 7: very small system, many timesteps (per-step overhead).
atoms = MakeCu(size=(6,6,7))
atoms.set_calculator(EMT())
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("Tiny", dyn.run, 5000, len(atoms))
# Benchmark 8: Lennard-Jones potential for copper-like parameters.
elements = [29]
epsilon = [0.15]
sigma = [2.34]
rcut = 0.5*(sqrt(3)+2)*1.09*sigma[0]
atoms = Atoms(atoms300)
atoms.set_calculator(LennardJones(elements, epsilon, sigma, rcut, True))
dyn = VelocityVerlet(atoms, 5*units.fs)
Timing("L-J", dyn.run, 50, len(atoms))
logger.close()
| auag92/n2dm | Asap-3.8.4/Test/Timing/OfficialTiming.py | Python | mit | 5,335 | [
"ASE"
] | 311046883d662974dac71c2e1e32853c14c3b47d39e61882b00ab3c2ca7436a1 |
from sys import stderr
from time import time
from math import sqrt, ceil
from bitarray import bitarray
from random import random
from tempfile import mkdtemp
import os
import pysam as ps
start_time = time()
def tabout(*args):
    """Join the arguments into a single tab-delimited string.

    ``None`` values are rendered as the literal string "NA"; every other
    value is converted with ``str``.
    """
    def _render(value):
        return 'NA' if value is None else str(value)
    return "\t".join(_render(value) for value in args)
def update(message):
    """Print a progress message to stderr, followed by the wall-clock time
    elapsed since module import (module-level `start_time`) and a blank
    line.
    """
    print >> stderr, message
    print >> stderr, "%d sec. elapsed" % (time() - start_time)
    print >> stderr, ""
def read_mq_map(mapname, chromosome, length):
    """Return a bitarray of `length` bits with bit i set iff base i of
    `chromosome` is mappable according to the file `mapname`.

    `mapname` contains tab-separated ``chrom<TAB>start<TAB>stop`` rows;
    only rows whose chrom equals `chromosome` are applied.  Coordinates
    appear to be 0-based, end-exclusive (BED-style) -- TODO confirm.
    """
    mqmap = bitarray([0]* length)
    # read in the mappability information marking the mappable bases
    with open(mapname, 'r') as f:
        for line in f:
            chrom,start,stop = line.strip().split("\t")
            if chrom == chromosome:
                mqmap[int(start):int(stop)] = True
    return mqmap
def CreateTempDir():
    """Create a fresh temporary directory and return its path.

    Thin wrapper around tempfile.mkdtemp; the caller is responsible for
    removing the directory afterwards (see RemoveDir).
    """
    return mkdtemp()
def RemoveDir(path):
    """Remove the (empty) directory at `path`; on failure print an error
    to stderr and terminate the program with exit status 1.
    """
    try:
        os.rmdir(path)
    except OSError:
        # Bug fix: the original format string was never interpolated with
        # `path`, so the literal "%s" was printed.
        # NOTE(review): OSError also covers non-empty directories and
        # permission errors, not only a missing path.
        print >> stderr, "%s does not exist. Could not remove." % path
        exit(1)
| aakrosh/rdcleanr | src/utils.py | Python | mit | 1,222 | [
"pysam"
] | 3d7d559e995678f66fa899164b1868224bf7ab1745b9f7f5073fa1222998b908 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Config file that contains all config varibles."""
__author__ = 'Chong Guo <armourcy@email.com>'
__copyright__ = 'Copyright 2018, Chong Guo'
__license__ = 'GPL'
import numpy as np
import tensorflow as tf
# Debug flag, if true, will check model shape using assert in each step and skip gray image check part (to save time)
debug = False
# Image size for training
image_size = 224
# Image resize method
image_resize_method = tf.image.ResizeMethod.BILINEAR
# Parameters for neural network
training_iters = 3000000  # The training iterations number
batch_size = 6  # Batch size for training data
display_step = 50  # Step interval for displaying loss and saving summary during training phase
testing_step = 1000  # Step interval for testing and saving image during training phase
saving_step = 10000  # Step interval for saving model during training phase
shuffle_buffer_size = 2000
# UV channel normalization parameters
# NOTE(review): presumably the maximum absolute values of the U and V
# chroma channels used to scale them into [-1, 1] -- confirm against the
# training/inference code that applies them.
u_norm_para = 0.435912
v_norm_para = 0.614777
# Directory for training and testing dataset
training_dir = "train2014"
testing_dir = "test2014"
# Model, result and generated images stored path
summary_path = "summary"
training_summary = summary_path + "/train"
testing_summary = summary_path + "/test"
# Weights for each layer (trainable)
weights = {
    'b_conv4': tf.Variable(tf.truncated_normal([1, 1, 512, 256], stddev=0.01), trainable=True),
    'b_conv3': tf.Variable(tf.truncated_normal([3, 3, 256, 128], stddev=0.01), trainable=True),
    'b_conv2': tf.Variable(tf.truncated_normal([3, 3, 128, 64], stddev=0.01), trainable=True),
    'b_conv1': tf.Variable(tf.truncated_normal([3, 3, 64, 3], stddev=0.01), trainable=True),
    'b_conv0': tf.Variable(tf.truncated_normal([3, 3, 3, 3], stddev=0.01), trainable=True),
    'output_conv': tf.Variable(tf.truncated_normal([3, 3, 3, 2], stddev=0.01), trainable=True),
}
# Gaussian blur kernel (not trainable)
# The divisors (16 and 273) normalize each kernel so its entries sum to 1.
gaussin_blur_3x3 = np.divide([
    [1., 2., 1.],
    [2., 4., 2.],
    [1., 2., 1.],
], 16.)  # (3, 3)
gaussin_blur_3x3 = np.stack((gaussin_blur_3x3, gaussin_blur_3x3), axis=-1)  # (3, 3, 2)
gaussin_blur_3x3 = np.stack((gaussin_blur_3x3, gaussin_blur_3x3), axis=-1)  # (3, 3, 2, 2)
gaussin_blur_5x5 = np.divide([
    [1., 4., 7., 4., 1.],
    [4., 16., 26., 16., 4.],
    [7., 26., 41., 26., 7.],
    [4., 16., 26., 16., 4.],
    [1., 4., 7., 4., 1.],
], 273.)  # (5, 5)
gaussin_blur_5x5 = np.stack((gaussin_blur_5x5, gaussin_blur_5x5), axis=-1)  # (5, 5, 2)
gaussin_blur_5x5 = np.stack((gaussin_blur_5x5, gaussin_blur_5x5), axis=-1)  # (5, 5, 2, 2)
# Fixed (non-trainable) TF variables holding the stacked blur kernels.
# NOTE(review): "gaussin" is a typo for "gaussian", kept because other
# modules may reference these names.
tf_blur_3x3 = tf.Variable(tf.convert_to_tensor(gaussin_blur_3x3, dtype=tf.float32), trainable=False)
tf_blur_5x5 = tf.Variable(tf.convert_to_tensor(gaussin_blur_5x5, dtype=tf.float32), trainable=False)
"Gaussian"
] | d13a7bdbe7a60eeac93930c73bf6e6f45e13d11a64f3e8a9f5d169f5cdefe974 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageConstantPad(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module exposing VTK's vtkImageConstantPad filter as a
    processing node with one vtkImageData input and one vtkImageData
    output."""
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImageConstantPad(), 'Processing.',
            ('vtkImageData',), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
"VTK"
] | eaa1ed6a740a49318701f8bc0c506e20eaa5ca065d0f6802bfc8e40ae98c321c |
#!/usr/bin/env python
#Dan Blankenberg
#For a set of intervals, this tool returns the same set of intervals
#with 2 additional fields: the name of a Table/Feature and the number of
#bases covered. The original intervals are repeated for each Table/Feature.
import sys, struct, optparse, os, random
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.intervals.io
import bx.bitset
try:
import psyco
psyco.full()
except:
pass
assert sys.version_info[:2] >= ( 2, 4 )
class CachedRangesInFile:
    """Random access to a binary file of (start, end) range pairs.

    The file is a flat sequence of little-endian unsigned 32-bit integers,
    two per range.  Ranges are decoded lazily on first access and cached
    in memory thereafter.
    """
    # struct format of one coordinate: little-endian unsigned 32-bit int.
    fmt = '<I'
    fmt_size = struct.calcsize( fmt )
    def __init__( self, filename ):
        self.file_size = os.stat( filename ).st_size
        self.file = open( filename, 'rb' )
        # Two packed integers per range.
        self.length = int( self.file_size / self.fmt_size / 2 )
        # Lazy cache: entry i is None until range i is first read.
        self._cached_ranges = [ None for i in xrange( self.length ) ]
    def __getitem__( self, i ):
        """Return range i as a (start, end) tuple, reading and caching it
        from disk on first access.  Supports negative indices."""
        if self._cached_ranges[i] is not None:
            return self._cached_ranges[i]
        if i < 0: i = self.length + i
        offset = i * self.fmt_size * 2
        self.file.seek( offset )
        try:
            start = struct.unpack( self.fmt, self.file.read( self.fmt_size ) )[0]
            end = struct.unpack( self.fmt, self.file.read( self.fmt_size ) )[0]
        except Exception, e:
            # Short read past end of file -> behave like a list index error.
            raise IndexError, e
        self._cached_ranges[i] = ( start, end )
        return start, end
    def __len__( self ):
        return self.length
class RegionCoverage:
    """Coverage information for one table on one chromosome.

    Backed by two files: ``<base>.covered`` (sorted binary ranges, see
    CachedRangesInFile) and ``<base>.total_coverage`` (total covered
    bases).  Missing files degrade gracefully to empty coverage.
    """
    def __init__( self, filename_base ):
        try:
            self._coverage = CachedRangesInFile( "%s.covered" % filename_base )
        except Exception, e:
            #print "Error loading coverage file %s: %s" % ( "%s.covered" % filename_base, e )
            self._coverage = []
        try:
            self._total_coverage = int( open( "%s.total_coverage" % filename_base ).read() )
        except Exception, e:
            #print "Error loading total coverage file %s: %s" % ( "%s.total_coverage" % filename_base, e )
            self._total_coverage = 0
    def get_start_index( self, start ):
        #binary search: returns index of range closest to start
        if start > self._coverage[-1][1]:
            return len( self._coverage ) - 1
        i = 0
        j = len( self._coverage) - 1
        while i < j:
            k = ( i + j ) / 2
            if start <= self._coverage[k][1]:
                j = k
            else:
                i = k + 1
        return i
    def get_coverage( self, start, end ):
        """Return only the number of covered bases overlapping [start, end)."""
        return self.get_coverage_regions_overlap( start, end )[0]
    def get_coverage_regions_overlap( self, start, end ):
        """Return (covered_bases, overlapping_region_count) for [start, end)."""
        return self.get_coverage_regions_index_overlap( start, end )[0:2]
    def get_coverage_regions_index_overlap( self, start, end ):
        """Return (covered_bases, overlapping_region_count, start_index) for
        the query interval [start, end)."""
        # Query lies entirely outside all coverage ranges.
        if len( self._coverage ) < 1 or start > self._coverage[-1][1] or end < self._coverage[0][0]:
            return 0, 0, 0
        # Query spans everything: use the precomputed total.
        if self._total_coverage and start <= self._coverage[0][0] and end >= self._coverage[-1][1]:
            return self._total_coverage, len( self._coverage ), 0
        coverage = 0
        region_count = 0
        start_index = self.get_start_index( start )
        # Scan forward from the first candidate range, accumulating overlap.
        for i in xrange( start_index, len( self._coverage ) ):
            c_start, c_end = self._coverage[i]
            if c_start > end:
                break
            if c_start <= end and c_end >= start:
                coverage += min( end, c_end ) - max( start, c_start )
                region_count += 1
        return coverage, region_count, start_index
class CachedCoverageReader:
    """Reader of per-table, per-chromosome coverage data rooted at
    `base_file_path`, keeping at most `buffer` chromosomes of each table
    in memory at a time."""
    def __init__( self, base_file_path, buffer = 10, table_names = None ):
        self._base_file_path = base_file_path
        self._buffer = buffer #number of chromosomes to keep in memory at a time
        # table name -> { chrom -> RegionCoverage }
        self._coverage = {}
        if table_names is None: table_names = os.listdir( self._base_file_path )
        for tablename in table_names: self._coverage[tablename] = {}
    def iter_table_coverage_by_region( self, chrom, start, end ):
        """Yield (table_name, covered_bases) for every table."""
        for tablename, coverage, regions in self.iter_table_coverage_regions_by_region( chrom, start, end ):
            yield tablename, coverage
    def iter_table_coverage_regions_by_region( self, chrom, start, end ):
        """Yield (table_name, covered_bases, region_count) for every table."""
        for tablename, coverage, regions, index in self.iter_table_coverage_regions_index_by_region( chrom, start, end ):
            yield tablename, coverage, regions
    def iter_table_coverage_regions_index_by_region( self, chrom, start, end ):
        """Yield (table_name, covered_bases, region_count, start_index) for
        every table, loading chromosome data lazily and evicting a random
        chromosome when the per-table buffer is full."""
        for tablename, chromosomes in self._coverage.iteritems():
            if chrom not in chromosomes:
                if len( chromosomes ) >= self._buffer:
                    #randomly remove one chromosome from this table
                    del chromosomes[ chromosomes.keys().pop( random.randint( 0, self._buffer - 1 ) ) ]
                chromosomes[chrom] = RegionCoverage( os.path.join ( self._base_file_path, tablename, chrom ) )
            coverage, regions, index = chromosomes[chrom].get_coverage_regions_index_overlap( start, end )
            yield tablename, coverage, regions, index
class TableCoverageSummary:
    """Accumulates coverage statistics of user intervals against every
    annotation table, tracking both raw ("all") totals and non-redundant
    ("nr") totals where overlapping input intervals are collapsed."""
    def __init__( self, coverage_reader, chrom_lengths ):
        self.coverage_reader = coverage_reader
        self.chrom_lengths = chrom_lengths
        self.chromosome_coverage = {} #dict of bitset by chromosome holding user's collapsed input intervals
        self.total_interval_size = 0 #total size of user's input intervals
        self.total_interval_count = 0 #total number of user's input intervals
        self.table_coverage = {} #dict of total coverage by user's input intervals by table
        self.table_chromosome_size = {} #dict of dict of table:chrom containing total coverage of table for a chrom
        self.table_chromosome_count = {} #dict of dict of table:chrom containing total number of coverage ranges of table for a chrom
        self.table_regions_overlaped_count = {} #total number of table regions overlaping user's input intervals (non unique)
        self.interval_table_overlap_count = {} #total number of user input intervals which overlap table
        self.region_size_errors = {} #dictionary of lists of invalid ranges by chromosome
    def add_region( self, chrom, start, end ):
        """Record one user interval, clamping it to the known chromosome
        length and updating all per-table counters."""
        chrom_length = self.chrom_lengths.get( chrom )
        region_start = min( start, chrom_length )
        region_end = min( end, chrom_length )
        region_length = region_end - region_start
        # Remember truncated/empty intervals so they can be reported later.
        if region_length < 1 or region_start != start or region_end != end:
            if chrom not in self.region_size_errors:
                self.region_size_errors[chrom] = []
            self.region_size_errors[chrom].append( ( start, end ) )
            if region_length < 1: return
        self.total_interval_size += region_length
        self.total_interval_count += 1
        # Collapse overlapping input intervals into a per-chromosome bitset.
        if chrom not in self.chromosome_coverage:
            self.chromosome_coverage[chrom] = bx.bitset.BitSet( chrom_length )
        self.chromosome_coverage[chrom].set_range( region_start, region_length )
        for table_name, coverage, regions in self.coverage_reader.iter_table_coverage_regions_by_region( chrom, region_start, region_end ):
            if table_name not in self.table_coverage:
                self.table_coverage[table_name] = 0
                self.table_chromosome_size[table_name] = {}
                self.table_regions_overlaped_count[table_name] = 0
                self.interval_table_overlap_count[table_name] = 0
                self.table_chromosome_count[table_name] = {}
            if chrom not in self.table_chromosome_size[table_name]:
                self.table_chromosome_size[table_name][chrom] = self.coverage_reader._coverage[table_name][chrom]._total_coverage
                self.table_chromosome_count[table_name][chrom] = len( self.coverage_reader._coverage[table_name][chrom]._coverage )
            self.table_coverage[table_name] += coverage
            if coverage:
                self.interval_table_overlap_count[table_name] += 1
            self.table_regions_overlaped_count[table_name] += regions
    def iter_table_coverage( self ):
        """Yield one summary tuple per table combining the raw totals with
        the non-redundant totals computed from the collapsed bitsets."""
        def get_nr_coverage():
            #returns non-redundant coverage, where user's input intervals have been collapse to resolve overlaps
            table_coverage = {} #dictionary of tables containing number of table bases overlaped by nr intervals
            interval_table_overlap_count = {} #dictionary of tables containing number of nr intervals overlaping table
            table_regions_overlap_count = {} #dictionary of tables containing number of regions overlaped (unique)
            interval_count = 0 #total number of nr intervals
            interval_size = 0 #holds total size of nr intervals
            region_start_end = {} #holds absolute start,end for each user input chromosome
            for chrom, chromosome_bitset in self.chromosome_coverage.iteritems():
                #loop through user's collapsed input intervals
                end = 0
                last_end_index = {}
                interval_size += chromosome_bitset.count_range()
                while True:
                    if end >= chromosome_bitset.size: break
                    start = chromosome_bitset.next_set( end )
                    if start >= chromosome_bitset.size: break
                    end = chromosome_bitset.next_clear( start )
                    interval_count += 1
                    if chrom not in region_start_end:
                        region_start_end[chrom] = [start, end]
                    else:
                        region_start_end[chrom][1] = end
                    for table_name, coverage, region_count, start_index in self.coverage_reader.iter_table_coverage_regions_index_by_region( chrom, start, end ):
                        if table_name not in table_coverage:
                            table_coverage[table_name] = 0
                            interval_table_overlap_count[table_name] = 0
                            table_regions_overlap_count[table_name] = 0
                        table_coverage[table_name] += coverage
                        if coverage:
                            interval_table_overlap_count[table_name] += 1
                        table_regions_overlap_count[table_name] += region_count
                        # Avoid double-counting a table region shared by two
                        # consecutive nr intervals.
                        if table_name in last_end_index and last_end_index[table_name] == start_index:
                            table_regions_overlap_count[table_name] -= 1
                        last_end_index[table_name] = start_index + region_count - 1
            table_region_coverage = {} #total coverage for tables by bounding nr interval region
            table_region_count = {} #total number for tables by bounding nr interval region
            for chrom, start_end in region_start_end.items():
                for table_name, coverage, region_count in self.coverage_reader.iter_table_coverage_regions_by_region( chrom, start_end[0], start_end[1] ):
                    if table_name not in table_region_coverage:
                        table_region_coverage[table_name] = 0
                        table_region_count[table_name] = 0
                    table_region_coverage[table_name] += coverage
                    table_region_count[table_name] += region_count
            return table_region_coverage, table_region_count, interval_count, interval_size, table_coverage, table_regions_overlap_count, interval_table_overlap_count
        table_region_coverage, table_region_count, nr_interval_count, nr_interval_size, nr_table_coverage, nr_table_regions_overlap_count, nr_interval_table_overlap_count = get_nr_coverage()
        for table_name in self.table_coverage:
            #TODO: determine a type of statistic, then calculate and report here
            yield table_name, sum( self.table_chromosome_size.get( table_name, {} ).values() ), sum( self.table_chromosome_count.get( table_name, {} ).values() ), table_region_coverage.get( table_name, 0 ), table_region_count.get( table_name, 0 ), self.total_interval_count, self.total_interval_size, self.table_coverage[table_name], self.table_regions_overlaped_count.get( table_name, 0), self.interval_table_overlap_count.get( table_name, 0 ), nr_interval_count, nr_interval_size, nr_table_coverage[table_name], nr_table_regions_overlap_count.get( table_name, 0 ), nr_interval_table_overlap_count.get( table_name, 0 )
def profile_per_interval( interval_filename, chrom_col, start_col, end_col, out_filename, keep_empty, coverage_reader ):
    """Write one output row per (input interval, table) pair: the original
    interval fields plus table name, covered bases, and region count.
    Rows with zero coverage are skipped unless `keep_empty` is true."""
    out = open( out_filename, 'wb' )
    for region in bx.intervals.io.NiceReaderWrapper( open( interval_filename, 'rb' ), chrom_col = chrom_col, start_col = start_col, end_col = end_col, fix_strand = True, return_header = False, return_comments = False ):
        for table_name, coverage, region_count in coverage_reader.iter_table_coverage_regions_by_region( region.chrom, region.start, region.end ):
            if keep_empty or coverage:
                #only output regions that have atleast 1 base covered unless empty are requested
                out.write( "%s\t%s\t%s\t%s\n" % ( "\t".join( region.fields ), table_name, coverage, region_count ) )
    out.close()
def profile_summary( interval_filename, chrom_col, start_col, end_col, out_filename, keep_empty, coverage_reader, chrom_lengths ):
    """Write one summary row per table aggregating coverage over all input
    intervals (see TableCoverageSummary), then report any intervals that
    had to be truncated to the known chromosome lengths."""
    out = open( out_filename, 'wb' )
    table_coverage_summary = TableCoverageSummary( coverage_reader, chrom_lengths )
    for region in bx.intervals.io.NiceReaderWrapper( open( interval_filename, 'rb' ), chrom_col = chrom_col, start_col = start_col, end_col = end_col, fix_strand = True, return_header = False, return_comments = False ):
        table_coverage_summary.add_region( region.chrom, region.start, region.end )
    out.write( "#tableName\ttableChromosomeCoverage\ttableChromosomeCount\ttableRegionCoverage\ttableRegionCount\tallIntervalCount\tallIntervalSize\tallCoverage\tallTableRegionsOverlaped\tallIntervalsOverlapingTable\tnrIntervalCount\tnrIntervalSize\tnrCoverage\tnrTableRegionsOverlaped\tnrIntervalsOverlapingTable\n" )
    for table_name, table_chromosome_size, table_chromosome_count, table_region_coverage, table_region_count, total_interval_count, total_interval_size, total_coverage, table_regions_overlaped_count, interval_region_overlap_count, nr_interval_count, nr_interval_size, nr_coverage, nr_table_regions_overlaped_count, nr_interval_table_overlap_count in table_coverage_summary.iter_table_coverage():
        if keep_empty or total_coverage:
            #only output tables that have atleast 1 base covered unless empty are requested
            out.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( table_name, table_chromosome_size, table_chromosome_count, table_region_coverage, table_region_count, total_interval_count, total_interval_size, total_coverage, table_regions_overlaped_count, interval_region_overlap_count, nr_interval_count, nr_interval_size, nr_coverage, nr_table_regions_overlaped_count, nr_interval_table_overlap_count ) )
    out.close()
    #report chrom size errors as needed:
    if table_coverage_summary.region_size_errors:
        print "Regions provided extended beyond known chromosome lengths, and have been truncated as necessary, for the following intervals:"
        for chrom, regions in table_coverage_summary.region_size_errors.items():
            # Show at most three offending intervals per chromosome.
            if len( regions ) > 3:
                extra_region_info = ", ... "
            else:
                extra_region_info = ""
            print "%s has max length of %s, exceeded by %s%s." % ( chrom, chrom_lengths.get( chrom ), ", ".join( map( str, regions[:3] ) ), extra_region_info )
class ChromosomeLengths:
    """Map of chromosome name -> length, parsed from a tab-separated file.

    Malformed lines (missing fields, non-integer length) are skipped; a
    missing or unreadable file yields an empty map.  Previously both failure
    modes were bare ``except:`` clauses that would also swallow e.g.
    KeyboardInterrupt; they are now narrowed to the exceptions actually
    expected from parsing and file access.
    """
    def __init__( self, filename ):
        self.chroms = {}
        try:
            for line in open( filename ):
                try:
                    fields = line.strip().split( "\t" )
                    self.chroms[fields[0]] = int( fields[1] )
                except ( IndexError, ValueError ):
                    # line lacks a second field or the length is not an integer
                    continue
        except IOError:
            # unreadable/missing file: leave the map empty (original behavior)
            pass
    def get( self, name ):
        """Return the known length for *name*, or bx.bitset.MAX when unknown.

        The fallback is now evaluated lazily, so lookups of known names no
        longer touch the bx module at all.
        """
        try:
            return self.chroms[name]
        except KeyError:
            return bx.bitset.MAX
def __main__():
    """Command-line entry point: parse options, build a CachedCoverageReader,
    then run either the per-table summary or the per-interval profile."""
    parser = optparse.OptionParser()
    parser.add_option(
        '-k','--keep_empty',
        action="store_true",
        dest='keep_empty',
        default=False,
        help='Keep tables with 0 coverage'
    )
    parser.add_option(
        '-b','--buffer',
        dest='buffer',
        type='int',default=10,
        help='Number of Chromosomes to keep buffered'
    )
    parser.add_option(
        '-c','--chrom_col',
        dest='chrom_col',
        type='int',default=1,
        help='Chromosome column'
    )
    parser.add_option(
        '-s','--start_col',
        dest='start_col',
        type='int',default=2,
        help='Start Column'
    )
    parser.add_option(
        '-e','--end_col',
        dest='end_col',
        type='int',default=3,
        help='End Column'
    )
    parser.add_option(
        '-p','--path',
        dest='path',
        type='str',default='/depot/data2/galaxy/annotation_profiler/hg18',
        help='Path to profiled data for this organism'
    )
    parser.add_option(
        '-l','--lengths',
        dest='lengths',
        type='str',default='test-data/shared/ucsc/hg18.len',
        help='Path to chromosome lengths data for this organism'
    )
    parser.add_option(
        '-t','--table_names',
        dest='table_names',
        type='str',default='None',
        help='Path to profiled data for this organism'
    )
    parser.add_option(
        '-i','--input',
        dest='interval_filename',
        type='str',
        help='Input Interval File'
    )
    parser.add_option(
        '-o','--output',
        dest='out_filename',
        type='str',
        help='Input Interval File'
    )
    parser.add_option(
        '-S','--summary',
        action="store_true",
        dest='summary',
        default=False,
        help='Display Summary Results'
    )
    options, args = parser.parse_args()
    # the literal string 'None' means "profile every table"
    table_names = options.table_names.split( "," )
    if table_names == ['None']: table_names = None
    coverage_reader = CachedCoverageReader( options.path, buffer = options.buffer, table_names = table_names )
    # column options are 1-based on the command line, 0-based internally
    if options.summary:
        profile_summary( options.interval_filename, options.chrom_col - 1, options.start_col - 1, options.end_col -1, options.out_filename, options.keep_empty, coverage_reader, ChromosomeLengths( options.lengths ) )
    else:
        profile_per_interval( options.interval_filename, options.chrom_col - 1, options.start_col - 1, options.end_col -1, options.out_filename, options.keep_empty, coverage_reader )
if __name__ == "__main__": __main__()
| dbcls/dbcls-galaxy | tools/annotation_profiler/annotation_profiler_for_interval.py | Python | mit | 19,049 | [
"Galaxy"
] | 218c73bc861fe712a25e0a0349fe304e68f97b7bcc5a709bb5cef6a108c040fb |
import os
import sys
import math
import vtk
import time
import functools
import traceback
import PythonQt
from PythonQt import QtCore, QtGui
import director.applogic as app
from director import objectmodel as om
from director import transformUtils
from director import visualization as vis
from director.transformUtils import getTransformFromAxes
from director.timercallback import TimerCallback
from director import affordancemanager
from director.affordanceitems import *
from director.visualization import *
from director.filterUtils import *
from director.fieldcontainer import FieldContainer
from director.segmentationroutines import *
from director import cameraview
from thirdparty import qhull_2d
from thirdparty import min_bounding_rect
import numpy as np
import vtkNumpy
from debugVis import DebugData
from shallowCopy import shallowCopy
import ioUtils
from director.uuidutil import newUUID
# Corner labels identifying which triangle of the drill wall template an
# annotation refers to.
DRILL_TRIANGLE_BOTTOM_LEFT = 'bottom left'
DRILL_TRIANGLE_BOTTOM_RIGHT = 'bottom right'
DRILL_TRIANGLE_TOP_LEFT = 'top left'
DRILL_TRIANGLE_TOP_RIGHT = 'top right'
# prefer drc plane segmentation instead of PCL
# (fall back to the PCL filter when the drc build of VTK is not available)
try:
    planeSegmentationFilter = vtk.vtkPlaneSegmentation
except AttributeError:
    planeSegmentationFilter = vtk.vtkPCLSACSegmentationPlane
# Optional module-level override for the segmentation view; when None the
# view is looked up by name from the application's view manager.
_defaultSegmentationView = None
def getSegmentationView():
    """Return the override segmentation view if set, else the app's 'Segmentation View'."""
    return _defaultSegmentationView or app.getViewManager().findView('Segmentation View')
def getDRCView():
    """Return the application's main DRC view."""
    return app.getDRCView()
def switchToView(viewName):
    """Make the named view the current one in the view manager."""
    app.getViewManager().switchToView(viewName)
def getCurrentView():
    """Return the currently active render view."""
    return app.getCurrentRenderView()
def initAffordanceManager(view):
    '''
    Normally the affordance manager is initialized by the application.
    This function can be called from scripts and tests to initialize the manager.
    '''
    # stored as a module-level global so other functions in this module can use it
    global affordanceManager
    affordanceManager = affordancemanager.AffordanceObjectModelManager(view)
def cropToLineSegment(polyData, point1, point2):
    """Keep only the points whose projection onto the segment point1->point2
    lies between the two endpoints."""
    segment = np.array(point2) - np.array(point1)
    segmentLength = np.linalg.norm(segment)
    direction = segment / segmentLength
    labeled = labelPointDistanceAlongAxis(polyData, direction, origin=point1, resultArrayName='dist_along_line')
    return thresholdPoints(labeled, 'dist_along_line', [0.0, segmentLength])
'''
icp programmable filter
import vtkFiltersGeneralPython as filtersGeneral
points = inputs[0]
block = inputs[1]
print points.GetNumberOfPoints()
print block.GetNumberOfPoints()
if points.GetNumberOfPoints() < block.GetNumberOfPoints():
block, points = points, block
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(points.VTKObject)
icp.SetTarget(block.VTKObject)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = filtersGeneral.vtkTransformPolyDataFilter()
t.SetInput(points.VTKObject)
t.SetTransform(icp)
t.Update()
output.ShallowCopy(t.GetOutput())
'''
def computeAToB(a, b):
    """Return the relative transform taking frame a to frame b (b * a^-1)."""
    relative = vtk.vtkTransform()
    relative.PostMultiply()
    relative.Concatenate(b)
    relative.Concatenate(a.GetLinearInverse())
    # copy into a fresh transform so the result is independent of a and b
    result = vtk.vtkTransform()
    result.SetMatrix(relative.GetMatrix())
    return result
def lockAffordanceToHand(aff, hand='l_hand'):
    """Re-pose the affordance so it stays rigidly attached to the given hand link.

    On first call the current hand->affordance offset is captured and cached on
    the affordance (aff.handToAffT); subsequent calls reapply that offset to
    the hand's current frame.  Intended to be called repeatedly from a timer.
    """
    linkFrame = getLinkFrame(hand)
    affT = aff.actor.GetUserTransform()
    if not hasattr(aff, 'handToAffT') or not aff.handToAffT:
        # capture the fixed offset between the hand and the affordance
        aff.handToAffT = computeAToB(linkFrame, affT)
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Concatenate(aff.handToAffT)
    t.Concatenate(linkFrame)
    aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
# Timer that keeps the default affordance glued to the hand; created lazily.
handAffUpdater = None
def lockToHandOn():
    """Start a 30 Hz timer that keeps the default affordance locked to the hand."""
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    global handAffUpdater
    if handAffUpdater is None:
        handAffUpdater = TimerCallback()
        handAffUpdater.targetFps = 30
    handAffUpdater.callback = functools.partial(lockAffordanceToHand, aff)
    handAffUpdater.start()
def lockToHandOff():
    """Stop tracking the default affordance to the hand (inverse of lockToHandOn)."""
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    # guard: lockToHandOn may never have been called, in which case the
    # module-level updater is still None (previously an AttributeError here)
    if handAffUpdater is not None:
        handAffUpdater.stop()
    aff.handToAffT = None
class DisparityPointCloudItem(vis.PolyDataItem):
    """Object-model item showing a live stereo-disparity point cloud.

    While 'Visible' is enabled, a timer polls the image manager and refreshes
    the poly data whenever a newer image is available (rate-limited by the
    'Target FPS' property).
    """
    def __init__(self, name, imagesChannel, cameraName, imageManager):
        vis.PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view=None)
        self.addProperty('Channel', imagesChannel)
        self.addProperty('Camera name', cameraName)
        self.addProperty('Decimation', 0, attributes=om.PropertyAttributes(enumNames=['1', '2', '4', '8', '16']))
        self.addProperty('Remove Size', 1000, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=100000.0, singleStep=1000))
        self.addProperty('Target FPS', 1.0, attributes=om.PropertyAttributes(decimals=1, minimum=0.1, maximum=30.0, singleStep=0.1))
        self.addProperty('Max Range', 2.0, attributes=om.PropertyAttributes(decimals=2, minimum=0., maximum=30.0, singleStep=0.25))
        self.timer = TimerCallback()
        self.timer.callback = self.update
        self.lastUtime = 0
        self.imageManager = imageManager
        self.cameraName = cameraName
        self.setProperty('Visible', False)
    def _onPropertyChanged(self, propertySet, propertyName):
        """Start/stop the poll timer on visibility; force a refresh on filter changes."""
        vis.PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
        if propertyName == 'Visible':
            if self.getProperty(propertyName):
                self.timer.start()
            else:
                self.timer.stop()
        elif propertyName in ('Decimation', 'Remove outliers', 'Max Range'):
            # resetting the timestamp forces update() to regenerate the cloud
            self.lastUtime = 0
    def onRemoveFromObjectModel(self):
        vis.PolyDataItem.onRemoveFromObjectModel(self)
        self.timer.stop()
    def update(self):
        """Timer callback: rebuild the point cloud if a newer image has arrived."""
        utime = self.imageManager.queue.getCurrentImageTime(self.cameraName)
        if utime == self.lastUtime:
            return
        if (utime < self.lastUtime ):
            # timestamp went backwards (e.g. log replay restarted); fall
            # through and update anyway
            temp=0 # dummy
        elif (utime - self.lastUtime < 1E6/self.getProperty('Target FPS')):
            # too soon since last update; respect the Target FPS rate limit
            return
        decimation = int(self.properties.getPropertyEnumValue('Decimation'))
        removeSize = int(self.properties.getProperty('Remove Size'))
        rangeThreshold = float(self.properties.getProperty('Max Range'))
        polyData = getDisparityPointCloud(decimation, imagesChannel=self.getProperty('Channel'), cameraName=self.getProperty('Camera name'),
                                          removeOutliers=False, removeSize=removeSize, rangeThreshold = rangeThreshold)
        self.setPolyData(polyData)
        if polyData.GetNumberOfPoints() > 0 and not self.lastUtime:
            self.setProperty('Color By', 'rgb_colors')
        self.lastUtime = utime
def extractLargestCluster(polyData, **kwargs):
    '''
    Calls applyEuclideanClustering and then extracts the first (largest) cluster.
    The given keyword arguments are passed into the applyEuclideanClustering function.
    '''
    polyData = applyEuclideanClustering(polyData, **kwargs)
    # cluster labels are 1-based and sorted by size, so label 1 is the largest
    return thresholdPoints(polyData, 'cluster_labels', [1, 1])
def segmentGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    ''' A More complex ground removal algorithm. Works when plane isn't
    preceisely flat. First clusters on z to find approx ground height, then fits a plane there

    Returns (origin, normal, groundPoints, scenePoints).
    '''
    searchRegionThickness = 0.5
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    # 5th percentile of z approximates the ground height even with clutter
    groundHeight = np.percentile(zvalues, 5)
    vtkNumpy.addNumpyToVtk(polyData, zvalues.copy(), 'z')
    searchRegion = thresholdPoints(polyData, 'z', [groundHeight - searchRegionThickness/2.0, groundHeight + searchRegionThickness/2.0])
    updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
    _, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
    return origin, normal, groundPoints, scenePoints
def segmentGroundPlane():
    """Interactive version of ground segmentation operating on the current
    'pointcloud snapshot' object; shows 'ground points' and 'scene points'."""
    inputObj = om.findObjectByName('pointcloud snapshot')
    inputObj.setProperty('Visible', False)
    polyData = shallowCopy(inputObj.polyData)
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    groundHeight = np.percentile(zvalues, 5)
    # NOTE(review): thresholds on a 'z' point array; presumably the snapshot was
    # produced via addCoordArraysToPolyData which adds it — verify for other inputs
    searchRegion = thresholdPoints(polyData, 'z', [groundHeight - 0.3, groundHeight + 0.3])
    updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
    _, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [0.05, 10])
    updatePolyData(groundPoints, 'ground points', alpha=0.3)
    updatePolyData(scenePoints, 'scene points', alpha=0.3)
    #scenePoints = applyEuclideanClustering(scenePoints, clusterTolerance=0.10, minClusterSize=100, maxClusterSize=1e6)
    #updatePolyData(scenePoints, 'scene points', colorByName='cluster_labels')
def applyLocalPlaneFit(polyData, searchPoint, searchRadius, searchRadiusEnd=None, removeGroundFirst=True):
    """Fit a plane to the points near searchPoint and return (fitPoints, normal).

    The cloud is optionally voxelized and ground-stripped, a plane is fit
    within searchRadius of searchPoint (then refined within searchRadiusEnd
    when given), and the inlier cluster closest to searchPoint is returned.

    Note: this function previously contained a normal-estimation code path
    after the return statement; it was unreachable and has been removed.
    """
    useVoxelGrid = True
    voxelGridSize = 0.03
    distanceToPlaneThreshold = 0.02
    if useVoxelGrid:
        polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
    if removeGroundFirst:
        _, polyData = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.04)
    cropped = cropToSphere(polyData, searchPoint, searchRadius)
    updatePolyData(cropped, 'crop to sphere', visible=False, colorByName='distance_to_point')
    polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=searchPoint, searchRadius=searchRadius)
    if searchRadiusEnd is not None:
        # refine: refit constrained to be near-parallel to the first estimate
        polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, perpendicularAxis=normal, angleEpsilon=math.radians(30), searchOrigin=searchPoint, searchRadius=searchRadiusEnd)
    fitPoints = thresholdPoints(polyData, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
    updatePolyData(fitPoints, 'fitPoints', visible=False)
    fitPoints = labelDistanceToPoint(fitPoints, searchPoint)
    clusters = extractClusters(fitPoints, clusterTolerance=0.05, minClusterSize=3)
    # keep the inlier cluster nearest to the picked point
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    fitPoints = clusters[0]
    return fitPoints, normal
def orientToMajorPlane(polyData, pickedPoint):
    '''
    Find the largest plane and transform the cloud to align that plane
    Use the given point as the origin

    Returns (transformedPolyData, planeFrame).
    '''
    distanceToPlaneThreshold=0.02
    searchRadius = 0.5
    planePoints, origin, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=pickedPoint, searchRadius=searchRadius, returnOrigin=True)
    vis.updatePolyData(planePoints, 'local plane fit', color=[0,1,0], parent=getDebugFolder(), visible=False)
    planeFrame = transformUtils.getTransformFromOriginAndNormal(pickedPoint, normal)
    vis.updateFrame(planeFrame, 'plane frame', scale=0.15, parent=getDebugFolder(), visible=False)
    polyData = transformPolyData(polyData, planeFrame.GetLinearInverse() )
    # if the mean point is below the horizontal plane, flip the cloud
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    midCloudHeight = np.mean(zvalues)
    if (midCloudHeight < 0):
        flipTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,180,0])
        polyData = transformPolyData(polyData, flipTransform )
    return polyData, planeFrame
def getMajorPlanes(polyData, useVoxelGrid=True):
    """Iteratively RANSAC-fit planes, peeling off the largest inlier cluster
    each round; returns the list of plane clusters (at most 25, stopping early
    once a fit yields fewer than minClusterSize points)."""
    voxelGridSize = 0.01
    distanceToPlaneThreshold = 0.02
    if useVoxelGrid:
        polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
    polyDataList = []
    minClusterSize = 100
    while len(polyDataList) < 25:
        f = planeSegmentationFilter()
        f.SetInput(polyData)
        f.SetDistanceThreshold(distanceToPlaneThreshold)
        f.Update()
        polyData = shallowCopy(f.GetOutput())
        outliers = thresholdPoints(polyData, 'ransac_labels', [0, 0])
        inliers = thresholdPoints(polyData, 'ransac_labels', [1, 1])
        largestCluster = extractLargestCluster(inliers)
        #i = len(polyDataList)
        #showPolyData(inliers, 'inliers %d' % i, color=getRandomColor(), parent='major planes')
        #showPolyData(outliers, 'outliers %d' % i, color=getRandomColor(), parent='major planes')
        #showPolyData(largestCluster, 'cluster %d' % i, color=getRandomColor(), parent='major planes')
        if largestCluster.GetNumberOfPoints() > minClusterSize:
            polyDataList.append(largestCluster)
            # continue peeling planes off the remaining (outlier) points
            polyData = outliers
        else:
            break
    return polyDataList
def showMajorPlanes(polyData=None):
    """Segment the major planes in the cloud (the 'pointcloud snapshot' object
    by default) and display each one under a 'major planes' folder."""
    if not polyData:
        inputObj = om.findObjectByName('pointcloud snapshot')
        inputObj.setProperty('Visible', False)
        polyData = inputObj.polyData
    om.removeFromObjectModel(om.findObjectByName('major planes'))
    folderObj = om.findObjectByName('segmentation')
    folderObj = om.getOrCreateContainer('major planes', folderObj)
    # restrict to points between 1 and 4 meters of the sensor
    origin = SegmentationContext.getGlobalInstance().getViewFrame().GetPosition()
    polyData = labelDistanceToPoint(polyData, origin)
    polyData = thresholdPoints(polyData, 'distance_to_point', [1, 4])
    polyDataList = getMajorPlanes(polyData)
    for i, polyData in enumerate(polyDataList):
        obj = showPolyData(polyData, 'plane %d' % i, color=getRandomColor(), visible=True, parent='major planes')
        obj.setProperty('Point Size', 3)
def cropToBox(polyData, transform, dimensions):
    '''
    Crop polyData to an oriented box centered on the given transform;
    dimensions is length 3 describing box dimensions (one per transform axis).
    '''
    center = np.array(transform.GetPosition())
    boxAxes = transformUtils.getAxesFromTransform(transform)
    for boxAxis, extent in zip(boxAxes, dimensions):
        halfSpan = np.array(boxAxis) * (extent / 2.0)
        polyData = cropToLineSegment(polyData, center - halfSpan, center + halfSpan)
    return polyData
def cropToBounds(polyData, transform, bounds):
    '''
    Crop polyData using per-axis [min, max] limits measured along the axes of
    the given transform (bounds is a 2x3 of min/max values).
    '''
    center = np.array(transform.GetPosition())
    frameAxes = transformUtils.getAxesFromTransform(transform)
    for frameAxis, limits in zip(frameAxes, bounds):
        unitAxis = np.array(frameAxis) / np.linalg.norm(frameAxis)
        polyData = cropToLineSegment(polyData, center + unitAxis * limits[0], center + unitAxis * limits[1])
    return polyData
def cropToSphere(polyData, origin, radius):
    """Return the subset of polyData within radius of origin; the result also
    carries a 'distance_to_point' array from the labeling step."""
    labeled = labelDistanceToPoint(polyData, origin)
    return thresholdPoints(labeled, 'distance_to_point', [0, radius])
def applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=None, perpendicularAxis=None, angleEpsilon=0.2, returnOrigin=False, searchOrigin=None, searchRadius=None):
    """RANSAC plane fit.

    Fits a plane to polyData (optionally restricted to a sphere around
    searchOrigin, optionally constrained near-perpendicular to
    perpendicularAxis).  Labels every point of the full input with its signed
    distance in 'dist_to_plane'.  Returns (polyData, normal) or, when
    returnOrigin is True, (polyData, origin, normal).  The normal is flipped
    to agree with expectedNormal (default [-1,0,0]).
    """
    expectedNormal = expectedNormal if expectedNormal is not None else [-1,0,0]
    fitInput = polyData
    if searchOrigin is not None:
        assert searchRadius
        fitInput = cropToSphere(fitInput, searchOrigin, searchRadius)
    # perform plane segmentation
    f = planeSegmentationFilter()
    f.SetInput(fitInput)
    f.SetDistanceThreshold(distanceThreshold)
    if perpendicularAxis is not None:
        f.SetPerpendicularConstraintEnabled(True)
        f.SetPerpendicularAxis(perpendicularAxis)
        f.SetAngleEpsilon(angleEpsilon)
    f.Update()
    origin = f.GetPlaneOrigin()
    normal = np.array(f.GetPlaneNormal())
    # flip the normal if needed
    if np.dot(normal, expectedNormal) < 0:
        normal = -normal
    # for each point, compute signed distance to plane
    polyData = shallowCopy(polyData)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    if returnOrigin:
        return polyData, origin, normal
    else:
        return polyData, normal
def flipNormalsWithViewDirection(polyData, viewDirection):
    """Flip, in place, any normal pointing away from the viewer so that all
    normals face toward the view direction.

    Bugfix: this referenced an undefined name ``vnp`` (the module is imported
    here as ``vtkNumpy``), which raised NameError on every call.
    """
    normals = vtkNumpy.getNumpyFromVtk(polyData, 'normals')
    normals[np.dot(normals, viewDirection) > 0] *= -1
def normalEstimation(dataObj, searchCloud=None, searchRadius=0.05, useVoxelGrid=False, voxelGridLeafSize=0.05):
    """Estimate per-point normals with PCL, optionally using a separate (or
    voxelized) cloud for the neighborhood search.  Returns a copy with the
    'normals' array installed as the active point normals."""
    f = vtk.vtkPCLNormalEstimation()
    f.SetSearchRadius(searchRadius)
    f.SetInput(dataObj)
    if searchCloud:
        f.SetInput(1, searchCloud)
    elif useVoxelGrid:
        # search on a decimated cloud for speed
        f.SetInput(1, applyVoxelGrid(dataObj, voxelGridLeafSize))
    f.Update()
    dataObj = shallowCopy(f.GetOutput())
    dataObj.GetPointData().SetNormals(dataObj.GetPointData().GetArray('normals'))
    return dataObj
def addCoordArraysToPolyData(polyData):
    """Return a shallow copy with scalar arrays 'x', 'y', 'z' (world coords)
    and 'distance_along_view_{x,y,z}' (coords in the current view frame),
    used for thresholding and colorizing elsewhere in this module."""
    polyData = shallowCopy(polyData)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
    vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
    vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    viewOrigin = viewFrame.TransformPoint([0.0, 0.0, 0.0])
    viewX = viewFrame.TransformVector([1.0, 0.0, 0.0])
    viewY = viewFrame.TransformVector([0.0, 1.0, 0.0])
    viewZ = viewFrame.TransformVector([0.0, 0.0, 1.0])
    polyData = labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_view_x')
    polyData = labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_view_y')
    polyData = labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_view_z')
    return polyData
def getDebugRevolutionData():
    """Load a canned point-cloud file for offline debugging (hard-coded path
    on the developer's desktop) and add the standard coordinate arrays."""
    #dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../drc-data'))
    #filename = os.path.join(dataDir, 'valve_wall.vtp')
    #filename = os.path.join(dataDir, 'bungie_valve.vtp')
    #filename = os.path.join(dataDir, 'cinder-blocks.vtp')
    #filename = os.path.join(dataDir, 'cylinder_table.vtp')
    #filename = os.path.join(dataDir, 'firehose.vtp')
    #filename = os.path.join(dataDir, 'debris.vtp')
    #filename = os.path.join(dataDir, 'rev1.vtp')
    #filename = os.path.join(dataDir, 'drill-in-hand.vtp')
    filename = os.path.expanduser('~/Desktop/scans/debris-scan.vtp')
    return addCoordArraysToPolyData(ioUtils.readPolyData(filename))
def getCurrentScanBundle(useVoxelGrid=False):
    """Return the 'SCANS_HALF_SWEEP' cloud with coordinate arrays added
    (optionally voxelized), or None when unavailable or empty."""
    obj = om.findObjectByName('SCANS_HALF_SWEEP')
    if not obj:
        return None
    revPolyData = obj.polyData
    if not revPolyData or not revPolyData.GetNumberOfPoints():
        return None
    if useVoxelGrid:
        revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
    return addCoordArraysToPolyData(revPolyData)
def getCurrentRevolutionData(useVoxelGrid=False):
    """Return the current multisense revolution cloud with coordinate arrays
    added (optionally voxelized); falls back to the scan bundle when empty."""
    from director import perception
    revPolyData = perception._multisenseItem.model.revPolyData
    if not revPolyData or not revPolyData.GetNumberOfPoints():
        return getCurrentScanBundle()
    if useVoxelGrid:
        revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
    return addCoordArraysToPolyData(revPolyData)
def getDisparityPointCloud(decimation=4, removeOutliers=True, removeSize=0, rangeThreshold=-1, imagesChannel='MULTISENSE_CAMERA', cameraName='CAMERA_LEFT'):
    """Build a point cloud from the current stereo disparity image, optionally
    filtering radius outliers.  Returns None when no cloud is available."""
    p = cameraview.getStereoPointCloud(decimation, imagesChannel=imagesChannel, cameraName=cameraName, removeSize=removeSize, rangeThreshold=rangeThreshold)
    if not p:
        return None
    if removeOutliers:
        # attempt to scale outlier filtering, best tuned for decimation of 2 or 4
        scaling = (10*16)/(decimation*decimation)
        p = labelOutliers(p, searchRadius=0.06, neighborsInSearchRadius=scaling)
        p = thresholdPoints(p, 'is_outlier', [0.0, 0.0])
    return p
def getCurrentMapServerData():
    """Return the Map Server cloud with coordinate arrays added, or None when
    the server is missing, hidden, or empty."""
    mapServer = om.findObjectByName('Map Server')
    polyData = None
    if mapServer and mapServer.getProperty('Visible'):
        polyData = mapServer.source.polyData
    if not polyData or not polyData.GetNumberOfPoints():
        return None
    return addCoordArraysToPolyData(polyData)
def segmentGroundPlanes():
    """Debug utility: run ground segmentation on every 'pointcloud snapshot'
    object, printing the angle of each fitted ground normal (and the change in
    head axis between consecutive snapshots) and visualizing the normals."""
    objs = []
    for obj in om.getObjects():
        name = obj.getProperty('Name')
        if name.startswith('pointcloud snapshot'):
            objs.append(obj)
    objs = sorted(objs, key=lambda x: x.getProperty('Name'))
    d = DebugData()
    prevHeadAxis = None
    for obj in objs:
        name = obj.getProperty('Name')
        print '----- %s---------' % name
        print 'head axis:', obj.headAxis
        origin, normal, groundPoints, _ = segmentGround(obj.polyData)
        print 'ground normal:', normal
        showPolyData(groundPoints, name + ' ground points', visible=False)
        # angle between fitted ground normal and world z (folded into [0, 90])
        a = np.array([0,0,1])
        b = np.array(normal)
        diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
        if diff > 90:
            print 180 - diff
        else:
            print diff
        if prevHeadAxis is not None:
            # angle between this snapshot's head axis and the previous one
            a = prevHeadAxis
            b = np.array(obj.headAxis)
            diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
            if diff > 90:
                print 180 - diff
            else:
                print diff
        prevHeadAxis = np.array(obj.headAxis)
        d.addLine([0,0,0], normal)
    updatePolyData(d.getPolyData(), 'normals')
def extractCircle(polyData, distanceThreshold=0.04, radiusLimit=None):
    """RANSAC-fit a circle to the cloud (optionally radius-constrained) and
    return (inlierPoints, circleFitFilter); the filter holds the fit params."""
    circleFit = vtk.vtkPCLSACSegmentationCircle()
    circleFit.SetDistanceThreshold(distanceThreshold)
    circleFit.SetInput(polyData)
    if radiusLimit is not None:
        circleFit.SetRadiusLimit(radiusLimit)
        circleFit.SetRadiusConstraintEnabled(True)
    circleFit.Update()
    polyData = thresholdPoints(circleFit.GetOutput(), 'ransac_labels', [1.0, 1.0])
    return polyData, circleFit
def removeMajorPlane(polyData, distanceThreshold=0.02):
    """RANSAC-fit the dominant plane and return (outlierPoints, planeFilter),
    i.e. the cloud with the major plane removed plus the fit filter."""
    # perform plane segmentation
    f = planeSegmentationFilter()
    f.SetInput(polyData)
    f.SetDistanceThreshold(distanceThreshold)
    f.Update()
    polyData = thresholdPoints(f.GetOutput(), 'ransac_labels', [0.0, 0.0])
    return polyData, f
def removeGroundSimple(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    ''' Simple ground plane removal algorithm. Uses ground height
        and does simple z distance filtering.
        Suitable for noisy data e.g. kinect/stereo camera
        (Default args should be relaxed, filtering simplfied)

        Returns (groundPoints, scenePoints).
    '''
    groundHeight = SegmentationContext.getGlobalInstance().getGroundHeight()
    origin = [0, 0, groundHeight]
    normal = [0, 0, 1]
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
    return groundPoints, scenePoints
def removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    """Split polyData into (groundPoints, scenePoints) using segmentGround's
    plane fit; the fitted plane origin/normal are discarded."""
    origin, normal, groundPoints, scenePoints = segmentGround(polyData, groundThickness, sceneHeightFromGround)
    return groundPoints, scenePoints
def generateFeetForValve():
    """Compute and display a stance (left/right foot frames) in front of the
    'valve affordance' object."""
    aff = om.findObjectByName('valve affordance')
    assert aff
    params = aff.params
    origin = np.array(params['origin'])
    origin[2] = 0.0
    # build a ground-aligned frame facing the valve
    xaxis = -params['axis']
    zaxis = np.array([0,0,1])
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    stanceWidth = 0.2
    stanceRotation = 25.0
    stanceOffset = [-1.0, -0.5, 0.0]
    valveFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    valveFrame.PostMultiply()
    valveFrame.Translate(origin)
    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(valveFrame, stanceWidth, stanceRotation, stanceOffset)
    # bugfix: this previously referenced an undefined 'boardFrame' (copy-paste
    # from generateFeetForDebris) which raised NameError; show the valve's
    # ground reference frame instead
    showFrame(valveFrame, 'valve ground frame', parent=aff, scale=0.15, visible=False)
    showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
    showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
    #d = DebugData()
    #d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
    #updatePolyData(d.getPolyData(), 'stance debug')
    #publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForDebris():
    """Compute and display a stance (left/right foot frames) in front of the
    'board A' debris affordance; no-op when the affordance is absent."""
    aff = om.findObjectByName('board A')
    if not aff:
        return
    params = aff.params
    # use the near end of the board, projected onto the ground
    origin = np.array(params['origin'])
    origin = origin + params['zaxis']*params['zwidth']/2.0 - params['xaxis']*params['xwidth']/2.0
    origin[2] = 0.0
    yaxis = params['zaxis']
    zaxis = np.array([0,0,1])
    xaxis = np.cross(yaxis, zaxis)
    stanceWidth = 0.35
    stanceRotation = 0.0
    stanceOffset = [-0.48, -0.08, 0]
    boardFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    boardFrame.PostMultiply()
    boardFrame.Translate(origin)
    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(boardFrame, stanceWidth, stanceRotation, stanceOffset)
    showFrame(boardFrame, 'board ground frame', parent=aff, scale=0.15, visible=False)
    lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
    rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
    for obj in [lfoot, rfoot]:
        obj.addToView(app.getDRCView())
    #d = DebugData()
    #d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
    #updatePolyData(d.getPolyData(), 'stance debug')
    #publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForWye():
    """Compute and display a stance (left/right foot frames) in front of the
    'wye points' affordance; no-op when the affordance is absent."""
    aff = om.findObjectByName('wye points')
    if not aff:
        return
    params = aff.params
    origin = np.array(params['origin'])
    origin[2] = 0.0
    yaxis = params['xaxis']
    xaxis = -params['zaxis']
    zaxis = np.cross(xaxis, yaxis)
    stanceWidth = 0.20
    stanceRotation = 0.0
    stanceOffset = [-0.48, -0.08, 0]
    affGroundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    affGroundFrame.PostMultiply()
    affGroundFrame.Translate(origin)
    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(affGroundFrame, stanceWidth, stanceRotation, stanceOffset)
    showFrame(affGroundFrame, 'affordance ground frame', parent=aff, scale=0.15, visible=False)
    lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
    rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
    for obj in [lfoot, rfoot]:
        obj.addToView(app.getDRCView())
def getFootFramesFromReferenceFrame(referenceFrame, stanceWidth, stanceRotation, stanceOffset):
    """Derive (stanceFrame, lfootFrame, rfootFrame) from a reference frame.

    The stance frame is the reference frame rotated about z by stanceRotation
    (degrees) and shifted by stanceOffset; the feet straddle the stance frame
    at +/- stanceWidth/2 along y, raised by the fixed foot sole height.
    """
    footHeight=0.0745342
    ref = vtk.vtkTransform()
    ref.SetMatrix(referenceFrame.GetMatrix())
    stanceFrame = vtk.vtkTransform()
    stanceFrame.PostMultiply()
    stanceFrame.RotateZ(stanceRotation)
    stanceFrame.Translate(stanceOffset)
    stanceFrame.Concatenate(ref)
    lfootFrame = vtk.vtkTransform()
    lfootFrame.PostMultiply()
    lfootFrame.Translate(0, stanceWidth/2.0, footHeight)
    lfootFrame.Concatenate(stanceFrame)
    rfootFrame = vtk.vtkTransform()
    rfootFrame.PostMultiply()
    rfootFrame.Translate(0, -stanceWidth/2.0, footHeight)
    rfootFrame.Concatenate(stanceFrame)
    return stanceFrame, lfootFrame, rfootFrame
def poseFromFrame(frame):
    """Convert a vtkTransform into an LCM bot_core.position_3d_t message
    (translation vector + quaternion rotation)."""
    import bot_core as lcmbotcore
    pos, quat = transformUtils.poseFromTransform(frame)
    trans = lcmbotcore.vector_3d_t()
    trans.x, trans.y, trans.z = pos
    quatMsg = lcmbotcore.quaternion_t()
    quatMsg.w, quatMsg.x, quatMsg.y, quatMsg.z = quat
    pose = lcmbotcore.position_3d_t()
    pose.translation = trans
    pose.rotation = quatMsg
    return pose
def cropToPlane(polyData, origin, normal, threshold):
    """Label each point with its signed distance to the plane (origin, normal)
    and return (croppedPoints, labeledPolyData), where the crop keeps points
    whose 'dist_to_plane' value falls inside threshold=[min, max]."""
    labeled = shallowCopy(polyData)
    unitNormal = normal / np.linalg.norm(normal)
    pts = vtkNumpy.getNumpyFromVtk(labeled, 'Points')
    signedDist = np.dot(pts - origin, unitNormal)
    vtkNumpy.addNumpyToVtk(labeled, signedDist, 'dist_to_plane')
    kept = thresholdPoints(labeled, 'dist_to_plane', threshold)
    return kept, labeled
def createLine(blockDimensions, p1, p2):
    """Segment a board from a 2D line annotation drawn on screen.

    p1 and p2 are display-space points; the cloud is sliced by the plane
    through the camera and the two picked rays, cropped to the wedge between
    the rays, a plane is fit to the remainder, and the result is passed to
    segmentBlockByTopPlane as a 'board' affordance.
    """
    sliceWidth = np.array(blockDimensions).max()/2.0 + 0.02
    sliceThreshold = [-sliceWidth, sliceWidth]
    # require p1 to be point on left
    if p1[0] > p2[0]:
        p1, p2 = p2, p1
    _, worldPt1 = getRayFromDisplayPoint(app.getCurrentRenderView(), p1)
    _, worldPt2 = getRayFromDisplayPoint(app.getCurrentRenderView(), p2)
    cameraPt = np.array(app.getCurrentRenderView().camera().GetPosition())
    leftRay = worldPt1 - cameraPt
    rightRay = worldPt2 - cameraPt
    middleRay = (leftRay + rightRay) / 2.0
    d = DebugData()
    d.addLine(cameraPt, worldPt1)
    d.addLine(cameraPt, worldPt2)
    d.addLine(worldPt1, worldPt2)
    d.addLine(cameraPt, cameraPt + middleRay)
    updatePolyData(d.getPolyData(), 'line annotation', parent=getDebugFolder(), visible=False)
    inputObj = om.findObjectByName('pointcloud snapshot')
    if inputObj:
        polyData = shallowCopy(inputObj.polyData)
    else:
        polyData = getCurrentRevolutionData()
    # slice plane through the camera and both picked rays; side planes bound
    # the wedge between the rays
    origin = cameraPt
    normal = np.cross(rightRay, leftRay)
    leftNormal = np.cross(normal, leftRay)
    rightNormal = np.cross(rightRay, normal)
    normal /= np.linalg.norm(normal)
    leftNormal /= np.linalg.norm(leftNormal)
    rightNormal /= np.linalg.norm(rightNormal)
    middleRay /= np.linalg.norm(middleRay)
    cropped, polyData = cropToPlane(polyData, origin, normal, sliceThreshold)
    updatePolyData(polyData, 'slice dist', parent=getDebugFolder(), colorByName='dist_to_plane', colorByRange=[-0.5, 0.5], visible=False)
    updatePolyData(cropped, 'slice', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
    cropped, _ = cropToPlane(cropped, origin, leftNormal, [-1e6, 0])
    cropped, _ = cropToPlane(cropped, origin, rightNormal, [-1e6, 0])
    updatePolyData(cropped, 'slice segment', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
    planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=middleRay, angleEpsilon=math.radians(60))
    planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
    updatePolyData(planePoints, 'board segmentation', parent=getDebugFolder(), color=getRandomColor(), visible=False)
    '''
    names = ['board A', 'board B', 'board C', 'board D', 'board E', 'board F', 'board G', 'board H', 'board I']
    for name in names:
        if not om.findObjectByName(name):
            break
        else:
            name = 'board'
    '''
    name = 'board'
    segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=-middleRay, expectedXAxis=middleRay, edgeSign=-1, name=name)
def updateBlockAffordances(polyData=None):
    '''Re-run the block fit for every BoxAffordanceItem in the object model.

    Previous refit results (objects whose name contains 'refit') are removed
    first, then updateBlockFit is invoked on each remaining box affordance.
    '''
    staleRefits = [obj for obj in om.getObjects()
                   if isinstance(obj, BoxAffordanceItem) and 'refit' in obj.getProperty('Name')]
    for staleObj in staleRefits:
        om.removeFromObjectModel(staleObj)

    for boxObj in om.getObjects():
        if isinstance(boxObj, BoxAffordanceItem):
            updateBlockFit(boxObj, polyData)
def updateBlockFit(affordanceObj, polyData=None):
    '''Refit a single box affordance against a point cloud.

    The cloud is cropped to a slab around the affordance's stored plane and
    edge axes, a plane is refit to the largest remaining cluster, and a new
    block is segmented from it.  The refit block is then adjusted so it keeps
    the original block's length, anchored at the refit block's far end point.

    affordanceObj -- BoxAffordanceItem whose params supply the search frame
    polyData -- cloud to fit against; defaults to the 'pointcloud snapshot'
    '''
    affordanceObj.updateParamsFromActorTransform()
    name = affordanceObj.getProperty('Name') + ' refit'
    origin = affordanceObj.params['origin']
    normal = affordanceObj.params['yaxis']
    edgePerpAxis = affordanceObj.params['xaxis']
    blockDimensions = [affordanceObj.params['xwidth'], affordanceObj.params['ywidth']]
    if polyData is None:
        inputObj = om.findObjectByName('pointcloud snapshot')
        polyData = shallowCopy(inputObj.polyData)
    # crop to a +/-10cm slab around the block's plane and its perpendicular edge axis
    cropThreshold = 0.1
    cropped = polyData
    cropped, _ = cropToPlane(cropped, origin, normal, [-cropThreshold, cropThreshold])
    cropped, _ = cropToPlane(cropped, origin, edgePerpAxis, [-cropThreshold, cropThreshold])
    updatePolyData(cropped, 'refit search region', parent=getDebugFolder(), visible=False)
    cropped = extractLargestCluster(cropped)
    planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=normal, angleEpsilon=math.radians(10))
    planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
    updatePolyData(planePoints, 'refit board segmentation', parent=getDebugFolder(), visible=False)
    refitObj = segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=normal, expectedXAxis=edgePerpAxis, edgeSign=-1, name=name)
    # keep one end point of the refit block fixed while restoring the
    # original zwidth: slide the origin along the refit z axis accordingly
    refitOrigin = np.array(refitObj.params['origin'])
    refitLength = refitObj.params['zwidth']
    refitZAxis = refitObj.params['zaxis']
    refitEndPoint1 = refitOrigin + refitZAxis*refitLength/2.0
    originalLength = affordanceObj.params['zwidth']
    correctedOrigin = refitEndPoint1 - refitZAxis*originalLength/2.0
    originDelta = correctedOrigin - refitOrigin
    refitObj.params['zwidth'] = originalLength
    refitObj.polyData.DeepCopy(affordanceObj.polyData)
    refitObj.actor.GetUserTransform().Translate(originDelta)
    refitObj.updateParamsFromActorTransform()
def startInteractiveLineDraw(blockDimensions):
    '''Begin an interactive two-point line annotation in the current view.

    When the user completes the line, createLine is invoked with the given
    block dimensions and the annotation points.
    '''
    lineTool = LineDraw(app.getCurrentRenderView())
    addViewPicker(lineTool)
    lineTool.enabled = True
    lineTool.start()
    lineTool.annotationFunc = functools.partial(createLine, blockDimensions)
def startLeverValveSegmentation():
    '''Begin a two-point picking session that triggers lever valve segmentation.

    segmentLeverValve is called with the two picked points when done.
    '''
    pointTool = PointPicker(numberOfPoints=2)
    addViewPicker(pointTool)
    pointTool.enabled = True
    pointTool.drawLines = False
    pointTool.start()
    pointTool.annotationFunc = functools.partial(segmentLeverValve)
def refitValveAffordance(aff, point1, origin, normal):
    '''Re-orient a valve affordance so its z-axis follows a refit wall normal.

    Called through refitWallCallbacks (see segmentValve), which supplies
    (point1, origin, normal) to every subscriber.  Only the orientation is
    recomputed here; the translation is taken from the affordance's own
    stored origin.  The ``point1`` and ``origin`` arguments are accepted for
    callback-signature compatibility but are not used.
    NOTE(review): the passed-in ``origin`` may have been intended to replace
    aff.params['origin'] -- confirm against the wall-refit caller before
    changing.
    '''
    origin = aff.params['origin']

    # Build an orthonormal frame with z along the wall normal and x as close
    # to world-up as the normal allows.
    zaxis = normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
    aff.updateParamsFromActorTransform()
def segmentValve(expectedValveRadius, point1, point2):
    '''Segment a wall-mounted wheel valve from two user-picked points.

    point1 -- a point on the wall near the valve (seeds the wall plane fit)
    point2 -- a point near the valve center (seeds the valve plane fit)
    expectedValveRadius -- radius in meters used for the search region and
        for the final affordance geometry

    Creates a 'valve affordance' cylinder (steering_cyl otdf) at the valve
    cluster centroid, oriented along the wall normal, and registers a refit
    callback so the affordance follows later wall refits.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    # fit the wall plane around point1, seeded with the camera view normal
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, _, wallNormal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
    # refit a plane around point2 (the valve face, near-parallel to the wall)
    polyData, _, _ = applyPlaneFit(polyData, expectedNormal=wallNormal, searchOrigin=point2, searchRadius=expectedValveRadius, angleEpsilon=0.2, returnOrigin=True)
    valveCluster = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    valveCluster = cropToSphere(valveCluster, point2, expectedValveRadius*2)
    valveCluster = extractLargestCluster(valveCluster, minClusterSize=1)
    updatePolyData(valveCluster, 'valve cluster', parent=getDebugFolder(), visible=False)
    # place the affordance at the centroid of the valve cluster
    origin = np.average(vtkNumpy.getNumpyFromVtk(valveCluster, 'Points') , axis=0)
    # orthonormal frame: z along the wall normal, x as close to world-up as possible
    zaxis = wallNormal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    zwidth = 0.03
    radius = expectedValveRadius
    d = DebugData()
    d.addLine(np.array([0,0,-zwidth/2.0]), np.array([0,0,zwidth/2.0]), radius=radius)
    name = 'valve affordance'
    obj = showPolyData(d.getPolyData(), name, cls=FrameAffordanceItem, parent='affordances', color=[0,1,0])
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    # keep the affordance aligned when the wall is later refit
    refitWallCallbacks.append(functools.partial(refitValveAffordance, obj))
    params = dict(axis=zaxis, radius=radius, length=zwidth, origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
                  xwidth=radius, ywidth=radius, zwidth=zwidth,
                  otdf_type='steering_cyl', friendly_name='valve')
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, scale=radius, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentValveByBoundingBox(polyData, searchPoint):
    '''Segment a wheel valve near searchPoint and create a capsule-ring affordance.

    The valve radius is estimated from the vertical extent of the fitted
    valve cluster (minus the assumed tube radius), and the valve plane is
    found by fitting perpendicular to the current view direction.

    Returns the new CapsuleRingAffordanceItem.
    '''
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData = cropToSphere(polyData, searchPoint, radius=0.6)
    polyData = applyVoxelGrid(polyData, leafSize=0.015)
    # extract tube search region
    polyData = labelDistanceToLine(polyData, searchPoint, np.array(searchPoint) + np.array([0,0,1]))
    searchRegion = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.2])
    updatePolyData(searchRegion, 'valve tube search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
    # guess valve plane
    _, origin, normal  = applyPlaneFit(searchRegion, distanceThreshold=0.01, perpendicularAxis=viewDirection, angleEpsilon=math.radians(30), expectedNormal=-viewDirection, returnOrigin=True)
    # extract plane search region
    polyData = labelPointDistanceAlongAxis(polyData, normal, origin)
    searchRegion = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
    updatePolyData(searchRegion, 'valve plane search region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
    valvePoints = extractLargestCluster(searchRegion, minClusterSize=1)
    updatePolyData(valvePoints, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
    valvePoints, _ = applyPlaneFit(valvePoints, expectedNormal=normal, perpendicularAxis=normal, distanceThreshold=0.01)
    valveFit = thresholdPoints(valvePoints, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
    # estimate the ring radius from the cluster's vertical extent
    points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
    zvalues = points[:,2].copy()
    minZ = np.min(zvalues)
    maxZ = np.max(zvalues)
    tubeRadius = 0.017
    radius = float((maxZ - minZ) / 2.0) - tubeRadius
    fields = makePolyDataFields(valveFit)
    origin = np.array(fields.frame.GetPosition())
    #origin = computeCentroid(valveFit)
    # valve frame: x along the plane normal, z up
    zaxis = [0,0,1]
    xaxis = normal
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    pose = transformUtils.poseFromTransform(t)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=radius, Segments=20)
    desc['Tube Radius'] = tubeRadius
    obj = affordanceManager.newAffordanceFromDescription(desc)
    obj.params = dict(radius=radius)
    return obj
def segmentDoorPlane(polyData, doorPoint, stanceFrame):
    '''Fit a plane to the door around doorPoint and return a door frame.

    The returned vtkTransform sits at the door centroid (x, y) dropped to
    the stance-frame ground height, with x pointing toward the viewer (out
    of the door) and z up.
    '''
    doorPoint = np.array(doorPoint)

    # keep only points inside a vertical band around the picked point
    doorBand = 1.5
    halfBand = np.array([0.0, 0.0, doorBand/2])
    polyData = cropToLineSegment(polyData, doorPoint + halfBand, doorPoint - halfBand)

    fitPoints, normal = applyLocalPlaneFit(polyData, doorPoint, searchRadius=0.2, searchRadiusEnd=1.0, removeGroundFirst=False)
    updatePolyData(fitPoints, 'door points', visible=False, color=[0,1,0])

    # orient the plane normal toward the viewer
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    if np.dot(normal, viewDirection) > 0:
        normal = -normal

    # origin: planar centroid at the ground height of the stance frame
    centroid = computeCentroid(fitPoints)
    groundHeight = stanceFrame.GetPosition()[2]
    frameOrigin = [centroid[0], centroid[1], groundHeight]

    # orthonormal frame: x out of the door, z up
    xaxis = -normal
    zaxis = [0,0,1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    doorFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    doorFrame.PostMultiply()
    doorFrame.Translate(frameOrigin)
    return doorFrame
def segmentValveByRim(polyData, rimPoint1, rimPoint2):
    '''Segment a wheel valve from two user-picked points on opposite rim sides.

    The valve plane is taken perpendicular to the rim chord (in the
    horizontal sense), the valve cluster is extracted around the chord
    midpoint, and the radius is estimated both from the cluster's vertical
    extent and from its oriented bounding box (the latter wins).

    Returns the new CapsuleRingAffordanceItem.
    '''
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    yaxis = np.array(rimPoint2) - np.array(rimPoint1)
    zaxis = [0,0,1]
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    # flip xaxis to be with view direction
    if np.dot(xaxis, viewDirection) < 0:
        xaxis = -xaxis
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    origin = (np.array(rimPoint2) + np.array(rimPoint1)) / 2.0
    # slab around the valve plane, then a sphere around the chord midpoint
    polyData = labelPointDistanceAlongAxis(polyData, xaxis, origin)
    polyData = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
    updatePolyData(polyData, 'valve plane region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
    polyData = cropToSphere(polyData, origin, radius=0.4)
    polyData = applyVoxelGrid(polyData, leafSize=0.015)
    updatePolyData(polyData, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
    valveFit = extractLargestCluster(polyData, minClusterSize=1)
    updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
    # first radius estimate: half the vertical extent, minus the tube radius
    points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
    zvalues = points[:,2].copy()
    minZ = np.min(zvalues)
    maxZ = np.max(zvalues)
    tubeRadius = 0.017
    radius = float((maxZ - minZ) / 2.0) - tubeRadius
    fields = makePolyDataFields(valveFit)
    origin = np.array(fields.frame.GetPosition())
    vis.updatePolyData(transformPolyData(fields.box, fields.frame), 'valve cluster bounding box', visible=False)
    #origin = computeCentroid(valveFit)
    '''
    zaxis = [0,0,1]
    xaxis = normal
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    '''
    # second (final) radius estimate: half the largest bounding-box dimension
    radius = np.max(fields.dims)/2.0 - tubeRadius
    # snap xaxis to the bounding-box axis it is most aligned with
    proj = [np.abs(np.dot(xaxis, axis)) for axis in fields.axes]
    xaxisNew = fields.axes[np.argmax(proj)]
    if np.dot(xaxisNew, xaxis) < 0:
        xaxisNew = -xaxisNew
    xaxis = xaxisNew
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    pose = transformUtils.poseFromTransform(t)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
    desc['Tube Radius'] = tubeRadius
    obj = affordanceManager.newAffordanceFromDescription(desc)
    obj.params = dict(radius=radius)
    return obj
def segmentValveByWallPlane(expectedValveRadius, point1, point2):
    '''Segment a wheel valve hanging in front of a wall, given two wall points.

    point1/point2 -- points on the wall on either side of the valve; the
        search region is the box between them, offset 5-50cm off the wall
    expectedValveRadius -- used to bound the circle fit and as the final
        affordance radius

    A circle is fit to the largest cluster in the search region; the circle
    normal is then overridden by the wall plane normal and the radius by
    expectedValveRadius.  Creates a 'valve' CapsuleRingAffordanceItem.
    '''
    centerPoint = (point1 + point2) / 2.0
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    _ , polyData =  removeGround(polyData)
    # fit the wall plane facing the viewer
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
    # build the in-plane perpendicular chord used to bound the search region
    perpLine = np.cross(point2 - point1, normal)
    #perpLine /= np.linalg.norm(perpLine)
    #perpLine * np.linalg.norm(point2 - point1)/2.0
    point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
    d = DebugData()
    d.addLine(point1, point2)
    d.addLine(point3, point4)
    updatePolyData(d.getPolyData(), 'crop lines', parent=getDebugFolder(), visible=False)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'valve wall', parent=getDebugFolder(), visible=False)
    # search 5-50cm in front of the wall, inside the two chords
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.05, 0.5])
    searchRegion = cropToLineSegment(searchRegion, point1, point2)
    searchRegion = cropToLineSegment(searchRegion, point3, point4)
    updatePolyData(searchRegion, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
    searchRegionSpokes = shallowCopy(searchRegion)
    searchRegion, origin, _  = applyPlaneFit(searchRegion, expectedNormal=normal, perpendicularAxis=normal, returnOrigin=True)
    searchRegion = thresholdPoints(searchRegion, 'dist_to_plane', [-0.015, 0.015])
    updatePolyData(searchRegion, 'valve search region 2', parent=getDebugFolder(), color=[0,1,0], visible=False)
    largestCluster = extractLargestCluster(searchRegion, minClusterSize=1)
    updatePolyData(largestCluster, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
    radiusLimit = [expectedValveRadius - 0.01, expectedValveRadius + 0.01] if expectedValveRadius else None
    #radiusLimit = None
    polyData, circleFit = extractCircle(largestCluster, distanceThreshold=0.01, radiusLimit=radiusLimit)
    updatePolyData(polyData, 'circle fit', parent=getDebugFolder(), visible=False)
    #polyData, circleFit = extractCircle(polyData, distanceThreshold=0.01)
    #showPolyData(polyData, 'circle fit', colorByName='z')
    radius = circleFit.GetCircleRadius()
    origin = np.array(circleFit.GetCircleOrigin())
    circleNormal = np.array(circleFit.GetCircleNormal())
    circleNormal = circleNormal/np.linalg.norm(circleNormal)
    if np.dot(circleNormal, normal) < 0:
        circleNormal *= -1
    # force use of the plane normal
    circleNormal = normal
    radius = expectedValveRadius
    d = DebugData()
    d.addLine(origin - normal*radius, origin + normal*radius)
    d.addCircle(origin, circleNormal, radius)
    updatePolyData(d.getPolyData(), 'valve axes', parent=getDebugFolder(), visible=False)
    zaxis = -circleNormal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    #t = getTransformFromAxes(xaxis, yaxis, zaxis) # this was added to be consistent with segmentValveByRim
    t = getTransformFromAxes(zaxis, -yaxis, xaxis) # this was added to be consistent with segmentValveByRim
    t.PostMultiply()
    t.Translate(origin)
    # Spoke angle fitting:
    if (1==0): # disabled jan 2015
        # extract the relative positon of the points to the valve axis:
        searchRegionSpokes = labelDistanceToLine(searchRegionSpokes, origin, [origin + circleNormal])
        searchRegionSpokes = thresholdPoints(searchRegionSpokes, 'distance_to_line', [0.05, radius-0.04])
        updatePolyData(searchRegionSpokes, 'valve spoke search', parent=getDebugFolder(), visible=False)
        searchRegionSpokesLocal = transformPolyData(searchRegionSpokes, t.GetLinearInverse() )
        points = vtkNumpy.getNumpyFromVtk(searchRegionSpokesLocal , 'Points')
        spoke_angle = findValveSpokeAngle(points)
    else:
        spoke_angle = 0
    # rotate the valve frame by the fitted spoke angle (zero when disabled)
    spokeAngleTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,spoke_angle])
    spokeTransform = transformUtils.copyFrame(t)
    spokeAngleTransform.Concatenate(spokeTransform)
    spokeObj = showFrame(spokeAngleTransform, 'spoke frame', parent=getDebugFolder(), visible=False, scale=radius)
    spokeObj.addToView(app.getDRCView())
    t = spokeAngleTransform
    tubeRadius = 0.017
    pose = transformUtils.poseFromTransform(t)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
    desc['Tube Radius'] = tubeRadius
    obj = affordanceManager.newAffordanceFromDescription(desc)
    obj.params = dict(radius=radius)
def showHistogram(polyData, arrayName, numberOfBins=100):
    '''Plot a histogram of the named point-data array and show it on screen.

    Returns the center of the most populated bin.
    '''
    import matplotlib.pyplot as plt

    values = vnp.getNumpyFromVtk(polyData, arrayName)
    counts, edges = np.histogram(values, bins=numberOfBins)

    binWidth = edges[1] - edges[0]
    binCenters = (edges[:-1] + edges[1:]) / 2
    plt.bar(binCenters, counts, align='center', width=0.7 * binWidth)
    plt.show()

    return edges[np.argmax(counts)] + binWidth/2.0
def showTable(table, parent):
    '''Explicitly draw a table affordance along with its bounding box.

    table -- fields object providing .frame, .mesh and .box
    parent -- accepted for call compatibility; not used by this function
    '''
    pose = transformUtils.poseFromTransform(table.frame)
    description = dict(classname='MeshAffordanceItem', Name='table', Color=[0,1,0], pose=pose)
    aff = affordanceManager.newAffordanceFromDescription(description)
    aff.setPolyData(table.mesh)

    boxObj = vis.showPolyData(table.box, 'table box', parent=aff, color=[0,1,0], visible=False)
    boxObj.actor.SetUserTransform(table.frame)
def applyKmeansLabel(polyData, arrayName, numberOfClusters, whiten=False):
import scipy.cluster
ar = vnp.getNumpyFromVtk(polyData, arrayName).copy()
if whiten:
scipy.cluster.vq.whiten(ar)
codes, disturbances = scipy.cluster.vq.kmeans(ar, numberOfClusters)
if arrayName == 'normals' and numberOfClusters == 2:
v1 = codes[0]
v2 = codes[1]
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
angle = np.arccos(np.dot(v1, v2))
print 'angle between normals:', np.degrees(angle)
code, distance = scipy.cluster.vq.vq(ar, codes)
polyData = shallowCopy(polyData)
vnp.addNumpyToVtk(polyData, code, '%s_kmeans_label' % arrayName)
return polyData
def findValveSpokeAngle(points, spokeSymmetryDegrees=120, binWidthDegrees=10):
    '''Estimate the angular position of a valve spoke from in-plane points.

    Each point's polar angle (degrees, measured in the valve plane) is
    folded into [0, spokeSymmetryDegrees) -- the default of 120 assumes a
    three-spoke wheel -- and the folded angles are histogrammed into
    binWidthDegrees-wide bins.  The center of the most populated bin is
    returned as the spoke angle in degrees.

    points -- Nx2 (or wider) array of valve-frame xy coordinates
    spokeSymmetryDegrees -- rotational symmetry of the wheel (360 / numSpokes)
    binWidthDegrees -- histogram bin width; should divide spokeSymmetryDegrees
    '''
    #np.savetxt("/home/mfallon/Desktop/spoke_points.csv", points, delimiter=",")

    # polar angle of every point, wrapped to [0, 360)
    angle = np.degrees(np.arctan2(points[:,1], points[:,0]))
    angle[angle < 0] += 360

    # fold by the wheel's rotational symmetry
    angle = np.mod(angle, spokeSymmetryDegrees)

    # find the spoke as the fullest histogram bin
    bins = range(0, spokeSymmetryDegrees + binWidthDegrees, binWidthDegrees)
    freq, bins = np.histogram(angle, bins)
    amax = np.argmax(freq)

    # report the bin center, e.g. +5 deg for the default 10 deg bins
    spoke_angle = bins[amax] + binWidthDegrees / 2.0
    return spoke_angle
def findWallCenter(polyData, removeGroundMethod=removeGround):
    '''
    Find a frame at the center of the valve wall
    X&Y: average of points on the wall plane
    Z: 4 feet off the ground (determined using robot's feet)
    Orientation: z-normal into plane, y-axis horizontal

    polyData -- input cloud (ground is removed with removeGroundMethod)
    Returns a vtkTransform centered on the wall.
    '''
    _ , polyData =  removeGroundMethod(polyData)
    # fit the wall plane facing the viewer
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    wallPoints = applyVoxelGrid(wallPoints, leafSize=0.03)
    wallPoints = extractLargestCluster(wallPoints, minClusterSize=100)
    updatePolyData(wallPoints, 'auto valve wall', parent=getDebugFolder(), visible=False)
    xvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,0]
    yvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,1]
    # median or mid of max or min?
    #xcenter = np.median(xvalues)
    #ycenter = np.median(yvalues)
    xcenter = (np.max(xvalues)+np.min(xvalues))/2
    ycenter = (np.max(yvalues)+np.min(yvalues))/2
    # not used, not very reliable
    #zvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,2]
    #zcenter = np.median(zvalues)
    zcenter = SegmentationContext.getGlobalInstance().getGroundHeight() + 1.2192 # valves are 4ft from ground
    point1 =np.array([ xcenter, ycenter, zcenter ]) # center of the valve wall
    # orthonormal frame: z into the wall, x as close to world-up as possible
    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)
    normalObj = showFrame(t, 'valve wall frame', parent=getDebugFolder(), visible=False) # z direction out of wall
    normalObj.addToView(app.getDRCView())
    return t
def segmentValveWallAuto(expectedValveRadius=.195, mode='both', removeGroundMethod=removeGround ):
    '''
    Automatically segment a valve hanging in front of the wall at the center

    expectedValveRadius -- forwarded to the valve segmentation
    mode -- 'valve', 'lever' or 'both'; selects which affordances to segment
    removeGroundMethod -- ground-removal function forwarded to findWallCenter

    Raises Exception for an unrecognized mode.
    '''
    # find the valve wall and its center
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    t = findWallCenter(polyData, removeGroundMethod)
    # derive seed points in the wall frame: 0.6m either side of center for
    # the valve, 1.0m to the side for the lever (which can overhang)
    valve_point1 = [ 0 , 0.6 , 0]
    valveTransform1 = transformUtils.frameFromPositionAndRPY(valve_point1, [0,0,0])
    valveTransform1.Concatenate(t)
    point1 = np.array(valveTransform1.GetPosition()) # left of wall
    valve_point2 = [ 0 , -0.6 , 0]
    valveTransform2 = transformUtils.frameFromPositionAndRPY(valve_point2, [0,0,0])
    valveTransform2.Concatenate(t)
    point2 = np.array(valveTransform2.GetPosition()) # left of wall
    valve_point3 = [ 0 , 1.0 , 0] # lever can over hang
    valveTransform3 = transformUtils.frameFromPositionAndRPY(valve_point3, [0,0,0])
    valveTransform3.Concatenate(t)
    point3 =valveTransform3.GetPosition() # right of wall
    d = DebugData()
    d.addSphere(point2, radius=0.01)
    d.addSphere(point1, radius=0.03)
    d.addSphere(point3, radius=0.01)
    updatePolyData(d.getPolyData(), 'auto wall points', parent=getDebugFolder(), visible=False)
    if (mode=='valve'):
        segmentValveByWallPlane(expectedValveRadius, point1, point2)
    elif (mode=='lever'):
        segmentLeverByWallPlane(point1, point3)
    elif (mode=='both'):
        segmentValveByWallPlane(expectedValveRadius, point1, point2)
        segmentLeverByWallPlane(point1, point3)
    else:
        raise Exception('unexpected segmentation mode: ' + mode)
def segmentLeverByWallPlane(point1, point2):
    '''
    determine the position (including rotation) of a lever near a wall
    input is as for the valve - two points on the wall either side of the lever

    Fits a line to the points 12-20cm off the wall between the two picks,
    decides which line end is the pivot (the end nearer the lower-left of
    the wall), and creates a 'valve lever' FrameAffordanceItem whose x axis
    runs along the lever.
    '''
    # 1. determine the wall plane and normal
    centerPoint = (point1 + point2) / 2.0
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
    # 2. Crop the cloud down to the lever only using the wall plane
    perpLine = np.cross(point2 - point1, -normal)
    #perpLine /= np.linalg.norm(perpLine)
    #perpLine * np.linalg.norm(point2 - point1)/2.0
    point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
    d = DebugData()
    d.addLine(point1, point2)
    d.addLine(point3, point4)
    updatePolyData(d.getPolyData(), 'lever crop lines', parent=getDebugFolder(), visible=False)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'lever valve wall', parent=getDebugFolder(), visible=False)
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.12, 0.2]) # very tight threshold
    searchRegion = cropToLineSegment(searchRegion, point1, point2)
    searchRegion = cropToLineSegment(searchRegion, point3, point4)
    updatePolyData(searchRegion, 'lever search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
    # 3. fit line to remaining points - all assumed to be the lever
    linePoint, lineDirection, _ = applyLineFit(searchRegion, distanceThreshold=0.02)
    #if np.dot(lineDirection, forwardDirection) < 0:
    #    lineDirection = -lineDirection
    d = DebugData()
    d.addSphere(linePoint, radius=0.02)
    updatePolyData(d.getPolyData(), 'lever point', parent=getDebugFolder(), visible=False)
    # line end points along the fitted direction
    pts = vtkNumpy.getNumpyFromVtk(searchRegion, 'Points')
    dists = np.dot(pts-linePoint, lineDirection)
    lever_center = linePoint + lineDirection*np.min(dists)
    lever_tip = linePoint + lineDirection*np.max(dists)
    # 4. determine which lever point is closest to the lower left of the wall. That's the lever_center point
    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)
    # a distant point down and left from wall
    wall_point_lower_left = [ -20 , -20.0 , 0]
    wall_point_lower_left_Transform = transformUtils.frameFromPositionAndRPY(wall_point_lower_left, [0,0,0])
    wall_point_lower_left_Transform.Concatenate(t)
    wall_point_lower_left = wall_point_lower_left_Transform.GetPosition()
    d1 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_center, origin, normal) )**2) )
    d2 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_tip, origin, normal) )**2) )
    if (d2 < d1): # flip the points to match variable names
        p_temp = lever_center
        lever_center = lever_tip
        lever_tip = p_temp
        lineDirection = -lineDirection
    # 5. compute the rotation angle of the lever and, using that, its frame
    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(lever_center) # nominal frame at lever center
    rotationAngle = -computeSignedAngleBetweenVectors(lineDirection, [0, 0, 1], -normal)
    t_lever = transformUtils.frameFromPositionAndRPY( [0,0,0], [0,0, math.degrees( rotationAngle ) ] )
    t_lever.PostMultiply()
    t_lever.Concatenate(t)
    d = DebugData()
    # d.addSphere( point1 , radius=0.1)
    d.addSphere( wall_point_lower_left , radius=0.1)
    d.addSphere(lever_center, radius=0.04)
    d.addSphere(lever_tip, radius=0.01)
    d.addLine(lever_center, lever_tip)
    updatePolyData(d.getPolyData(), 'lever end points', color=[0,1,0], parent=getDebugFolder(), visible=False)
    radius = 0.01
    length = np.sqrt( np.sum((lever_tip - lever_center )**2) )
    # geometry: a rod of the measured length along x, with a pivot sphere
    d = DebugData()
    d.addLine([0,0,0], [length, 0, 0], radius=radius)
    d.addSphere ( [0, 0, 0], 0.02)
    geometry = d.getPolyData()
    obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances' , color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t_lever)
    obj.addToView(app.getDRCView())
    frameObj = showFrame(t_lever, 'lever frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
    otdfType = 'lever_valve'
    params = dict(origin=np.array(t_lever.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def applyICP(source, target):
    '''Rigid-body ICP registration of the source polyData onto the target.

    Returns a vtkTransform holding the resulting alignment.
    '''
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.Update()

    alignment = vtk.vtkTransform()
    alignment.SetMatrix(icp.GetMatrix())
    return alignment
def applyDiskGlyphs(polyData, computeNormals=True):
    '''Render the cloud as small surface disks oriented along point normals.

    If computeNormals is True the cloud is voxel-downsampled, outliers are
    removed and normals are estimated (using the full-resolution scan as
    the search cloud).  Otherwise the input must already carry normals.

    Returns a new polyData with one 1.5cm disk glyph per point.
    '''
    voxelGridLeafSize = 0.03
    normalEstimationSearchRadius = 0.05
    diskRadius = 0.015
    diskResolution = 12
    if computeNormals:
        scanInput = polyData
        pd = applyVoxelGrid(scanInput, leafSize=voxelGridLeafSize)
        pd = labelOutliers(pd, searchRadius=normalEstimationSearchRadius, neighborsInSearchRadius=3)
        pd = thresholdPoints(pd, 'is_outlier', [0, 0])
        pd = normalEstimation(pd, searchRadius=normalEstimationSearchRadius, searchCloud=scanInput)
    else:
        pd = polyData
        assert polyData.GetPointData().GetNormals()
    # build the disk glyph source, rotated so its face normal is along x
    disk = vtk.vtkDiskSource()
    disk.SetOuterRadius(diskRadius)
    disk.SetInnerRadius(0.0)
    disk.SetRadialResolution(0)
    disk.SetCircumferentialResolution(diskResolution)
    disk.Update()
    t = vtk.vtkTransform()
    t.RotateY(90)
    disk = transformPolyData(disk.GetOutput(), t)
    # place one disk per point, oriented by the point normal
    glyph = vtk.vtkGlyph3D()
    glyph.ScalingOff()
    glyph.OrientOn()
    glyph.SetSource(disk)
    glyph.SetInput(pd)
    glyph.SetVectorModeToUseNormal()
    glyph.Update()
    return shallowCopy(glyph.GetOutput())
def applyArrowGlyphs(polyData, computeNormals=True, voxelGridLeafSize=0.03, normalEstimationSearchRadius=0.05, arrowSize=0.02):
    '''Render per-point surface normals as arrow glyphs.

    If computeNormals is True the cloud is downsampled, normals are
    estimated (using a coarser voxel cloud as the search cloud), points
    with non-finite normals are dropped, and normals are flipped toward the
    viewer.  Otherwise the input must already carry a normals array.

    Returns a new polyData containing one arrow per point, oriented along
    its normal and scaled by arrowSize.
    '''
    if computeNormals:
        polyData = applyVoxelGrid(polyData, leafSize=0.02)
        voxelData = applyVoxelGrid(polyData, leafSize=voxelGridLeafSize)
        polyData = normalEstimation(polyData, searchRadius=normalEstimationSearchRadius, searchCloud=voxelData)
        polyData = removeNonFinitePoints(polyData, 'normals')
        flipNormalsWithViewDirection(polyData, SegmentationContext.getGlobalInstance().getViewDirection())

    assert polyData.GetPointData().GetNormals()

    arrowSource = vtk.vtkArrowSource()
    arrowSource.Update()

    glyphFilter = vtk.vtkGlyph3D()
    glyphFilter.SetScaleFactor(arrowSize)
    glyphFilter.SetSource(arrowSource.GetOutput())
    glyphFilter.SetInput(polyData)
    glyphFilter.SetVectorModeToUseNormal()
    glyphFilter.Update()
    return shallowCopy(glyphFilter.GetOutput())
def segmentLeverValve(point1, point2):
    '''Segment a lever valve from two user-picked points.

    point1 -- a point on the wall behind the lever (seeds the wall plane fit)
    point2 -- the lever pivot point

    Creates a 'valve lever' FrameAffordanceItem (lever_valve otdf): a fixed
    0.33m rod along the frame x axis with its pivot at point2, the frame z
    axis facing into the wall.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
    radius = 0.01
    length = 0.33
    normal = -normal # set z to face into wall
    # orthonormal frame: z into the wall, x as close to world-up as possible
    zaxis = normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point2)
    leverP1 = point2
    leverP2 = point2 + xaxis * length
    # geometry: a rod along x with a pivot sphere at the origin
    d = DebugData()
    d.addLine([0,0,0], [length, 0, 0], radius=radius)
    d.addSphere ( [0, 0, 0], 0.02)
    geometry = d.getPolyData()
    obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances', color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    frameObj = showFrame(t, 'lever frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
    otdfType = 'lever_valve'
    params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def segmentWye(point1, point2):
    '''Segment a wye (pipe fitting) mounted on a wall from two picked points.

    point1 -- a point on the wall (seeds the wall plane fit)
    point2 -- the wye attachment point on the wall

    Loads the wye mesh from the otdf models directory and places it as a
    'wye' FrameAffordanceItem with its x axis out of the wall.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
    wyeMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/wye.obj'))
    # known points in the wye mesh's local frame
    wyeMeshPoint = np.array([0.0, 0.0, 0.005])
    wyeMeshLeftHandle = np.array([0.032292, 0.02949, 0.068485])
    # orthonormal frame: x out of the wall, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    # shift so the mesh's reference point lands on the picked point
    t.PreMultiply()
    t.Translate(-wyeMeshPoint)
    t.PostMultiply()
    t.Translate(point2)
    d = DebugData()
    d.addSphere(point2, radius=0.005)
    updatePolyData(d.getPolyData(), 'wye pick point', parent=getDebugFolder(), visible=False)
    wyeObj = showPolyData(wyeMesh, 'wye', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    wyeObj.actor.SetUserTransform(t)
    wyeObj.addToView(app.getDRCView())
    frameObj = showFrame(t, 'wye frame', parent=wyeObj, visible=False)
    frameObj.addToView(app.getDRCView())
    params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='wye', otdf_type='wye')
    wyeObj.setAffordanceParams(params)
    wyeObj.updateParamsFromActorTransform()
def segmentDoorHandle(otdfType, point1, point2):
    '''Segment a door handle as a fixed-size box affordance.

    otdfType -- otdf type string recorded in the affordance params
    point1 -- a point on the door (seeds the door plane fit)
    point2 -- the handle location; the box is centered here

    Creates a 'door handle' FrameAffordanceItem (1x13x2.2 cm box) with its
    x axis out of the door.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
    # handle offset in the handle mesh frame; currently unused (the
    # corresponding Translate is commented out below)
    handlePoint = np.array([0.005, 0.065, 0.011])
    # orthonormal frame: x out of the door, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)
    xwidth = 0.01
    ywidth = 0.13
    zwidth = 0.022
    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    #t.PreMultiply()
    #t.Translate(-handlePoint)
    t.PostMultiply()
    t.Translate(point2)
    name = 'door handle'
    obj = showPolyData(cube, name, cls=FrameAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    # NOTE(review): params origin is the plane-fit origin, not the handle
    # position t.GetPosition() -- confirm this is intended.
    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentTruss(point1, point2):
    '''
    Create a truss ('robot_knees') affordance along the edge from point1 to
    point2, with a fixed stance offset baked into the geometry.
    '''
    edge = point2 - point1
    edgeLength = np.linalg.norm(edge)

    # Stance pose relative to the truss origin (meters / degrees).
    stanceOffset = [-0.42, 0.0, 0.0]
    stanceYaw = 0.0

    # Debug geometry: two endpoint spheres joined by a line, expressed in the
    # truss-local frame (the edge runs along local +y).
    d = DebugData()
    p1 = [0.0, 0.0, 0.0]
    p2 = -np.array([0.0, -1.0, 0.0]) * edgeLength
    d.addSphere(p1, radius=0.02)
    d.addSphere(p2, radius=0.02)
    d.addLine(p1, p2)

    stanceTransform = vtk.vtkTransform()
    stanceTransform.PostMultiply()
    stanceTransform.Translate(stanceOffset)
    #stanceTransform.RotateZ(stanceYaw)

    # Bake the inverse stance offset into the geometry so the affordance
    # frame sits at the stance location.
    geometry = transformPolyData(d.getPolyData(), stanceTransform.GetLinearInverse())

    # Frame: y along the edge, z up, x completing the right-handed set.
    yaxis = edge/edgeLength
    zaxis = [0.0, 0.0, 1.0]
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    xwidth = 0.1
    ywidth = edgeLength
    zwidth = 0.1

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Concatenate(stanceTransform)
    t.PostMultiply()
    t.Translate(point1)

    name = 'truss'
    otdfType = 'robot_knees'
    obj = showPolyData(geometry, name, cls=FrameAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=t.GetPosition(), xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()

    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentHoseNozzle(point1):
    '''
    Place a firehose-nozzle affordance at point1 with a fixed (axis-aligned)
    orientation.  The pointcloud is only used for a debug crop; the nozzle
    geometry itself is synthesized from hard-coded dimensions.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    searchRegion = cropToSphere(polyData, point1, 0.10)
    updatePolyData(searchRegion, 'nozzle search region', parent=getDebugFolder(), visible=False)

    # Fixed orientation: not estimated from the data.
    xaxis = [1,0,0]
    yaxis = [0,-1,0]
    zaxis = [0,0,-1]

    origin = point1

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)

    # Nozzle dimensions in meters (body cylinder plus wider tip cylinder).
    nozzleRadius = 0.0266
    nozzleLength = 0.042
    nozzleTipRadius = 0.031
    nozzleTipLength = 0.024

    d = DebugData()
    d.addLine(np.array([0,0,-nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0]), radius=nozzleRadius)
    d.addLine(np.array([0,0,nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0 + nozzleTipLength]), radius=nozzleTipRadius)

    obj = showPolyData(d.getPolyData(), 'hose nozzle', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    frameObj = showFrame(t, 'nozzle frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())

    params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='firehose', otdf_type='firehose')
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def segmentDrillWall(point1, point2, point3):
    '''
    Fit a triangular drill-target affordance through three clicked points.

    A plane is fit near the points (oriented toward the camera), the points
    are projected onto it, and a 'drill_wall' FrameAffordanceItem is created
    whose frame sits at the first point.  The affordance is registered for
    wall-refit updates via refitWallCallbacks.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    points = [point1, point2, point3]

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    expectedNormal = np.cross(point2 - point1, point3 - point1)
    expectedNormal /= np.linalg.norm(expectedNormal)
    # Flip the triangle normal toward the camera so the plane faces the viewer.
    if np.dot(expectedNormal, viewPlaneNormal) < 0:
        expectedNormal *= -1.0

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=(point1 + point2 + point3)/3.0, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)

    points = [projectPointToPlane(point, origin, normal) for point in points]

    # Wall frame: x faces out of the wall, z vertical.
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(points[0])

    # Build debug geometry (spheres + triangle outline) in the wall frame.
    d = DebugData()
    pointsInWallFrame = []
    for p in points:
        pp = np.zeros(3)
        t.GetLinearInverse().TransformPoint(p, pp)
        pointsInWallFrame.append(pp)
        d.addSphere(pp, radius=0.02)

    for a, b in zip(pointsInWallFrame, pointsInWallFrame[1:] + [pointsInWallFrame[0]]):
        d.addLine(a, b, radius=0.015)

    aff = showPolyData(d.getPolyData(), 'drill target', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill target frame', parent=aff, visible=False)

    refitWallCallbacks.append(functools.partial(refitDrillWall, aff))

    # Triangle corner coordinates are stored as in-plane (y, z) offsets.
    params = dict(origin=points[0], xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  p1y=pointsInWallFrame[0][1], p1z=pointsInWallFrame[0][2],
                  p2y=pointsInWallFrame[1][1], p2z=pointsInWallFrame[1][2],
                  p3y=pointsInWallFrame[2][1], p3z=pointsInWallFrame[2][2],
                  friendly_name='drill_wall', otdf_type='drill_wall')

    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
refitWallCallbacks = []
def refitWall(point1):
    '''
    Refit the wall plane near point1 and notify all registered refit
    callbacks with the clicked point and the fitted plane origin/normal.
    '''
    snapshot = om.findObjectByName('pointcloud snapshot')
    cloud = snapshot.polyData

    cameraNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    cloud, origin, normal = applyPlaneFit(
        cloud, expectedNormal=cameraNormal, searchOrigin=point1,
        searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)

    inliers = thresholdPoints(cloud, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(inliers, 'wall points', parent=getDebugFolder(), visible=False)

    for callback in refitWallCallbacks:
        callback(point1, origin, normal)
def refitDrillWall(aff, point1, origin, normal):
    '''
    Reposition the drill-wall affordance onto a refitted wall plane,
    preserving the affordance's original height (z).
    '''
    currentOrigin = np.array(aff.actor.GetUserTransform().GetPosition())
    newOrigin = projectPointToPlane(currentOrigin, origin, normal)
    newOrigin[2] = currentOrigin[2]  # keep the original height

    # Rebuild the wall-facing frame: x out of the wall, z vertical.
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    updated = getTransformFromAxes(xaxis, yaxis, zaxis)
    updated.PostMultiply()
    updated.Translate(newOrigin)

    # Update the existing user transform in place so observers see the change.
    aff.actor.GetUserTransform().SetMatrix(updated.GetMatrix())
# this should be deprecated!
def getGroundHeightFromFeet():
    '''
    Estimate the ground z height from the right foot link frame.
    The 0.0745342 constant is the fixed foot-frame-to-sole offset in meters.
    '''
    rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
    return np.array(rfoot.GetPosition())[2] - 0.0745342
# this should be deprecated!
def getTranslationRelativeToFoot(t):
    # NOTE(review): this body looks truncated - it fetches the right foot
    # frame but computes nothing and returns None. Confirm intent before use.
    rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
def segmentDrillWallConstrained(rightAngleLocation, point1, point2):
    '''
    Fit a drill wall from two clicked points, constraining the wall to be
    vertical.  point1 anchors the plane fit, point2 (projected to the plane)
    becomes the triangle origin passed to createDrillWall along with the
    rightAngleLocation selector.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    # Expected wall normal: perpendicular to the clicked edge and horizontal.
    expectedNormal = np.cross(point2 - point1, [0.0, 0.0, 1.0])
    expectedNormal /= np.linalg.norm(expectedNormal)
    # Flip toward the camera so the wall faces the viewer.
    if np.dot(expectedNormal, viewPlaneNormal) < 0:
        expectedNormal *= -1.0

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)

    triangleOrigin = projectPointToPlane(point2, origin, normal)

    # Wall-facing frame: x out of the wall, z vertical.
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(triangleOrigin)

    createDrillWall(rightAngleLocation, t)
def createDrillWall(rightAngleLocation, trianglePose):
    '''
    Create (or replace) the 'wall' drill-target triangle affordance.

    rightAngleLocation selects which corner of the right triangle holds the
    right angle (one of the DRILL_TRIANGLE_* constants); trianglePose is the
    vtkTransform giving the triangle frame.  The new affordance is registered
    with refitWallCallbacks so it tracks wall refits.

    Raises Exception if rightAngleLocation is not one of the known constants.
    '''
    # recover the origin and axes from the pose:
    triangleOrigin = trianglePose.GetPosition()
    xaxis, yaxis, zaxis = transformUtils.getAxesFromTransform( trianglePose )

    # 0.6096 = 24 * .0254 (m = feet)
    # 0.3048 = 12 * .0254 (m = feet)
    edgeRight = np.array([0.0, -1.0, 0.0]) * (0.6)
    edgeUp = np.array([0.0, 0.0, 1.0]) * (0.3)

    # Triangle corners in the wall frame; corner 0 is always the origin.
    pointsInWallFrame = np.zeros((3,3))

    if rightAngleLocation == DRILL_TRIANGLE_BOTTOM_LEFT:
        pointsInWallFrame[1] = edgeUp
        pointsInWallFrame[2] = edgeRight
    elif rightAngleLocation == DRILL_TRIANGLE_BOTTOM_RIGHT:
        pointsInWallFrame[1] = edgeUp # edgeRight +edgeUp
        pointsInWallFrame[2] = -edgeRight # edgeRight
    elif rightAngleLocation == DRILL_TRIANGLE_TOP_LEFT:
        pointsInWallFrame[1] = edgeRight
        pointsInWallFrame[2] = -edgeUp
    elif rightAngleLocation == DRILL_TRIANGLE_TOP_RIGHT:
        pointsInWallFrame[1] = edgeRight
        pointsInWallFrame[2] = edgeRight - edgeUp
    else:
        # Fixed: the original used «'...: ', + rightAngleLocation», which
        # raised with a two-element args tuple (stray comma + unary plus)
        # instead of a single formatted message.
        raise Exception('unexpected value for right angle location: %r' % rightAngleLocation)

    center = pointsInWallFrame.sum(axis=0)/3.0
    shrinkFactor = 1#0.90
    shrinkPoints = (pointsInWallFrame - center) * shrinkFactor + center

    # Debug geometry: corner spheres plus the full and shrunk outlines.
    d = DebugData()
    for p in pointsInWallFrame:
        d.addSphere(p, radius=0.015)

    for a, b in zip(pointsInWallFrame, np.vstack((pointsInWallFrame[1:], pointsInWallFrame[0]))):
        d.addLine(a, b, radius=0.005)#01)

    for a, b in zip(shrinkPoints, np.vstack((shrinkPoints[1:], shrinkPoints[0]))):
        d.addLine(a, b, radius=0.005)#0.025

    folder = om.getOrCreateContainer('affordances')

    # Replace any previous wall affordance.
    wall = om.findObjectByName('wall')
    om.removeFromObjectModel(wall)

    aff = showPolyData(d.getPolyData(), 'wall', cls=FrameAffordanceItem, color=[0,1,0], visible=True, parent=folder)
    aff.actor.SetUserTransform(trianglePose)
    aff.addToView(app.getDRCView())

    refitWallCallbacks.append(functools.partial(refitDrillWall, aff))

    frameObj = showFrame(trianglePose, 'wall frame', parent=aff, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())

    # Corner coordinates are stored as in-plane (y, z) offsets.
    params = dict(origin=triangleOrigin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  p1y=shrinkPoints[0][1], p1z=shrinkPoints[0][2],
                  p2y=shrinkPoints[1][1], p2z=shrinkPoints[1][2],
                  p3y=shrinkPoints[2][1], p3z=shrinkPoints[2][2],
                  friendly_name='drill_wall', otdf_type='drill_wall')

    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()

    # Dead debug code kept for reference (visualizes the foot-to-wall offset).
    '''
    rfoot = getLinkFrame(drcargs.getDirectorConfig()['rightFootLink'])
    tt = getTransformFromAxes(xaxis, yaxis, zaxis)
    tt.PostMultiply()
    tt.Translate(rfoot.GetPosition())
    showFrame(tt, 'rfoot with wall orientation')
    aff.footToAffTransform = computeAToB(tt, trianglePose)
    footToAff = list(aff.footToAffTransform.GetPosition())
    tt.TransformVector(footToAff, footToAff)
    d = DebugData()
    d.addSphere(tt.GetPosition(), radius=0.02)
    d.addLine(tt.GetPosition(), np.array(tt.GetPosition()) + np.array(footToAff))
    showPolyData(d.getPolyData(), 'rfoot debug')
    '''
def getDrillAffordanceParams(origin, xaxis, yaxis, zaxis, drillType="dewalt_button"):
    '''
    Build the affordance parameter dict for a drill.

    The frame (origin and axes) comes from the caller; button and bit offsets
    are hard-coded per drill model.  drillType "dewalt_button" selects the
    button-style drill; anything else yields the barrel-style parameters.
    '''
    # Fields common to both drill models.
    params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
                  xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  button_x=0.007,
                  button_y=-0.035,
                  button_z=-0.06)

    if drillType == "dewalt_button":
        params.update(button_roll=-90.0,
                      button_pitch=-90.0,
                      button_yaw=0.0,
                      bit_x=-0.01,
                      bit_y=0.0,
                      bit_z=0.15,
                      bit_roll=0,
                      bit_pitch=-90,
                      bit_yaw=0,
                      friendly_name='dewalt_button', otdf_type='dewalt_button')
    else:
        params.update(button_roll=0.0,
                      button_pitch=0.0,
                      button_yaw=0.0,
                      bit_x=0.18,
                      bit_y=0.0,
                      bit_z=0.13,
                      bit_roll=0,
                      bit_pitch=0,
                      bit_yaw=0,
                      friendly_name='dewalt_barrel', otdf_type='dewalt_barrel')

    return params
def getDrillMesh(applyBitOffset=False):
    '''
    Load the dewalt button drill mesh and annotate it with a green sphere at
    the button location and a short green line segment at the bit.
    If applyBitOffset is True, the mesh is shifted 1cm along +x
    before annotation.
    '''
    button = np.array([0.007, -0.035, -0.06])

    drillMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt_button.obj'))

    if applyBitOffset:
        t = vtk.vtkTransform()
        t.Translate(0.01, 0.0, 0.0)
        drillMesh = transformPolyData(drillMesh, t)

    d = DebugData()
    d.addPolyData(drillMesh)
    d.addSphere(button, radius=0.005, color=[0,1,0])
    d.addLine([0.0,0.0,0.155], [0.0, 0.0, 0.14], radius=0.001, color=[0,1,0])
    return shallowCopy(d.getPolyData())
def getDrillBarrelMesh():
    '''Load the barrel-style dewalt drill mesh with computed normals.'''
    return ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt.ply'), computeNormals=True)
def segmentDrill(point1, point2, point3):
    '''
    Fit a drill affordance from three clicks: point1 on the supporting table
    (for the plane fit), point2 on the drill top, and point3 defining the
    drill's facing direction together with point2.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    # Fit the table plane near point1, facing the camera.
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # Points above the table, near the clicked drill position.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
    searchRegion = cropToSphere(searchRegion, point2, 0.30)
    drillPoints = extractLargestCluster(searchRegion)

    # Offset from the drill model origin to its top point (meters).
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    # Frame: z along the table normal, y toward point3.
    zaxis = normal
    yaxis = point3 - point2
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(point2)

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())

    params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def makePolyDataFields(pd):
    '''
    Compute an oriented-bounding-box description of a point cloud.

    Returns a FieldContainer with the input points, box wireframe, Delaunay
    mesh (all expressed in the box-local frame), the box frame, edge lengths
    and axes - or None if the Delaunay mesh is empty.  The box frame is
    re-oriented so its z axis is the edge closest to world +z, pointing up.
    '''
    mesh = computeDelaunay3D(pd)

    if not mesh.GetNumberOfPoints():
        return None

    origin, edges, wireframe = getOrientedBoundingBox(mesh)

    edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
    axes = [edge / np.linalg.norm(edge) for edge in edges]

    # find axis nearest to the +/- up vector
    upVector = [0, 0, 1]
    dotProducts = [np.abs(np.dot(axe, upVector)) for axe in axes]
    zAxisIndex = np.argmax(dotProducts)

    # re-index axes and edge lengths so that the found axis is the z axis
    axesInds = [(zAxisIndex+1)%3, (zAxisIndex+2)%3, zAxisIndex]
    axes = [axes[i] for i in axesInds]
    edgeLengths = [edgeLengths[i] for i in axesInds]

    # flip if necessary so that z axis points toward up
    # (both y and z are negated so the frame stays right-handed)
    if np.dot(axes[2], upVector) < 0:
        axes[1] = -axes[1]
        axes[2] = -axes[2]

    boxCenter = computeCentroid(wireframe)

    t = getTransformFromAxes(axes[0], axes[1], axes[2])
    t.PostMultiply()
    t.Translate(boxCenter)

    # Express all geometry in the box-local frame.
    pd = transformPolyData(pd, t.GetLinearInverse())
    wireframe = transformPolyData(wireframe, t.GetLinearInverse())
    mesh = transformPolyData(mesh, t.GetLinearInverse())

    return FieldContainer(points=pd, box=wireframe, mesh=mesh, frame=t, dims=edgeLengths, axes=axes)
def makeMovable(obj, initialTransform=None):
    '''
    Adds a child frame to the given PolyDataItem. If initialTransform is not
    given, then an origin frame is computed for the polydata using the
    center and orientation of the oriented bounding of the polydata. The polydata
    is transformed using the inverse of initialTransform and then a child frame
    is assigned to the object to reposition it.
    '''
    pd = obj.polyData
    t = initialTransform

    if t is None:
        # Derive a frame from the oriented bounding box of the points.
        origin, edges, wireframe = getOrientedBoundingBox(pd)
        edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
        axes = [edge / np.linalg.norm(edge) for edge in edges]
        boxCenter = computeCentroid(wireframe)
        t = getTransformFromAxes(axes[0], axes[1], axes[2])
        t.PostMultiply()
        t.Translate(boxCenter)

    # Move the geometry into the local frame; the actor transform puts it back.
    pd = transformPolyData(pd, t.GetLinearInverse())
    obj.setPolyData(pd)

    # Reuse an existing child frame if present, otherwise create one.
    frame = obj.getChildFrame()
    if frame:
        frame.copyFrame(t)
    else:
        frame = vis.showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
        obj.actor.SetUserTransform(t)
def segmentTable(polyData, searchPoint):
    '''
    NB: If you wish to use the table frame use segmentTableAndFrame instead
    ##################
    Segment a horizontal table surface (perpendicular to +Z) in the given polyData
    Input:
    - polyData
    - search point on plane
    Output:
    - polyData, tablePoints, origin, normal
    - polyData is the input polyData with a new 'dist_to_plane' attribute.
    '''
    expectedNormal = np.array([0.0, 0.0, 1.0])
    tableNormalEpsilon = 0.4

    # Downsample before the plane fit to keep it fast.
    polyData = applyVoxelGrid(polyData, leafSize=0.01)

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=searchPoint, searchRadius=0.3, angleEpsilon=tableNormalEpsilon, returnOrigin=True)
    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])

    # Of the coplanar clusters, keep the one closest to the search point.
    tablePoints = labelDistanceToPoint(tablePoints, searchPoint)
    tablePointsClusters = extractClusters(tablePoints, minClusterSize=10, clusterTolerance=0.1)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    return polyData, tablePoints, origin, normal
def filterClusterObjects(clusters):
    '''
    Keep only clusters that look like upright objects: the cluster's third
    axis must be within ~60 degrees of vertical (|dot with +z| >= 0.5) and
    its third dimension must be at least 0.1 m tall.
    '''
    upright = lambda c: np.abs(np.dot(c.axes[2], [0, 0, 1])) >= 0.5
    tallEnough = lambda c: c.dims[2] >= 0.1
    return [c for c in clusters if upright(c) and tallEnough(c)]
def segmentTableScene(polyData, searchPoint, filterClustering = True):
    '''
    Segment a table and the objects sitting on it.

    Returns a FieldContainer(table=..., clusters=...) where each cluster is a
    makePolyDataFields result augmented with an 'oriented_frame' whose z axis
    is up and whose x axis follows the table's x axis.  When filterClustering
    is True, clusters that do not look like upright objects are dropped.
    '''
    objectClusters, tableData = segmentTableSceneClusters(polyData, searchPoint)
    clusters = [makePolyDataFields(cluster) for cluster in objectClusters]
    clusters = [cluster for cluster in clusters if cluster is not None]

    # Add an additional frame to these objects which has z-axis aligned upwards
    # but rotated to have the x-axis facing away from the robot
    table_axes= transformUtils.getAxesFromTransform(tableData.frame)

    for cluster in clusters:
        cluster_axes= transformUtils.getAxesFromTransform(cluster.frame)

        # Combine the cluster's up axis with the table's forward axis and
        # re-orthonormalize.
        zaxis = cluster_axes[2]
        xaxis = table_axes[0]

        yaxis = np.cross(zaxis, xaxis)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis /= np.linalg.norm(yaxis)
        orientedFrame = transformUtils.getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, cluster.frame.GetPosition() )
        cluster._add_fields(oriented_frame=orientedFrame)

    if (filterClustering):
        clusters = filterClusterObjects(clusters)

    return FieldContainer(table=tableData, clusters=clusters)
def segmentTableSceneClusters(polyData, searchPoint, clusterInXY=False):
    ''' Given a point cloud of a table with some objects on it
        and a point on that table
        determine the plane of the table and
        extract clusters above the table

        Returns (objectClusters, tableData) where tableData comes from
        segmentTableAndFrame.
    '''
    tableData, polyData = segmentTableAndFrame(polyData, searchPoint)

    # Points 2-50cm above the table plane, near the table center.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.5])
    # TODO: replace with 'all points above the table':
    searchRegion = cropToSphere(searchRegion, tableData.frame.GetPosition() , 0.5) # was 1.0

    showFrame(tableData.frame, 'tableFrame', visible=False, parent=getDebugFolder(), scale=0.15)
    showPolyData(searchRegion, 'searchRegion', color=[1,0,0], visible=False, parent=getDebugFolder())

    objectClusters = extractClusters(searchRegion, clusterInXY, clusterTolerance=0.02, minClusterSize=10)

    #print 'got %d clusters' % len(objectClusters)
    for i,c in enumerate(objectClusters):
        name= "cluster %d" % i
        showPolyData(c, name, color=getRandomColor(), visible=False, parent=getDebugFolder())

    return objectClusters, tableData
def segmentTableAndFrame(polyData, searchPoint):
    '''
    Segment a table using a searchPoint on the table top
    and then recover its coordinate frame, facing away from the robot
    Objects/points on the table are ignored

    Input: polyData and searchPoint on the table
    Output: FieldContainer with:
    - all relevent details about the table (only)
    '''
    polyData, tablePoints, _, _ = segmentTable(polyData, searchPoint)
    tableMesh = computeDelaunay3D(tablePoints)

    # Build a yaw-only robot frame from the current view direction.
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    robotYaw = math.atan2( viewDirection[1], viewDirection[0] )*180.0/np.pi
    linkFrame = transformUtils.frameFromPositionAndRPY( viewFrame.GetPosition() , [0,0, robotYaw ] )

    # Function returns corner point that is far right from the robot
    cornerTransform, rectDepth, rectWidth, _ = findMinimumBoundingRectangle(tablePoints, linkFrame)

    rectHeight = 0.02 # arbitrary table width

    # recover mid point
    t = transformUtils.copyFrame(cornerTransform)
    t.PreMultiply()
    table_center = [-rectDepth/2, rectWidth/2, 0]
    t3 = transformUtils.frameFromPositionAndRPY(table_center,[0,0,0])
    t.Concatenate(t3)

    # Create required outputs
    edgeLengths = [rectDepth, rectWidth, rectHeight]
    tableXAxis, tableYAxis, tableZAxis = transformUtils.getAxesFromTransform(t)
    axes = tableXAxis, tableYAxis, tableZAxis

    # NOTE(review): wf.Update() is never called before GetOutput(); with VTK
    # sources this can yield an empty wireframe - confirm downstream use.
    wf = vtk.vtkOutlineSource()
    wf.SetBounds([-rectDepth/2,rectDepth/2, -rectWidth/2,rectWidth/2, -rectHeight/2,rectHeight/2])
    #wf.SetBoxTypeToOriented()
    #cube =[0,0,0,1,0,0,0,1,0,1,1,0,0,0,1,1,0,1,0,1,1,1,1,1]
    #wf.SetCorners(cube)
    wireframe = wf.GetOutput()

    # Express the table points and mesh in the table-local frame.
    tablePoints = transformPolyData(tablePoints, t.GetLinearInverse())
    #wireframe = transformPolyData(wireframe, t.GetLinearInverse())
    tableMesh = transformPolyData(tableMesh, t.GetLinearInverse())

    return FieldContainer(points=tablePoints, box=wireframe, mesh=tableMesh, frame=t, dims=edgeLengths, axes=axes), polyData
def segmentDrillAuto(point1, polyData=None):
    '''
    Fit a drill affordance from a single click on the supporting table.

    The table plane is fit near point1, the drill is taken as the largest
    cluster above the table, and its yaw is estimated from the line through
    the per-slice centroids of the drill cluster.
    '''
    if polyData is None:
        inputObj = om.findObjectByName('pointcloud snapshot')
        polyData = inputObj.polyData

    expectedNormal = np.array([0.0, 0.0, 1.0])

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=point1, searchRadius=0.4, angleEpsilon=0.2, returnOrigin=True)

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # Keep the coplanar cluster nearest the clicked point as the table.
    tablePoints = labelDistanceToPoint(tablePoints, point1)
    tablePointsClusters = extractClusters(tablePoints)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    # The drill is the largest cluster 3-40cm above the table near the click.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
    searchRegion = cropToSphere(searchRegion, point1, 0.30)
    drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)

    # determine drill orientation (rotation about z axis)
    centroids = computeCentroids(drillPoints, axis=normal)

    centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
    d = DebugData()
    updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)

    # Offset from the drill model origin to its top point (meters).
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    zaxis = normal
    yaxis = centroids[0] - centroids[-1]
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    # note this hack to orient the drill correctly:
    t = getTransformFromAxes(yaxis, -xaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(centroids[-1])

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False, scale=0.2).addToView(app.getDRCView())

    params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def segmentDrillButton(point1):
    '''
    Mark the sensed drill button position with a small sphere and an
    (orientation-free) frame so the XYZ point can be queried later.
    '''
    marker = DebugData()
    marker.addSphere([0,0,0], radius=0.005)
    buttonObj = updatePolyData(marker.getPolyData(), 'sensed drill button', color=[0,0.5,0.5], visible=True)

    # there is no orientation, but this allows the XYZ point to be queried
    buttonFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
    buttonObj.actor.SetUserTransform(buttonFrame)
    buttonObj.addToView(app.getDRCView())

    frameItem = updateFrame(buttonObj.actor.GetUserTransform(), 'sensed drill button frame',
                            parent=buttonObj, scale=0.2, visible=False)
    frameItem.addToView(app.getDRCView())
def segmentPointerTip(point1):
    '''
    Mark the sensed pointer tip position with a small sphere and an
    (orientation-free) frame so the XYZ point can be queried later.
    '''
    marker = DebugData()
    marker.addSphere([0,0,0], radius=0.005)
    tipObj = updatePolyData(marker.getPolyData(), 'sensed pointer tip', color=[0.5,0.5,0.0], visible=True)

    # there is no orientation, but this allows the XYZ point to be queried
    tipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
    tipObj.actor.SetUserTransform(tipFrame)
    tipObj.addToView(app.getDRCView())

    frameItem = updateFrame(tipObj.actor.GetUserTransform(), 'sensed pointer tip frame',
                            parent=tipObj, scale=0.2, visible=False)
    frameItem.addToView(app.getDRCView())
def fitGroundObject(polyData=None, expectedDimensionsMin=[0.2, 0.02], expectedDimensionsMax=[1.3, 0.1]):
    '''
    Find a small object lying on the ground whose two largest bounding-box
    edges fall within the given [min, max) dimension ranges (meters).
    Among matching clusters the one nearest the view origin is chosen.
    Returns the shown cluster object, or None if nothing matches.

    NOTE(review): the mutable list defaults are only read, never mutated, so
    the shared-default pitfall does not bite here.
    '''
    removeGroundFunc = removeGroundSimple

    polyData = polyData or getCurrentRevolutionData()
    groundPoints, scenePoints =  removeGroundFunc(polyData, groundThickness=0.02, sceneHeightFromGround=0.035)

    searchRegion = thresholdPoints(scenePoints, 'dist_to_plane', [0.05, 0.2])
    clusters = extractClusters(searchRegion, clusterTolerance=0.07, minClusterSize=4)

    candidates = []
    for clusterId, cluster in enumerate(clusters):

        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]

        found = (expectedDimensionsMin[0] <= edgeLengths[0] < expectedDimensionsMax[0]
                 and expectedDimensionsMin[1] <= edgeLengths[1] < expectedDimensionsMax[1])

        if not found:
            updatePolyData(cluster, 'candidate cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
            continue

        updatePolyData(cluster, 'cluster %d' % clusterId, color=[0,1,0], parent=getDebugFolder(), visible=False)
        candidates.append(cluster)

    if not candidates:
        return None

    # Prefer the candidate closest to the robot's view origin.
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    viewOrigin = np.array(viewFrame.GetPosition())

    dists = [np.linalg.norm(viewOrigin - computeCentroid(cluster)) for cluster in candidates]
    candidates = [candidates[i] for i in np.argsort(dists)]

    cluster = candidates[0]
    obj = makePolyDataFields(cluster)

    return vis.showClusterObjects([obj], parent='segmentation')[0]
def findHorizontalSurfaces(polyData, removeGroundFirst=False, normalEstimationSearchRadius=0.05,
                           clusterTolerance=0.025, minClusterSize=150, distanceToPlaneThreshold=0.0025, normalsDotUpRange=[0.95, 1.0], showClusters=False):
    '''
    Find the horizontal surfaces, tuned to work with walking terrain.

    Estimates per-point normals, keeps points whose normals are close to
    vertical (per normalsDotUpRange), clusters them, and plane-fits each
    cluster.  Returns the list of clusters whose plane inliers exceed
    minClusterSize (or None if there are no scene points); optionally shows
    the fitted plane objects when showClusters is True.
    '''
    searchZ = [0.0, 2.0]
    voxelGridLeafSize = 0.01
    verboseFlag = False

    if (removeGroundFirst):
        groundPoints, scenePoints =  removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05)
        scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', searchZ)
        updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=verboseFlag)
    else:
        scenePoints = polyData

    if not scenePoints.GetNumberOfPoints():
        return

    # Estimate normals on the full cloud using a voxel-gridded search surface.
    f = vtk.vtkPCLNormalEstimation()
    f.SetSearchRadius(normalEstimationSearchRadius)
    f.SetInput(scenePoints)
    f.SetInput(1, applyVoxelGrid(scenePoints, voxelGridLeafSize))

    # Duration 0.2 sec for V1 log:
    f.Update()
    scenePoints = shallowCopy(f.GetOutput())

    normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
    normalsDotUp = np.abs(np.dot(normals, [0,0,1]))

    vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')

    surfaces = thresholdPoints(scenePoints, 'normals_dot_up', normalsDotUpRange)

    updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
    updatePolyData(surfaces, 'surfaces points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)

    clusters = extractClusters(surfaces, clusterTolerance=clusterTolerance, minClusterSize=minClusterSize)

    planeClusters = []
    clustersLarge = []

    om.removeFromObjectModel(om.findObjectByName('surface clusters'))
    folder = om.getOrCreateContainer('surface clusters', parentObj=getDebugFolder())

    for i, cluster in enumerate(clusters):
        updatePolyData(cluster, 'surface cluster %d' % i, parent=folder, color=getRandomColor(), visible=verboseFlag)
        planePoints, _ = applyPlaneFit(cluster, distanceToPlaneThreshold)
        planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])

        if planePoints.GetNumberOfPoints() > minClusterSize:
            clustersLarge.append(cluster)
            obj = makePolyDataFields(planePoints)
            if obj is not None:
                planeClusters.append(obj)

    folder = om.getOrCreateContainer('surface objects', parentObj=getDebugFolder())
    if showClusters:
        vis.showClusterObjects(planeClusters, parent=folder)

    return clustersLarge
def fitVerticalPosts(polyData):
    '''
    Find vertical post-like clusters in the scene and create a cylinder
    affordance for each.  A cluster is a post when its fitted line is within
    15 degrees of vertical, it is at least 1m long, and the top half of the
    cluster is thinner than 0.3m in both cross-section directions.
    '''
    groundPoints, scenePoints = removeGround(polyData)
    scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.1, 4.0])

    if not scenePoints.GetNumberOfPoints():
        return

    scenePoints = applyVoxelGrid(scenePoints, leafSize=0.03)
    clusters = extractClusters(scenePoints, clusterTolerance=0.15, minClusterSize=10)

    def isPostCluster(cluster, lineDirection):
        # Reject clusters whose fitted line is not near-vertical, too short,
        # or too wide in the upper half.
        up = [0,0,1]
        minPostLength = 1.0
        maxRadius = 0.3

        angle = math.degrees(math.acos(np.dot(up,lineDirection) / (np.linalg.norm(up) * np.linalg.norm(lineDirection))))

        if angle > 15:
            return False

        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges]

        if edgeLengths[0] < minPostLength:
            return False

        # extract top half
        zvalues = vtkNumpy.getNumpyFromVtk(cluster, 'Points')[:,2].copy()
        vtkNumpy.addNumpyToVtk(cluster, zvalues, 'z')

        minZ = np.min(zvalues)
        maxZ = np.max(zvalues)

        cluster = thresholdPoints(cluster, 'z', [(minZ + maxZ)/2.0, maxZ])
        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges]

        if edgeLengths[1] > maxRadius or edgeLengths[2] > maxRadius:
            return False

        return True

    def makeCylinderAffordance(linePoints, lineDirection, lineOrigin, postId):
        # Span the cylinder over the extent of the points projected onto the
        # fitted line, centered at the midpoint.
        pts = vtkNumpy.getNumpyFromVtk(linePoints, 'Points')
        dists = np.dot(pts-lineOrigin, lineDirection)
        p1 = lineOrigin + lineDirection*np.min(dists)
        p2 = lineOrigin + lineDirection*np.max(dists)

        origin = (p1+p2)/2.0
        lineLength = np.linalg.norm(p2-p1)
        t = transformUtils.getTransformFromOriginAndNormal(origin, lineDirection)
        pose = transformUtils.poseFromTransform(t)

        desc = dict(classname='CylinderAffordanceItem', Name='post %d' % postId,
                    uuid=newUUID(), pose=pose, Radius=0.05, Length=float(lineLength), Color=[0.0, 1.0, 0.0])
        desc['Collision Enabled'] = True

        return affordanceManager.newAffordanceFromDescription(desc)

    rejectFolder = om.getOrCreateContainer('nonpost clusters', parentObj=getDebugFolder())
    keepFolder = om.getOrCreateContainer('post clusters', parentObj=getDebugFolder())

    for i, cluster in enumerate(clusters):
        linePoint, lineDirection, linePoints = applyLineFit(cluster, distanceThreshold=0.1)
        if isPostCluster(cluster, lineDirection):
            vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=keepFolder)
            makeCylinderAffordance(linePoints, lineDirection, linePoint, i)
        else:
            vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=rejectFolder)
def findAndFitDrillBarrel(polyData=None):
    '''Find horizontal, table-like surfaces in the scene and fit a drill
    barrel on each one.

    Uses the 'pointcloud snapshot' object when polyData is not given.  The
    ground is removed, points 0.5m-1.7m above it are kept, and up-facing
    patches are clustered.  Each cluster with a table-sized footprint is
    searched for a drill barrel; successful fits are handed to
    sortFittedDrills for ranking and display.  Returns the centroid of the
    last table-sized cluster processed (an empty list when none matched),
    or None when no scene points / no fits remain.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = polyData or inputObj.polyData

    # Drop the ground plane, then keep only points 0.5m-1.7m above it.
    groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.50)

    scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.5, 1.7])

    if not scenePoints.GetNumberOfPoints():
        return

    # Estimate point normals so horizontal (up-facing) surfaces can be selected.
    normalEstimationSearchRadius = 0.10

    f = vtk.vtkPCLNormalEstimation()
    f.SetSearchRadius(normalEstimationSearchRadius)
    f.SetInput(scenePoints)
    f.Update()
    scenePoints = shallowCopy(f.GetOutput())

    normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
    normalsDotUp = np.abs(np.dot(normals, [0,0,1]))

    vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')

    # Keep points whose normal is within ~18 degrees of vertical.
    surfaces = thresholdPoints(scenePoints, 'normals_dot_up', [0.95, 1.0])

    updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=False)
    updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=False)
    updatePolyData(surfaces, 'surfaces', parent=getDebugFolder(), visible=False)

    clusters = extractClusters(surfaces, clusterTolerance=0.15, minClusterSize=50)

    fitResults = []

    # Robot pose (origin and forward direction) used to rank candidate fits.
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    forwardDirection = np.array([1.0, 0.0, 0.0])
    viewFrame.TransformVector(forwardDirection, forwardDirection)
    robotOrigin = viewFrame.GetPosition()
    robotForward =forwardDirection

    #print 'robot origin:', robotOrigin
    #print 'robot forward:', robotForward
    centroid =[]
    for clusterId, cluster in enumerate(clusters):
        clusterObj = updatePolyData(cluster, 'surface cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)

        # Reject clusters whose horizontal footprint is not table-sized.
        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]

        skipCluster = False
        for edgeLength in edgeLengths:
            #print 'cluster %d edge length: %f' % (clusterId, edgeLength)
            if edgeLength < 0.35 or edgeLength > 0.75:
                skipCluster = True

        if skipCluster:
            continue

        clusterObj.setSolidColor([0, 0, 1])
        centroid = np.average(vtkNumpy.getNumpyFromVtk(cluster, 'Points'), axis=0)

        # Try a barrel fit at the cluster centroid; a failed fit on one
        # cluster should not abort the search over the others.
        try:
            drillFrame = segmentDrillBarrelFrame(centroid, polyData=scenePoints, forwardDirection=robotForward)
            if drillFrame is not None:
                fitResults.append((clusterObj, drillFrame))
        except:
            print traceback.format_exc()
            print 'fit drill failed for cluster:', clusterId

    if not fitResults:
        return

    sortFittedDrills(fitResults, robotOrigin, robotForward)

    return centroid
def sortFittedDrills(fitResults, robotOrigin, robotForward):
    '''Rank candidate drill fits by angular offset from the robot's forward
    direction and visualize them.

    The best (smallest-angle) candidate becomes the green 'drill' affordance;
    the remaining candidates are shown as red 'drill candidate' debug meshes.

    fitResults is a list of (clusterObj, drillFrame) pairs as produced by
    findAndFitDrillBarrel; robotOrigin/robotForward give the robot pose used
    for ranking.
    '''
    angleToFitResults = []

    for fitResult in fitResults:
        cluster, drillFrame = fitResult
        drillOrigin = np.array(drillFrame.GetPosition())
        angleToDrill = np.abs(computeSignedAngleBetweenVectors(robotForward, drillOrigin - robotOrigin, [0,0,1]))
        angleToFitResults.append((angleToDrill, cluster, drillFrame))
        #print 'angle to candidate drill:', angleToDrill

    angleToFitResults.sort(key=lambda x: x[0])

    #print 'using drill at angle:', angleToFitResults[0][0]

    drillMesh = getDrillBarrelMesh()

    for i, fitResult in enumerate(angleToFitResults):

        angleToDrill, cluster, drillFrame = fitResult

        if i == 0:
            # Best candidate: publish as the real drill affordance.
            # (A dead `drill = om.findObjectByName('drill')` lookup that was
            # immediately overwritten has been removed here.)
            drill = updatePolyData(drillMesh, 'drill', color=[0, 1, 0], cls=FrameAffordanceItem, visible=True)
            drillFrame = updateFrame(drillFrame, 'drill frame', parent=drill, visible=False)
            drill.actor.SetUserTransform(drillFrame.transform)

            drill.setAffordanceParams(dict(otdf_type='dewalt_button', friendly_name='dewalt_button'))
            drill.updateParamsFromActorTransform()

            drill.setSolidColor([0, 1, 0])
            #cluster.setProperty('Visible', True)

        else:
            # Runner-up candidates: show as debug-only meshes.
            drill = showPolyData(drillMesh, 'drill candidate', color=[1,0,0], visible=False, parent=getDebugFolder())
            drill.actor.SetUserTransform(drillFrame)
            om.addToObjectModel(drill, parentObj=getDebugFolder())
def computeSignedAngleBetweenVectors(v1, v2, perpendicularVector):
    '''
    Return the signed angle, in radians, from v1 to v2 in 3d.  The sign is
    determined by the supplied perpendicular (axis) vector.
    '''
    a = np.array(v1, dtype=float)
    b = np.array(v2, dtype=float)
    axis = np.array(perpendicularVector, dtype=float)

    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    axis = axis / np.linalg.norm(axis)

    # atan2(sin, cos) with the sine projected onto the reference axis to
    # recover the sign of the rotation.
    sineComponent = np.dot(axis, np.cross(a, b))
    cosineComponent = np.dot(a, b)
    return math.atan2(sineComponent, cosineComponent)
def segmentDrillBarrelFrame(point1, polyData, forwardDirection):
    '''Fit a drill barrel frame near point1.

    A horizontal table plane is fit around point1 first; points above that
    plane within drillClusterSearchRadius are then passed to fitDrillBarrel.
    Returns the fitted vtkTransform, or None when the search regions are
    empty.
    '''
    tableClusterSearchRadius = 0.4
    drillClusterSearchRadius = 0.5 #0.3

    expectedNormal = np.array([0.0, 0.0, 1.0])

    if not polyData.GetNumberOfPoints():
        return

    # Fit the (horizontal) table plane around the seed point.
    polyData, plane_origin, plane_normal = applyPlaneFit(polyData, expectedNormal=expectedNormal,
        perpendicularAxis=expectedNormal, searchOrigin=point1,
        searchRadius=tableClusterSearchRadius, angleEpsilon=0.2, returnOrigin=True)

    if not polyData.GetNumberOfPoints():
        return

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # Keep only the table cluster nearest to the seed point.
    tablePoints = labelDistanceToPoint(tablePoints, point1)
    tablePointsClusters = extractClusters(tablePoints)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    if not tablePointsClusters:
        return

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    # The drill lives 2cm-30cm above the table plane, within the search sphere.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.3])
    if not searchRegion.GetNumberOfPoints():
        return

    searchRegion = cropToSphere(searchRegion, point1, drillClusterSearchRadius)
    #drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)
    t = fitDrillBarrel (searchRegion, forwardDirection, plane_origin, plane_normal)
    return t
def segmentDrillBarrel(point1):
    '''Fit a drill barrel at the picked point and show it as an affordance.

    Uses the current 'pointcloud snapshot' and the camera's view direction
    as the forward direction for the fit.  Returns (affordance, frameObj).
    Asserts if the barrel fit fails.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    forwardDirection = -np.array(getCurrentView().camera().GetViewPlaneNormal())

    # Bug fix: this previously called segmentDrillBarrel (itself), recursing
    # forever.  The intended helper is segmentDrillBarrelFrame.
    t = segmentDrillBarrelFrame(point1, polyData, forwardDirection)

    assert t is not None

    drillMesh = getDrillBarrelMesh()

    aff = showPolyData(drillMesh, 'drill', visible=True)
    aff.addToView(app.getDRCView())

    aff.actor.SetUserTransform(t)
    drillFrame = showFrame(t, 'drill frame', parent=aff, visible=False)
    drillFrame.addToView(app.getDRCView())
    return aff, drillFrame
def segmentDrillAlignedWithTable(point, polyData = None):
    '''
    Yet Another Drill Fitting Algorithm [tm]
    This one fits the button drill assuming its on the table
    and aligned with the table frame (because the button drill orientation is difficult to find)
    Table must have long side facing robot
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = polyData or inputObj.polyData

    # segment the table and recover the precise up direction normal:
    polyDataOut, tablePoints, origin, normal = segmentTable(polyData,point)
    #print origin # this origin is bunk
    #tableCentroid = computeCentroid(tablePoints)

    # get the bounding box edges
    OBBorigin, edges, _ = getOrientedBoundingBox(tablePoints)
    #print "OBB out"
    #print OBBorigin
    #print edges
    edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
    axes = [edge / np.linalg.norm(edge) for edge in edges]
    #print edgeLengths
    #print axes

    # check which direction the robot is facing and flip x-axis of table if necessary
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    #print "main axes", axes[1]
    #print "viewDirection", viewDirection
    #dp = np.dot(axes[1], viewDirection)
    #print dp

    if np.dot(axes[1], viewDirection) < 0:
        #print "flip the x-direction"
        axes[1] = -axes[1]

    # define the x-axis to be along the 2nd largest edge
    xaxis = axes[1]
    xaxis = np.array(xaxis)
    zaxis = np.array( normal )
    # build an orthonormal right-handed table frame from the fitted normal
    # and the chosen x axis.
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    tableOrientation = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)

    #tableTransform = transformUtils.frameFromPositionAndRPY( tableCentroid , tableOrientation.GetOrientation() )
    #updateFrame(tableTransform, 'table frame [z up, x away face]', parent="segmentation", visible=True).addToView(app.getDRCView())

    data = segmentTableScene(polyData, point )
    #vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # crude use of the table frame to determine the frame of the drill on the table
    #t2 = transformUtils.frameFromPositionAndRPY([0,0,0], [180, 0 , 90] )
    #drillOrientationTransform = transformUtils.copyFrame( om.findObjectByName('object 1 frame').transform )
    #drillOrientationTransform.PreMultiply()
    #drillOrientationTransform.Concatenate(t2)
    #vis.updateFrame(t, 'drillOrientationTransform',visible=True)

    #table_xaxis, table_yaxis, table_zaxis = transformUtils.getAxesFromTransform( data.table.frame )
    #drillOrientation = transformUtils.orientationFromAxes( table_yaxis, table_xaxis, -1*np.array( table_zaxis) )

    # place the drill at the first table-scene cluster, oriented like the table
    drillTransform = transformUtils.frameFromPositionAndRPY( data.clusters[0].frame.GetPosition() , tableOrientation.GetOrientation() )

    drillMesh = getDrillMesh()

    drill = om.findObjectByName('drill')
    om.removeFromObjectModel(drill)

    aff = showPolyData(drillMesh, 'drill', color=[0.0, 1.0, 0.0], cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(drillTransform)
    aff.addToView(app.getDRCView())

    frameObj = updateFrame(drillTransform, 'drill frame', parent=aff, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())

    params = getDrillAffordanceParams(np.array(drillTransform.GetPosition()), [1,0,0], [0,1,0], [0,0,1], drillType="dewalt_button")
    aff.setAffordanceParams(params)
def segmentDrillInHand(p1, p2):
    '''Fit a drill affordance from two annotated points along the drill axis.

    p1 and p2 are annotated points from bottom to top of the drill while it
    is held in a hand.  The snapshot cloud is cropped to a cylinder around
    the p1-p2 segment, per-slice centroids orient the frame, and the drill
    mesh is placed so its known top point lands at p2.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    distanceToLineThreshold = 0.05

    # Keep points within 5cm of the annotated axis, between the endpoints
    # (with 3cm margin on each side).
    polyData = labelDistanceToLine(polyData, p1, p2)
    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])

    lineSegment = p2 - p1
    lineLength = np.linalg.norm(lineSegment)

    cropped, polyData = cropToPlane(polyData, p1, lineSegment/lineLength, [-0.03, lineLength + 0.03])

    updatePolyData(cropped, 'drill cluster', parent=getDebugFolder(), visible=False)

    drillPoints = cropped
    normal = lineSegment/lineLength

    # Slice centroids along the axis; their drift fixes the frame's y axis.
    centroids = computeCentroids(drillPoints, axis=normal)

    centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
    d = DebugData()
    updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)

    # Offset from the drill mesh origin to its top point (mesh coordinates).
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    zaxis = normal
    yaxis = centroids[0] - centroids[-1]
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    # Shift the mesh so its top point coincides with the annotated top p2.
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(p2)

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())

    params = getDrillAffordanceParams(np.array(t.GetPosition()), xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def addDrillAffordance():
    '''Create a drill affordance at the world origin and return it.'''
    identity = vtk.vtkTransform()
    identity.PostMultiply()

    drillObj = showPolyData(getDrillMesh(), 'drill', cls=FrameAffordanceItem, visible=True)
    drillObj.actor.SetUserTransform(identity)
    showFrame(identity, 'drill frame', parent=drillObj, visible=False).addToView(app.getDRCView())

    affordanceParams = getDrillAffordanceParams(np.array(identity.GetPosition()), [1,0,0], [0,1,0], [0,0,1])
    drillObj.setAffordanceParams(affordanceParams)
    drillObj.updateParamsFromActorTransform()
    drillObj.addToView(app.getDRCView())
    return drillObj
def getLinkFrame(linkName):
    '''Return the link-to-world transform for the named robot model link.'''
    robotStateModel = om.findObjectByName('robot state model')
    assert robotStateModel
    linkToWorld = vtk.vtkTransform()
    robotStateModel.model.getLinkToWorld(linkName, linkToWorld)
    return linkToWorld
def getDrillInHandOffset(zRotation=0.0, zTranslation=0.0, xTranslation=0.0, yTranslation=0.0,flip=False):
    '''
    Build the hand-to-drill offset transform used when the drill is grasped.
    The rotation order (optional Y flip, then Z, then -90 about Y) is
    significant and must not be reordered.
    '''
    offset = vtk.vtkTransform()
    offset.PostMultiply()
    if flip:
        offset.RotateY(180)
    offset.RotateZ(zRotation)
    offset.RotateY(-90)
    offset.Translate(zTranslation, xTranslation, 0.0 + yTranslation)
    return offset
def moveDrillToHand(drillOffset, hand='right'):
    '''
    Snap the drill affordance onto the given hand ('right' or 'left'),
    composing drillOffset with the hand-face link frame.  Creates the drill
    affordance when it does not exist yet.
    '''
    drill = om.findObjectByName('drill')
    if not drill:
        drill = addDrillAffordance()

    assert hand in ('right', 'left')
    drillTransform = drill.actor.GetUserTransform()
    handFaceFrame = getLinkFrame('%s_hand_face' % hand[0])

    # Rebuild the drill transform as offset composed with the hand frame.
    drillTransform.PostMultiply()
    drillTransform.Identity()
    drillTransform.Concatenate(drillOffset)
    drillTransform.Concatenate(handFaceFrame)

    drill._renderAllViews()
class PointPicker(TimerCallback):
    '''Interactive N-point annotation tool.

    While enabled, a timer tick picks the point under the cursor on the
    'pointcloud snapshot' object and draws the annotation (spheres plus,
    optionally, connecting lines).  Each mouse press commits the hovered
    point; once all numberOfPoints are collected, annotationFunc is called
    with the picked points and the picker removes itself.
    '''

    def __init__(self, numberOfPoints=3):
        TimerCallback.__init__(self)
        self.targetFps = 30
        self.enabled = False
        self.numberOfPoints = numberOfPoints
        self.annotationObj = None
        self.drawLines = True
        self.clear()

    def clear(self):
        # Reset picked points, hover state and the completion callback.
        self.points = [None for i in xrange(self.numberOfPoints)]
        self.hoverPos = None
        self.annotationFunc = None
        self.lastMovePos = [0, 0]

    def onMouseMove(self, displayPoint, modifiers=None):
        self.lastMovePos = displayPoint

    def onMousePress(self, displayPoint, modifiers=None):
        # Commit the current hover position into the first free slot; when
        # the last slot fills, the annotation is complete.
        #print 'mouse press:', modifiers
        #if not modifiers:
        #    return

        for i in xrange(self.numberOfPoints):
            if self.points[i] is None:
                self.points[i] = self.hoverPos
                break

        if self.points[-1] is not None:
            self.finish()

    def finish(self):
        # Stop picking, remove the annotation drawing, and invoke the
        # callback with copies of the picked points.
        self.enabled = False
        om.removeFromObjectModel(self.annotationObj)

        points = [p.copy() for p in self.points]
        if self.annotationFunc is not None:
            self.annotationFunc(*points)

        removeViewPicker(self)

    def handleRelease(self, displayPoint):
        pass

    def draw(self):
        # Render committed points (and the hover point) as spheres, with
        # optional connecting lines and a closing segment.
        d = DebugData()

        points = [p if p is not None else self.hoverPos for p in self.points]

        # draw points
        for p in points:
            if p is not None:
                d.addSphere(p, radius=0.01)

        if self.drawLines:
            # draw lines
            for a, b in zip(points, points[1:]):
                if b is not None:
                    d.addLine(a, b)

            # connect end points
            if points[-1] is not None:
                d.addLine(points[0], points[-1])

        self.annotationObj = updatePolyData(d.getPolyData(), 'annotation', parent=getDebugFolder())
        self.annotationObj.setProperty('Color', QtGui.QColor(0, 255, 0))
        self.annotationObj.actor.SetPickable(False)

    def tick(self):
        # Timer callback: update the hover point from the cursor position
        # and redraw.  Aborts the annotation when the snapshot disappears.
        if not self.enabled:
            return

        if not om.findObjectByName('pointcloud snapshot'):
            self.annotationFunc = None
            self.finish()
            return

        pickedPointFields = pickPoint(self.lastMovePos, getSegmentationView(), obj='pointcloud snapshot')
        self.hoverPos = pickedPointFields.pickedPoint
        self.draw()
class LineDraw(TimerCallback):
    '''Interactive 2d line annotation drawn in viewport coordinates.

    The first mouse press anchors the line start (p1); while hovering, a
    timer tick stretches the on-screen leader line to the cursor.  The
    second press sets p2 and calls annotationFunc(p1, p2) with the two
    display points.
    '''

    def __init__(self, view):
        TimerCallback.__init__(self)
        self.targetFps = 30
        self.enabled = False
        self.view = view
        self.renderer = view.renderer()
        self.line = vtk.vtkLeaderActor2D()
        self.line.SetArrowPlacementToNone()
        self.line.GetPositionCoordinate().SetCoordinateSystemToViewport()
        self.line.GetPosition2Coordinate().SetCoordinateSystemToViewport()
        self.line.GetProperty().SetLineWidth(4)
        self.line.SetPosition(0,0)
        self.line.SetPosition2(0,0)
        self.clear()

    def clear(self):
        # Reset endpoints and callback, and remove the 2d actor.
        self.p1 = None
        self.p2 = None
        self.annotationFunc = None
        self.lastMovePos = [0, 0]
        self.renderer.RemoveActor2D(self.line)

    def onMouseMove(self, displayPoint, modifiers=None):
        self.lastMovePos = displayPoint

    def onMousePress(self, displayPoint, modifiers=None):
        # First press anchors p1 and shows the line; second press sets p2
        # and completes the annotation.
        if self.p1 is None:
            self.p1 = list(self.lastMovePos)
            if self.p1 is not None:
                self.renderer.AddActor2D(self.line)
        else:
            self.p2 = self.lastMovePos
            self.finish()

    def finish(self):
        # Stop drawing and hand (p1, p2) to the callback if one is set.
        self.enabled = False
        self.renderer.RemoveActor2D(self.line)

        if self.annotationFunc is not None:
            self.annotationFunc(self.p1, self.p2)

    def handleRelease(self, displayPoint):
        pass

    def tick(self):
        # Timer callback: rubber-band the line from p1 to the cursor.
        if not self.enabled:
            return

        if self.p1:
            self.line.SetPosition(self.p1)
            self.line.SetPosition2(self.lastMovePos)
            self.view.render()
# Module-level registry of active interactive pickers.
viewPickers = []

def addViewPicker(picker):
    '''Register a picker in the module-level viewPickers list.'''
    global viewPickers
    viewPickers.append(picker)

def removeViewPicker(picker):
    '''Remove a previously registered picker from viewPickers.'''
    global viewPickers
    viewPickers.remove(picker)
def distanceToLine(x0, x1, x2):
    '''Distance from point x0 to the infinite line through x1 and x2.'''
    crossProduct = np.cross(x0 - x1, x0 - x2)
    area = np.sqrt(np.sum(crossProduct**2))
    return area / np.linalg.norm(x2 - x1)
def labelDistanceToLine(polyData, linePoint1, linePoint2, resultArrayName='distance_to_line'):
    '''
    Label every point of polyData with its distance to the infinite line
    through linePoint1 and linePoint2.  Returns a shallow copy of polyData
    with the new scalar array attached under resultArrayName.
    '''
    lineStart = np.array(linePoint1)
    lineEnd = np.array(linePoint2)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')

    # |(p - a) x (p - b)| / |b - a| for every point p at once.
    crossNorms = np.sqrt(np.sum(np.cross(points - lineStart, points - lineEnd)**2, axis=1))
    dists = crossNorms / np.linalg.norm(lineEnd - lineStart)

    labeled = shallowCopy(polyData)
    vtkNumpy.addNumpyToVtk(labeled, dists, resultArrayName)
    return labeled
def labelDistanceToPoint(polyData, point, resultArrayName='distance_to_point'):
    '''
    Label every point of polyData with its Euclidean distance to the given
    point.  Returns a shallow copy of polyData with the new scalar array
    attached under resultArrayName.
    '''
    assert polyData.GetNumberOfPoints()
    offsets = vtkNumpy.getNumpyFromVtk(polyData, 'Points') - point
    dists = np.sqrt(np.sum(offsets**2, axis=1))
    labeled = shallowCopy(polyData)
    vtkNumpy.addNumpyToVtk(labeled, dists, resultArrayName)
    return labeled
def getPlaneEquationFromPolyData(polyData, expectedNormal):
    '''
    Fit a plane to polyData and return (origin, normal, equation) where
    equation is [nx, ny, nz, d] with d = dot(origin, normal).
    '''
    _, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, returnOrigin=True)
    equation = np.hstack((normal, [np.dot(origin, normal)]))
    return origin, normal, equation
def computeEdge(polyData, edgeAxis, perpAxis, binWidth=0.03):
    '''
    Estimate an edge of a point set: bin the points along edgeAxis and, in
    each non-empty bin, take the point farthest along perpAxis.  Returns
    the selected edge points as a numpy array.
    '''
    polyData = labelPointDistanceAlongAxis(polyData, edgeAxis, resultArrayName='dist_along_edge')
    polyData = labelPointDistanceAlongAxis(polyData, perpAxis, resultArrayName='dist_perp_to_edge')

    polyData, bins = binByScalar(polyData, 'dist_along_edge', binWidth)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
    distToEdge = vtkNumpy.getNumpyFromVtk(polyData, 'dist_perp_to_edge')

    edgePoints = []
    for binIndex in range(len(bins) - 1):
        inBin = binLabels == binIndex
        binDists = distToEdge[inBin]
        if len(binDists):
            edgePoints.append(points[inBin][binDists.argmax()])

    return np.array(edgePoints)
def computeCentroids(polyData, axis, binWidth=0.025):
    '''
    Bin the points of polyData along the given axis and return the centroid
    (mean point) of each non-empty bin as a numpy array.
    '''
    polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')

    polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')

    centroids = []
    for binIndex in range(len(bins) - 1):
        binPoints = points[binLabels == binIndex]
        if len(binPoints):
            centroids.append(np.average(binPoints, axis=0))

    return np.array(centroids)
def computePointCountsAlongAxis(polyData, axis, binWidth=0.025):
    '''
    Bin the points of polyData along the given axis and return a numpy
    array with the number of points that fall into each bin.
    '''
    polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')

    polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')

    counts = [int(np.sum(binLabels == binIndex)) for binIndex in range(len(bins) - 1)]
    return np.array(counts)
def binByScalar(lidarData, scalarArrayName, binWidth, binLabelsArrayName='bin_labels'):
    '''
    Partition the points of lidarData into bins of width binWidth over the
    scalar array named scalarArrayName.  A new per-point label array (first
    bin labeled 0) is attached under binLabelsArrayName.  Returns
    (labeledData, binEdges).
    '''
    values = vtkNumpy.getNumpyFromVtk(lidarData, scalarArrayName)
    binEdges = np.arange(values.min(), values.max() + binWidth, binWidth)
    labels = np.digitize(values, binEdges) - 1
    assert len(labels) == len(values)

    labeledData = shallowCopy(lidarData)
    vtkNumpy.addNumpyToVtk(labeledData, labels, binLabelsArrayName)
    return labeledData, binEdges
def showObbs(polyData):
    '''
    Compute per-cluster oriented bounding boxes (from the 'cluster_labels'
    point array, which must exist) and show them as 'bboxes'.
    '''
    labelsArrayName = 'cluster_labels'
    assert polyData.GetPointData().GetArray(labelsArrayName)

    annotator = vtk.vtkAnnotateOBBs()
    annotator.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
    annotator.SetInput(polyData)
    annotator.Update()
    showPolyData(annotator.GetOutput(), 'bboxes')
def getOrientedBoundingBox(polyData):
    '''
    Compute the oriented bounding box of all points in polyData.
    Returns (origin, edges, outline) where edges is a list of the three
    box edge vectors and outline is the box wireframe polydata.
    '''
    nPoints = polyData.GetNumberOfPoints()
    assert nPoints
    polyData = shallowCopy(polyData)

    # Give every point the same label so vtkAnnotateOBBs fits a single box.
    labelsArrayName = 'bbox_labels'
    vtkNumpy.addNumpyToVtk(polyData, np.ones(nPoints), labelsArrayName)

    annotator = vtk.vtkAnnotateOBBs()
    annotator.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
    annotator.SetInput(polyData)
    annotator.Update()
    assert annotator.GetNumberOfBoundingBoxes() == 1

    origin = np.zeros(3)
    annotator.GetBoundingBoxOrigin(0, origin)

    edges = []
    for axisIndex in range(3):
        edge = np.zeros(3)
        annotator.GetBoundingBoxEdge(0, axisIndex, edge)
        edges.append(edge)

    return origin, edges, shallowCopy(annotator.GetOutput())
def segmentBlockByAnnotation(blockDimensions, p1, p2, p3):
    '''Fit a block affordance from three annotated points.

    p1 and p2 span the block's long (z) edge; p3 is a point on the block
    face used to orient the x axis.  blockDimensions supplies the expected
    (xwidth, ywidth) cross-section; the z length is |p2 - p1|.  Creates or
    updates the 'block affordance' object in the object model.
    '''
    segmentationObj = om.findObjectByName('pointcloud snapshot')
    segmentationObj.mapper.ScalarVisibilityOff()
    segmentationObj.setProperty('Point Size', 2)
    segmentationObj.setProperty('Alpha', 0.8)

    # constrain z to lie in plane
    #p1[2] = p2[2] = p3[2] = max(p1[2], p2[2], p3[2])

    zedge = p2 - p1
    zaxis = zedge / np.linalg.norm(zedge)

    #xwidth = distanceToLine(p3, p1, p2)

    # expected dimensions
    xwidth, ywidth = blockDimensions

    zwidth = np.linalg.norm(zedge)

    # y normal to the plane through the three annotation points
    yaxis = np.cross(p2 - p1, p3 - p1)
    yaxis = yaxis / np.linalg.norm(yaxis)

    xaxis = np.cross(yaxis, zaxis)

    # reorient axes: y toward the camera, x toward the annotated face point
    viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
    if np.dot(yaxis, viewPlaneNormal) < 0:
        yaxis *= -1

    if np.dot(xaxis, p3 - p1) < 0:
        xaxis *= -1

    # make right handed
    zaxis = np.cross(xaxis, yaxis)

    # center of the block: midpoint of the edge, pushed half a width inward
    origin = ((p1 + p2) / 2.0) + xaxis*xwidth/2.0 + yaxis*ywidth/2.0

    d = DebugData()
    d.addSphere(origin, radius=0.01)
    d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)

    obj = updatePolyData(d.getPolyData(), 'block axes')
    obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    obj.setProperty('Visible', False)
    om.findObjectByName('annotation').setProperty('Visible', False)

    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
####
# debris task ground frame
def getBoardCorners(params):
    '''
    Return the eight corner points of the box affordance described by
    params (origin, x/y/z axis vectors and x/y/z widths) as numpy arrays.
    '''
    axisNames = ['xaxis', 'yaxis', 'zaxis']
    widthNames = ['xwidth', 'ywidth', 'zwidth']

    # Half-extent vector along each box axis.
    halfEdges = [np.array(params[a]) * np.array(params[w]) / 2.0
                 for a, w in zip(axisNames, widthNames)]
    ex, ey, ez = halfEdges
    origin = np.array(params['origin'])

    # All eight sign combinations, in the original listing order.
    return [
        origin + ex + ey + ez,
        origin - ex + ey + ez,
        origin - ex - ey + ez,
        origin + ex - ey + ez,
        origin + ex + ey - ez,
        origin - ex + ey - ez,
        origin - ex - ey - ez,
        origin + ex - ey - ez,
    ]
def getPointDistances(target, points):
    '''Return a numpy array of Euclidean distances from target to each point.'''
    return np.array([np.linalg.norm(target - p) for p in points])
def computeClosestCorner(aff, referenceFrame):
    '''Return the corner of the board affordance nearest to referenceFrame.'''
    corners = getBoardCorners(aff.params)
    refPosition = np.array(referenceFrame.GetPosition())
    dists = getPointDistances(refPosition, corners)
    return corners[dists.argmin()]
def computeGroundFrame(aff, referenceFrame):
    '''Compute a z-up ground frame at the board corner nearest referenceFrame.

    The x axis is the board axis most aligned with the reference frame's -y
    direction (flipped to agree with it), z is world up, and y completes the
    right-handed frame.  The frame is placed at the closest corner's x/y
    position with z = 0.
    '''
    refAxis = [0.0, -1.0, 0.0]
    referenceFrame.TransformVector(refAxis, refAxis)

    refAxis = np.array(refAxis)

    axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]

    # pick the board axis most parallel to the reference -y direction
    axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
    boardAxis = axes[axisProjections.argmax()]
    if np.dot(boardAxis, refAxis) < 0:
        boardAxis = -boardAxis

    xaxis = boardAxis
    zaxis = np.array([0.0, 0.0, 1.0])
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)

    closestCorner = computeClosestCorner(aff, referenceFrame)
    groundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    groundFrame.PostMultiply()
    groundFrame.Translate(closestCorner[0], closestCorner[1], 0.0)

    return groundFrame
def computeCornerFrame(aff, referenceFrame):
    '''Compute a frame at the board corner nearest referenceFrame.

    The x axis is the board axis most aligned with the reference frame's -y
    direction (flipped to agree with it), y is the board's z axis, and z
    completes the frame via the cross product.
    '''
    refAxis = [0.0, -1.0, 0.0]
    referenceFrame.TransformVector(refAxis, refAxis)

    refAxis = np.array(refAxis)

    axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]

    # Bug fix: this previously iterated over the literal strings 'xwidth',
    # 'ywidth', 'zwidth' (so argmax compared strings) instead of looking up
    # the width values in aff.params.
    edgeLengths = [np.array(aff.params[widthName]) for widthName in ['xwidth', 'ywidth', 'zwidth']]

    axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
    boardAxis = axes[axisProjections.argmax()]
    if np.dot(boardAxis, refAxis) < 0:
        boardAxis = -boardAxis

    longAxis = axes[np.argmax(edgeLengths)]  # NOTE(review): computed but currently unused

    xaxis = boardAxis
    yaxis = axes[2]
    zaxis = np.cross(xaxis, yaxis)

    closestCorner = computeClosestCorner(aff, referenceFrame)
    cornerFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    cornerFrame.PostMultiply()
    cornerFrame.Translate(closestCorner)

    return cornerFrame
def createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances'):
    '''
    Create, register, and return a BoxAffordanceItem with the given pose
    (origin plus axes) and dimensions, parented under the named container.
    '''
    boxToWorld = getTransformFromAxes(xaxis, yaxis, zaxis)
    boxToWorld.PostMultiply()
    boxToWorld.Translate(origin)

    box = BoxAffordanceItem(name, view=app.getCurrentRenderView())
    box.setProperty('Dimensions', [float(v) for v in [xwidth, ywidth, zwidth]])
    box.actor.SetUserTransform(boxToWorld)

    om.addToObjectModel(box, parentObj=om.getOrCreateContainer(parent))
    frameObj = vis.showFrame(boxToWorld, name + ' frame', scale=0.2, visible=False, parent=box)

    box.addToView(app.getDRCView())
    frameObj.addToView(app.getDRCView())

    affordanceManager.registerAffordance(box)
    return box
def segmentBlockByTopPlane(polyData, blockDimensions, expectedNormal, expectedXAxis, edgeSign=1, name='block affordance'):
    '''Fit a block affordance from a point cloud of its top face.

    A plane is fit to polyData (normal -> y axis) and a line fit gives the
    long (z) axis.  The outer edge along x*edgeSign is extracted and re-fit
    to refine z; the block extent along z comes from the extreme edge
    points, while blockDimensions provides (xwidth, ywidth).  Also computes
    the debris grasp seed and stance frame.  Returns the created affordance.
    '''
    polyData, planeOrigin, normal = applyPlaneFit(polyData, distanceThreshold=0.05, expectedNormal=expectedNormal, returnOrigin=True)

    # initial axes: z along the dominant line, y along the plane normal
    _, lineDirection, _ = applyLineFit(polyData)

    zaxis = lineDirection
    yaxis = normal
    xaxis = np.cross(yaxis, zaxis)

    if np.dot(xaxis, expectedXAxis) < 0:
        xaxis *= -1

    # make right handed
    zaxis = np.cross(xaxis, yaxis)

    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis /= np.linalg.norm(zaxis)

    expectedXAxis = np.array(xaxis)

    # refine the z axis by fitting a line to the extracted edge points
    edgePoints = computeEdge(polyData, zaxis, xaxis*edgeSign)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)

    d = DebugData()
    obj = updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)

    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    zaxis = lineDirection
    xaxis = np.cross(yaxis, zaxis)

    if np.dot(xaxis, expectedXAxis) < 0:
        xaxis *= -1

    # make right handed
    zaxis = np.cross(xaxis, yaxis)

    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis /= np.linalg.norm(zaxis)

    polyData = labelPointDistanceAlongAxis(polyData, xaxis, resultArrayName='dist_along_line')

    # extreme edge points along z give the block length
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')

    dists = np.dot(pts-linePoint, zaxis)

    p1 = linePoint + zaxis*np.min(dists)
    p2 = linePoint + zaxis*np.max(dists)

    p1 = projectPointToPlane(p1, planeOrigin, normal)
    p2 = projectPointToPlane(p2, planeOrigin, normal)

    xwidth, ywidth = blockDimensions
    zwidth = np.linalg.norm(p2 - p1)

    # block center: step inward from the edge midpoint by half a width
    origin = p1 - edgeSign*xaxis*xwidth/2.0 - yaxis*ywidth/2.0 + zaxis*zwidth/2.0

    d = DebugData()

    #d.addSphere(linePoint, radius=0.02)
    #d.addLine(linePoint, linePoint + yaxis*ywidth)
    #d.addLine(linePoint, linePoint + xaxis*xwidth)
    #d.addLine(linePoint, linePoint + zaxis*zwidth)

    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)

    d.addSphere(origin, radius=0.01)
    #d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    #d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    #d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)

    d.addLine(origin, origin + xaxis*xwidth/2.0)
    d.addLine(origin, origin + yaxis*ywidth/2.0)
    d.addLine(origin, origin + zaxis*zwidth/2.0)

    #obj = updatePolyData(d.getPolyData(), 'block axes')
    #obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    #obj.setProperty('Visible', False)

    obj = createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name)
    obj.setProperty('Color', [222/255.0, 184/255.0, 135/255.0])

    computeDebrisGraspSeed(obj)
    t = computeDebrisStanceFrame(obj)
    if t:
        showFrame(t, 'debris stance frame', parent=obj)

    return obj
def computeDebrisGraspSeed(aff):
    '''
    If a 'debris reference frame' object exists, compute the board corner
    frame for the given affordance and show it (hidden) as
    'board corner frame'; otherwise do nothing.
    '''
    refFrameObj = om.findObjectByName('debris reference frame')
    if refFrameObj:
        refFrame = refFrameObj.transform
        cornerFrame = computeCornerFrame(aff, refFrame)
        showFrame(cornerFrame, 'board corner frame', parent=aff, visible=False)
def computeDebrisStanceFrame(aff):
    '''Compute a foot stance frame for approaching a debris board affordance.

    Requires the 'debris reference frame' and 'debris plane edge' objects;
    returns None when either is missing.  The board's ground-frame position
    is projected onto the wall edge line and the stance frame is offset
    from that projected point.
    '''
    debrisReferenceFrame = om.findObjectByName('debris reference frame')
    debrisWallEdge = om.findObjectByName('debris plane edge')

    if debrisReferenceFrame and debrisWallEdge:

        debrisReferenceFrame = debrisReferenceFrame.transform

        affGroundFrame = computeGroundFrame(aff, debrisReferenceFrame)

        updateFrame(affGroundFrame, 'board ground frame', parent=getDebugFolder(), visible=False)

        # (A dead duplicate call `affWallEdge = computeGroundFrame(...)`
        # whose result was never used has been removed here.)

        framePos = np.array(affGroundFrame.GetPosition())

        # project the ground frame position onto the wall edge line
        p1, p2 = debrisWallEdge.points
        edgeAxis = p2 - p1
        edgeAxis /= np.linalg.norm(edgeAxis)
        projectedPos = p1 + edgeAxis * np.dot(framePos - p1, edgeAxis)

        affWallFrame = vtk.vtkTransform()
        affWallFrame.PostMultiply()

        useWallFrameForRotation = True

        if useWallFrameForRotation:
            affWallFrame.SetMatrix(debrisReferenceFrame.GetMatrix())
            affWallFrame.Translate(projectedPos - np.array(debrisReferenceFrame.GetPosition()))

            stanceWidth = 0.20
            stanceOffsetX = -0.35
            stanceOffsetY = 0.45
            stanceRotation = 0.0
        else:
            affWallFrame.SetMatrix(affGroundFrame.GetMatrix())
            affWallFrame.Translate(projectedPos - framePos)

            stanceWidth = 0.20
            stanceOffsetX = -0.35
            stanceOffsetY = -0.45
            stanceRotation = math.pi/2.0

        stanceFrame, _, _ = getFootFramesFromReferenceFrame(affWallFrame, stanceWidth, math.degrees(stanceRotation), [stanceOffsetX, stanceOffsetY, 0.0])
        return stanceFrame
def segmentBlockByPlanes(blockDimensions):
    '''Fit a block affordance from the first two 'selected planes' children.

    Plane 1's normal becomes the block's y axis and plane 2's normal the x
    axis; z completes the frame.  The block extent along z is taken from
    plane 1's points, the (xwidth, ywidth) cross-section from
    blockDimensions.  Creates/updates the 'block affordance' object.
    '''
    planes = om.findObjectByName('selected planes').children()[:2]

    viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
    origin1, normal1, plane1 = getPlaneEquationFromPolyData(planes[0].polyData, expectedNormal=viewPlaneNormal)
    origin2, normal2, plane2 = getPlaneEquationFromPolyData(planes[1].polyData, expectedNormal=viewPlaneNormal)

    xaxis = normal2
    yaxis = normal1
    zaxis = np.cross(xaxis, yaxis)
    xaxis = np.cross(yaxis, zaxis)

    pts1 = vtkNumpy.getNumpyFromVtk(planes[0].polyData, 'Points')
    pts2 = vtkNumpy.getNumpyFromVtk(planes[1].polyData, 'Points')

    # project plane 2's centroid onto plane 1 to get a point on the block edge
    linePoint = np.zeros(3)
    centroid2 = np.sum(pts2, axis=0)/len(pts2)
    vtk.vtkPlane.ProjectPoint(centroid2, origin1, normal1, linePoint)

    # block length along z from the extreme projections of plane 1's points
    dists = np.dot(pts1-linePoint, zaxis)

    p1 = linePoint + zaxis*np.min(dists)
    p2 = linePoint + zaxis*np.max(dists)

    xwidth, ywidth = blockDimensions
    zwidth = np.linalg.norm(p2 - p1)

    origin = p1 + xaxis*xwidth/2.0 + yaxis*ywidth/2.0 + zaxis*zwidth/2.0

    d = DebugData()

    d.addSphere(linePoint, radius=0.02)
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)

    d.addSphere(origin, radius=0.01)
    d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
    obj = updatePolyData(d.getPolyData(), 'block axes')
    obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    obj.setProperty('Visible', False)

    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def estimatePointerTip(robotModel, polyData):
    '''
    Given a robot model, uses forward kinematics to determine a pointer tip
    search region, then does a ransac line fit in the search region to find
    points on the pointer, and selects the maximum point along the line fit
    as the pointer tip. Returns the pointer tip xyz on success and returns
    None on failure.
    '''
    # pointer search segment in the right hand force/torque frame
    palmFrame = robotModel.getLinkFrame('r_hand_force_torque')
    p1 = [0.0, 0.14, -0.06]
    p2 = [0.0, 0.24, -0.06]

    palmFrame.TransformPoint(p1, p1)
    palmFrame.TransformPoint(p2, p2)

    p1 = np.array(p1)
    p2 = np.array(p2)

    d = DebugData()
    d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    vis.updatePolyData(d.getPolyData(), 'pointer line', color=[1,0,0], parent=getDebugFolder(), visible=False)

    polyData = cropToLineSegment(polyData, p1, p2)
    if not polyData.GetNumberOfPoints():
        #print 'pointer search region is empty'
        return None

    vis.updatePolyData(polyData, 'cropped to pointer line', parent=getDebugFolder(), visible=False)

    # keep points within 7cm of the nominal pointer line
    polyData = labelDistanceToLine(polyData, p1, p2)

    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.07])

    if polyData.GetNumberOfPoints() < 2:
        #print 'pointer search region is empty'
        return None

    updatePolyData(polyData, 'distance to pointer line', colorByName='distance_to_line', parent=getDebugFolder(), visible=False)

    ransacDistanceThreshold = 0.0075
    lineOrigin, lineDirection, polyData = applyLineFit(polyData, distanceThreshold=ransacDistanceThreshold)
    updatePolyData(polyData, 'line fit ransac', colorByName='ransac_labels', parent=getDebugFolder(), visible=False)

    # orient the fitted direction to point from p1 toward p2
    lineDirection = np.array(lineDirection)
    lineDirection /= np.linalg.norm(lineDirection)

    if np.dot(lineDirection, (p2 - p1)) < 0:
        lineDirection *= -1

    polyData = thresholdPoints(polyData, 'ransac_labels', [1.0, 1.0])

    if polyData.GetNumberOfPoints() < 2:
        #print 'pointer ransac line fit failed to find inliers'
        return None

    obj = updatePolyData(polyData, 'line fit points', colorByName='dist_along_line', parent=getDebugFolder(), visible=True)
    obj.setProperty('Point Size', 5)

    # the tip is the inlier point farthest along the fitted direction
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')

    dists = np.dot(pts-lineOrigin, lineDirection)

    p1 = lineOrigin + lineDirection*np.min(dists)
    p2 = lineOrigin + lineDirection*np.max(dists)

    d = DebugData()
    #d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    vis.updatePolyData(d.getPolyData(), 'fit pointer line', color=[0,1,0], parent=getDebugFolder(), visible=True)

    return p2
def startBoundedPlaneSegmentation():
    '''Begin a two-point annotation that triggers bounded-plane segmentation.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.annotationFunc = functools.partial(segmentBoundedPlaneByAnnotation)
    pointPicker.start()
def startValveSegmentationByWallPlane(expectedValveRadius):
    '''Begin a two-point annotation to segment a valve using the wall plane.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.annotationFunc = functools.partial(segmentValveByWallPlane, expectedValveRadius)
    pointPicker.start()
def startValveSegmentationManual(expectedValveRadius):
    '''Begin a two-point annotation (no connecting line) for manual valve segmentation.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentValve, expectedValveRadius)
    pointPicker.start()
def startRefitWall():
    '''Begin a single-point annotation that re-fits the wall plane.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.annotationFunc = refitWall
    pointPicker.start()
def startWyeSegmentation():
    '''Begin a two-point annotation (no connecting line) for wye segmentation.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentWye)
    pointPicker.start()
def startDoorHandleSegmentation(otdfType):
    '''Begin a two-point annotation for door handle segmentation of the given otdf type.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDoorHandle, otdfType)
    pointPicker.start()
def startTrussSegmentation():
    '''Begin a two-point annotation (with connecting line) for truss segmentation.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = True
    pointPicker.annotationFunc = functools.partial(segmentTruss)
    pointPicker.start()
def startHoseNozzleSegmentation():
    '''Begin a single-point annotation for hose nozzle segmentation.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentHoseNozzle)
    pointPicker.start()
def storePoint(p):
    '''Annotation callback that records the picked point for getPickPoint().'''
    global _pickPoint
    _pickPoint = p
def getPickPoint():
    '''Return the point most recently recorded by storePoint().'''
    # 'global' is unnecessary for a read-only access.
    return _pickPoint
def startPickPoint():
    '''Begin a single-point annotation that stores the picked point (see getPickPoint).'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = storePoint
    pointPicker.start()
def startSelectToolTip():
    '''Begin a single-point annotation that selects the tool tip point.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = selectToolTip
    pointPicker.start()
def startDrillSegmentation():
    '''Begin a three-point annotation for manual drill segmentation.'''
    pointPicker = PointPicker(numberOfPoints=3)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrill)
    pointPicker.start()
def startDrillAutoSegmentation():
    '''Begin a single-point annotation for automatic drill segmentation.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrillAuto)
    pointPicker.start()
def startDrillButtonSegmentation():
    '''Begin a single-point annotation marking the drill's button.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrillButton)
    pointPicker.start()
def startPointerTipSegmentation():
    '''Begin a single-point annotation marking the pointer tip.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentPointerTip)
    pointPicker.start()
def startDrillAutoSegmentationAlignedWithTable():
    '''Begin a single-point annotation for drill segmentation aligned with the table.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrillAlignedWithTable)
    pointPicker.start()
def startDrillBarrelSegmentation():
    '''Begin a single-point annotation for drill barrel segmentation.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrillBarrel)
    pointPicker.start()
def startDrillWallSegmentation():
    '''Begin a three-point annotation (with connecting lines) for the drill wall.'''
    pointPicker = PointPicker(numberOfPoints=3)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = True
    pointPicker.annotationFunc = functools.partial(segmentDrillWall)
    pointPicker.start()
def startDrillWallSegmentationConstrained(rightAngleLocation):
    '''Begin a two-point annotation for the drill wall with a constrained right angle.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.annotationFunc = functools.partial(segmentDrillWallConstrained, rightAngleLocation)
    pointPicker.start()
def startDrillInHandSegmentation():
    '''Begin a two-point annotation (with connecting line) for a drill held in the hand.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = True
    pointPicker.annotationFunc = functools.partial(segmentDrillInHand)
    pointPicker.start()
def startSegmentDebrisWall():
    '''Begin a single-point annotation for automatic debris wall segmentation.'''
    pointPicker = PointPicker(numberOfPoints=1)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.annotationFunc = functools.partial(segmentDebrisWall)
    pointPicker.start()
def startSegmentDebrisWallManual():
    '''Begin a two-point annotation for manual debris wall segmentation.'''
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.annotationFunc = functools.partial(segmentDebrisWallManual)
    pointPicker.start()
def selectToolTip(point1):
    # Debug annotation callback: echo the picked tool-tip point to stdout.
    print point1
def segmentDebrisWallManual(point1, point2):
    '''
    Build the debris wall frames directly from two user-picked points.
    The picked segment defines the frame x axis; z is world up and y
    completes the right-handed triad.
    '''
    p1, p2 = point1, point2
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', visible=True)
    edgeObj.points = [p1, p2]
    # Frame axes: x along the picked edge, z straight up, y = z cross x.
    xaxis = p2 - p1
    xaxis /= np.linalg.norm(xaxis)
    zaxis = np.array([0.0, 0.0, 1.0])
    yaxis = np.cross(zaxis, xaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(p1)
    updateFrame(t, 'debris plane frame', parent=edgeObj, visible=False)
    # Reference frame offset from the plane frame by a hard-coded vector.
    # NOTE(review): purpose of the (-x + y + 20z) offset is not evident here;
    # confirm against the debris task planner before changing.
    refFrame = vtk.vtkTransform()
    refFrame.PostMultiply()
    refFrame.SetMatrix(t.GetMatrix())
    refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
    updateFrame(refFrame, 'debris reference frame', parent=edgeObj, visible=False)
def segmentDebrisWall(point1):
    '''
    Segment a debris wall plane from the point cloud snapshot around a single
    user-picked point: fit a plane facing the camera, denoise and cluster the
    inliers, find the plane's lower edge, and publish 'debris plane' frames.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = shallowCopy(inputObj.polyData)
    # Fit a plane near the picked point, expecting it to face the camera.
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
                                             searchOrigin=point1, searchRadius=0.25, angleEpsilon=0.7, returnOrigin=True)
    planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.02, 0.02])
    updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
    # Downsample, drop isolated outliers, then keep the cluster closest to
    # the picked point.
    planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
    planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=10)
    updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
    planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
    planePoints = labelDistanceToPoint(planePoints, point1)
    clusters = extractClusters(planePoints, clusterTolerance=0.10)
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    planePoints = clusters[0]
    planeObj = updatePolyData(planePoints, 'debris plane points', parent=getDebugFolder(), visible=False)
    # Find the bottom edge of the plane (edge search direction is world -z).
    perpAxis = [0,0,-1]
    perpAxis /= np.linalg.norm(perpAxis)
    edgeAxis = np.cross(normal, perpAxis)
    edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
    updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    #binCounts = computePointCountsAlongAxis(planePoints, lineDirection)
    # Frame axes: x along the fitted edge, y is the plane normal, z completes
    # the triad and is flipped (with x) so z points upward.
    xaxis = lineDirection
    yaxis = normal
    zaxis = np.cross(xaxis, yaxis)
    if np.dot(zaxis, [0, 0, 1]) < 0:
        zaxis *= -1
        xaxis *= -1
    # Extreme projections of the plane points along x give the edge endpoints.
    pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
    dists = np.dot(pts-linePoint, xaxis)
    p1 = linePoint + xaxis*np.min(dists)
    p2 = linePoint + xaxis*np.max(dists)
    p1 = projectPointToPlane(p1, origin, normal)
    p2 = projectPointToPlane(p2, origin, normal)
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', parent=planeObj, visible=True)
    edgeObj.points = [p1, p2]
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(p1)
    updateFrame(t, 'debris plane frame', parent=planeObj, visible=False)
    # Hard-coded reference frame offset; see segmentDebrisWallManual.
    refFrame = vtk.vtkTransform()
    refFrame.PostMultiply()
    refFrame.SetMatrix(t.GetMatrix())
    refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
    updateFrame(refFrame, 'debris reference frame', parent=planeObj, visible=False)
def segmentBoundedPlaneByAnnotation(point1, point2):
    '''
    Segment a bounded plane from the point cloud snapshot.  point1 selects
    the plane; the direction point1 -> point2 chooses which edge of the
    plane to fit and orients the resulting frame's x axis.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = shallowCopy(inputObj.polyData)
    # Fit a plane near point1, expecting it to face the camera.
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.015, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
                                             searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.7, returnOrigin=True)
    planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.015, 0.015])
    updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
    # Downsample, drop isolated outliers, keep the cluster nearest point1.
    planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
    planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=12)
    updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
    planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
    planePoints = labelDistanceToPoint(planePoints, point1)
    clusters = extractClusters(planePoints, clusterTolerance=0.10)
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    planePoints = clusters[0]
    updatePolyData(planePoints, 'plane points', parent=getDebugFolder(), visible=False)
    # Edge search direction from the annotation segment.
    perpAxis = point2 - point1
    perpAxis /= np.linalg.norm(perpAxis)
    edgeAxis = np.cross(normal, perpAxis)
    edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
    updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    # Frame axes: z is plane normal, y along the fitted edge, x oriented to
    # agree with the annotation direction.
    zaxis = normal
    yaxis = lineDirection
    xaxis = np.cross(yaxis, zaxis)
    if np.dot(xaxis, perpAxis) < 0:
        xaxis *= -1
    # make right handed
    yaxis = np.cross(zaxis, xaxis)
    # Edge endpoints from the extreme projections along y.
    pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
    dists = np.dot(pts-linePoint, yaxis)
    p1 = linePoint + yaxis*np.min(dists)
    p2 = linePoint + yaxis*np.max(dists)
    p1 = projectPointToPlane(p1, origin, normal)
    p2 = projectPointToPlane(p2, origin, normal)
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    updatePolyData(d.getPolyData(), 'plane edge', parent=getDebugFolder(), visible=False)
    # Frame placed at the midpoint of the fitted edge.
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate((p1 + p2)/ 2.0)
    updateFrame(t, 'plane edge frame', parent=getDebugFolder(), visible=False)
# Camera state captured by saveCameraParams(); None until a snapshot is taken.
savedCameraParams = None
def perspective():
    '''
    Restore the perspective camera saved by saveCameraParams(), undoing the
    side effects of the ortho views (affordance transparency and point cloud
    pickability).  No-op when no camera snapshot has been saved.
    '''
    global savedCameraParams
    if savedCameraParams is None:
        return
    aff = getDefaultAffordanceObject()
    if aff:
        aff.setProperty('Alpha', 1.0)
    obj = om.findObjectByName('pointcloud snapshot')
    if obj is not None:
        obj.actor.SetPickable(1)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOff()
    c.SetPosition(savedCameraParams['Position'])
    c.SetFocalPoint(savedCameraParams['FocalPoint'])
    c.SetViewUp(savedCameraParams['ViewUp'])
    view.setCameraManipulationStyle()
    view.render()
def saveCameraParams(overwrite=False):
    '''
    Snapshot the segmentation view camera (position, focal point, view up)
    into the module global so perspective() can restore it later.  An
    existing snapshot is kept unless overwrite is True.
    '''
    global savedCameraParams
    if savedCameraParams is not None and not overwrite:
        return
    cam = getSegmentationView().camera()
    savedCameraParams = dict(Position=cam.GetPosition(),
                             FocalPoint=cam.GetFocalPoint(),
                             ViewUp=cam.GetViewUp())
def getDefaultAffordanceObject():
    '''
    Return the active object if it is an AffordanceItem, otherwise the first
    AffordanceItem found in the object model, or None when none exists.
    '''
    active = om.getActiveObject()
    if isinstance(active, AffordanceItem):
        return active
    return next((o for o in om.getObjects() if isinstance(o, AffordanceItem)), None)
def orthoX():
    '''
    Switch the segmentation view to an orthographic projection looking along
    the default affordance's x axis.  Saves the current camera first so
    perspective() can restore it.  No-op when no affordance exists.
    '''
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    # Translucent affordance + unpickable cloud so the affordance actor can
    # be manipulated in the ortho view.
    aff.setProperty('Alpha', 0.3)
    om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOn()
    origin = aff.params['origin']
    viewDirection = aff.params['xaxis']
    viewUp = -aff.params['yaxis']
    viewDistance = aff.params['xwidth']*3
    scale = aff.params['zwidth']
    c.SetFocalPoint(origin)
    c.SetPosition(origin - viewDirection*viewDistance)
    c.SetViewUp(viewUp)
    c.SetParallelScale(scale)
    view.setActorManipulationStyle()
    view.render()
def orthoY():
    '''
    Switch the segmentation view to an orthographic projection looking along
    the default affordance's y axis.  Saves the current camera first so
    perspective() can restore it.  No-op when no affordance exists.
    '''
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    # Translucent affordance + unpickable cloud so the affordance actor can
    # be manipulated in the ortho view.
    aff.setProperty('Alpha', 0.3)
    om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOn()
    origin = aff.params['origin']
    viewDirection = aff.params['yaxis']
    viewUp = -aff.params['xaxis']
    viewDistance = aff.params['ywidth']*4
    scale = aff.params['zwidth']
    c.SetFocalPoint(origin)
    c.SetPosition(origin - viewDirection*viewDistance)
    c.SetViewUp(viewUp)
    c.SetParallelScale(scale)
    view.setActorManipulationStyle()
    view.render()
def orthoZ():
    '''
    Switch the segmentation view to an orthographic top-down projection along
    the default affordance's z axis.  Saves the current camera first so
    perspective() can restore it.  No-op when no affordance exists.
    '''
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    # Translucent affordance + unpickable cloud so the affordance actor can
    # be manipulated in the ortho view.
    aff.setProperty('Alpha', 0.3)
    om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOn()
    origin = aff.params['origin']
    viewDirection = aff.params['zaxis']
    viewUp = -aff.params['yaxis']
    viewDistance = aff.params['zwidth']
    scale = aff.params['ywidth']*6
    c.SetFocalPoint(origin)
    c.SetPosition(origin - viewDirection*viewDistance)
    c.SetViewUp(viewUp)
    c.SetParallelScale(scale)
    view.setActorManipulationStyle()
    view.render()
def zoomToDisplayPoint(displayPoint, boundsRadius=0.5, view=None):
    '''
    Pick the point cloud at the given display (pixel) coordinate and reset
    the camera to frame a cube of half-width boundsRadius around the picked
    point.  Does nothing when the pick misses the cloud.
    '''
    pickedPointFields = pickPoint(displayPoint, getSegmentationView(), obj='pointcloud snapshot')
    pickedPoint = pickedPointFields.pickedPoint
    if pickedPoint is None:
        return
    view = view or app.getCurrentRenderView()
    # NOTE(review): the ray endpoints below are computed but never used —
    # candidate for removal once getRayFromDisplayPoint is confirmed to have
    # no side effects.
    worldPt1, worldPt2 = getRayFromDisplayPoint(getSegmentationView(), displayPoint)
    diagonal = np.array([boundsRadius, boundsRadius, boundsRadius])
    bounds = np.hstack([pickedPoint - diagonal, pickedPoint + diagonal])
    # Reorder to VTK's (xmin, xmax, ymin, ymax, zmin, zmax) convention.
    bounds = [bounds[0], bounds[3], bounds[1], bounds[4], bounds[2], bounds[5]]
    view.renderer().ResetCamera(bounds)
    view.camera().SetFocalPoint(pickedPoint)
    view.render()
def extractPointsAlongClickRay(position, ray, polyData=None, distanceToLineThreshold=0.025, nearestToCamera=False):
    '''
    Intersect a click ray with the point cloud.  Keeps points within
    distanceToLineThreshold of the ray and at least 0.2m from the ray origin,
    then returns either the point nearest the ray (default) or the kept point
    nearest the camera.  Returns None when no candidate points are found.
    '''
    #segmentationObj = om.findObjectByName('pointcloud snapshot')
    if polyData is None:
        polyData = getCurrentRevolutionData()
    if not polyData or not polyData.GetNumberOfPoints():
        return None
    polyData = labelDistanceToLine(polyData, position, position + ray)
    # extract points near line
    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
    if not polyData.GetNumberOfPoints():
        return None
    # Discard points closer than 0.2m along the ray (e.g. the sensor itself).
    polyData = labelPointDistanceAlongAxis(polyData, ray, origin=position, resultArrayName='distance_along_line')
    polyData = thresholdPoints(polyData, 'distance_along_line', [0.20, 1e6])
    if not polyData.GetNumberOfPoints():
        return None
    updatePolyData(polyData, 'ray points', colorByName='distance_to_line', visible=False, parent=getDebugFolder())
    if nearestToCamera:
        dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_line')
    else:
        dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_to_line')
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    intersectionPoint = points[dists.argmin()]
    # Debug visualization: the chosen intersection and the full camera ray.
    d = DebugData()
    d.addSphere( intersectionPoint, radius=0.005)
    d.addLine(position, intersectionPoint)
    obj = updatePolyData(d.getPolyData(), 'intersecting ray', visible=False, color=[0,1,0], parent=getDebugFolder())
    obj.actor.GetProperty().SetLineWidth(2)
    d2 = DebugData()
    end_of_ray = position + 2*ray
    d2.addLine(position, end_of_ray)
    obj2 = updatePolyData(d2.getPolyData(), 'camera ray', visible=False, color=[1,0,0], parent=getDebugFolder())
    obj2.actor.GetProperty().SetLineWidth(2)
    return intersectionPoint
def segmentDrillWallFromTag(position, ray):
    '''
    Fix the drill wall relative to a ray intersected with the wall.
    Given a position and a ray (typically derived from a camera pixel),
    use that point to determine a position for the drill wall.  A hard-coded
    offset between the position on the wall produces the drill cutting
    origin.  Returns True on success, False when no lidar data is available.
    '''
    #inputObj = om.findObjectByName('pointcloud snapshot')
    #polyData = shallowCopy(inputObj.polyData)
    polyData = getCurrentRevolutionData()
    if (polyData is None): # no data yet
        print "no LIDAR data yet"
        return False
    point1 = extractPointsAlongClickRay(position, ray, polyData )
    # view direction is out:
    viewDirection = -1 * SegmentationContext.getGlobalInstance().getViewDirection()
    polyDataOut, origin, normal = applyPlaneFit(polyData, expectedNormal=viewDirection, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
    # project the lidar point onto the plane (older, variance is >1cm with robot 2m away)
    #intersection_point = projectPointToPlane(point1, origin, normal)
    # intersect the ray with the plane (variance was about 4mm with robot 2m away)
    intersection_point = intersectLineWithPlane(position, ray, origin, normal)
    # Define a frame: x into the wall (opposite the fitted normal), z up.
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)
    t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(intersection_point)
    # Hard-coded offset from the tag point to the drill cutting origin.
    t2 = transformUtils.copyFrame(t)
    t2.PreMultiply()
    t3 = transformUtils.frameFromPositionAndRPY( [0,0.6,-0.25] , [0,0,0] )
    t2.Concatenate(t3)
    rightAngleLocation = 'bottom left'
    createDrillWall(rightAngleLocation, t2)
    wall= om.findObjectByName('wall')
    vis.updateFrame( t ,'wall fit tag', parent=wall, visible=False, scale=0.2)
    # Debug marker at the ray/plane intersection.
    d = DebugData()
    d.addSphere( intersection_point, radius=0.002)
    obj = updatePolyData(d.getPolyData(), 'intersection', parent=wall, visible=False, color=[0,1,0]) #
    obj.actor.GetProperty().SetLineWidth(1)
    return True
def segmentDrillWallFromWallCenter():
    '''
    Get the drill wall target as an offset from the center of
    the full wall
    '''
    # find the valve wall and its center
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    # hardcoded position to target frame from center of wall;
    # coincides with the distance from the april tag to this position
    wallFrame = transformUtils.copyFrame( findWallCenter(polyData) )
    wallFrame.PreMultiply()
    t3 = transformUtils.frameFromPositionAndRPY( [-0.07,-0.3276,0] , [180,-90,0] )
    wallFrame.Concatenate(t3)
    rightAngleLocation = 'bottom left'
    createDrillWall(rightAngleLocation, wallFrame)
    wall= om.findObjectByName('wall')
    vis.updateFrame( wallFrame ,'wall fit lidar', parent=wall, visible=False, scale=0.2)
def findFarRightCorner(polyData, linkFrame):
    '''
    Within a point cloud find the point to the far right from the link
    The input is the 4 corners of a minimum bounding box
    '''
    # Rotate the link frame 45 degrees about z; the corner with the smallest
    # coordinate along this diagonal frame's y axis is the far-right corner.
    diagonalTransform = transformUtils.copyFrame(linkFrame)
    diagonalTransform.PreMultiply()
    diagonalTransform.Concatenate( transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,45]) )
    vis.updateFrame(diagonalTransform, 'diagonal frame', parent=getDebugFolder(), visible=False)
    viewOrigin = diagonalTransform.TransformPoint([0.0, 0.0, 0.0])
    viewY = diagonalTransform.TransformVector([0.0, 1.0, 0.0])
    # Label each corner with its signed distance along the diagonal y axis.
    # (Dead fetches of 'Points', viewX and viewZ removed: they were never
    # used before polyData was relabeled below.)
    polyData = labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_foot_y')
    vis.updatePolyData( polyData, 'cornerPoints', parent='segmentation', visible=False)
    farRightIndex = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_foot_y').argmin()
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    return points[farRightIndex,:]
def findMinimumBoundingRectangle(polyData, linkFrame):
    '''
    Find minimum bounding rectangle of a rectangular point cloud
    The input is assumed to be a rectangular point cloud e.g. the top of a block or table
    Returns transform of far right corner (pointing away from robot)
    '''
    # Originally From: https://github.com/dbworth/minimum-area-bounding-rectangle
    polyData = applyVoxelGrid(polyData, leafSize=0.02)
    def get2DAsPolyData(xy_points):
        '''
        Convert a 2D numpy array to a 3D polydata by appending z=0
        '''
        d = np.vstack((xy_points.T, np.zeros( xy_points.shape[0]) )).T
        d2=d.copy()
        return vtkNumpy.getVtkPolyDataFromNumpyPoints( d2 )
    # Work in the ground (x, y) plane only.
    pts =vtkNumpy.getNumpyFromVtk( polyData , 'Points' )
    xy_points = pts[:,[0,1]]
    vis.updatePolyData( get2DAsPolyData(xy_points) , 'xy_points', parent=getDebugFolder(), visible=False)
    hull_points = qhull_2d.qhull2D(xy_points)
    vis.updatePolyData( get2DAsPolyData(hull_points) , 'hull_points', parent=getDebugFolder(), visible=False)
    # Reverse order of points, to match output from other qhull implementations
    hull_points = hull_points[::-1]
    # print 'Convex hull points: \n', hull_points, "\n"
    # Find minimum area bounding rectangle
    (rot_angle, rectArea, rectDepth, rectWidth, center_point, corner_points_ground) = min_bounding_rect.minBoundingRect(hull_points)
    vis.updatePolyData( get2DAsPolyData(corner_points_ground) , 'corner_points_ground', parent=getDebugFolder(), visible=False)
    # Lift the 2D corners back to 3D at the cloud centroid's height.
    polyDataCentroid = computeCentroid(polyData)
    cornerPoints = np.vstack((corner_points_ground.T, polyDataCentroid[2]*np.ones( corner_points_ground.shape[0]) )).T
    cornerPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(cornerPoints)
    # Create a frame at the far right point - which points away from the robot
    farRightCorner = findFarRightCorner(cornerPolyData , linkFrame)
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    #vis.showFrame(viewFrame, "viewFrame")
    robotYaw = math.atan2( viewDirection[1], viewDirection[0] )*180.0/np.pi
    blockAngle = rot_angle*(180/math.pi)
    #print "robotYaw ", robotYaw
    #print "blockAngle ", blockAngle
    # Of the four equivalent rectangle orientations (90-degree rotations),
    # pick the one whose yaw best matches the robot's view direction.
    blockAngleAll = np.array([blockAngle , blockAngle+90 , blockAngle+180, blockAngle+270])
    values = blockAngleAll - robotYaw
    for i in range(0,4):
        if(values[i]>180):
            values[i]=values[i]-360
    values = abs(values)
    min_idx = np.argmin(values)
    if ( (min_idx==1) or (min_idx==3) ):
        #print "flip rectDepth and rectWidth as angle is not away from robot"
        temp = rectWidth ; rectWidth = rectDepth ; rectDepth = temp
    #print "best angle", blockAngleAll[min_idx]
    rot_angle = blockAngleAll[min_idx]*math.pi/180.0
    cornerTransform = transformUtils.frameFromPositionAndRPY( farRightCorner , [0,0, np.rad2deg(rot_angle) ] )
    vis.showFrame(cornerTransform, "cornerTransform", parent=getDebugFolder(), visible=False)
    #print "Minimum area bounding box:"
    #print "Rotation angle:", rot_angle, "rad (", rot_angle*(180/math.pi), "deg )"
    #print "rectDepth:", rectDepth, " rectWidth:", rectWidth, " Area:", rectArea
    #print "Center point: \n", center_point # numpy array
    #print "Corner points: \n", cornerPoints, "\n" # numpy array
    return cornerTransform, rectDepth, rectWidth, rectArea
| manuelli/director | src/python/director/segmentation.py | Python | bsd-3-clause | 165,457 | [
"VTK"
] | dbdf5f3687196cb338314cd531f1e1c5cd07af9684b236a2e247a7e4e9d75609 |
"""
Unit Tests for assemble module.
"""
import unittest
import sys
import numpy
import argparse
import collections
import pysam
from mixemt import observe
from mixemt import assemble
from mixemt import phylotree
from mixemt import preprocess
# TODO: Write tests for the follwing functions.
# write_haplotypes(samfile, contrib_reads, reads, read_sigs, prefix, verbose)
class TestContributors(unittest.TestCase):
    """
    Tests for assemble.get_contributors and its private helpers.
    setUp builds a 9-haplotype phylotree over a 9-base all-'A' reference,
    observed base counts consistent with a mixture of haplotypes A and E,
    and fake EM results (haplotype proportions plus a per-read/haplotype
    probability matrix).
    """
    def setUp(self):
        # Default command-line options for the assembler.
        parser = argparse.ArgumentParser()
        self.args = parser.parse_args([])
        self.args.verbose = False
        self.args.min_reads = 1
        self.args.min_var_reads = 1
        self.args.frac_var_reads = 0.02
        self.args.var_fraction = 0.5
        self.args.var_count = None
        self.args.var_check = False
        self.args.contributors = None
        # Small phylotree: variants (e.g. A2T A4T) define each haplogroup.
        phy_in = ['I, A1G ,,',
                  ',H, A3T A5T ,,',
                  ',,F, A6T ,,',
                  ',,,B, A8T ,,',
                  ',,,C, T5A ,,',
                  ',,G, A7T ,,',
                  ',,,D, A9T ,,',
                  ',,,E, A4T ,,',
                  ',A, A2T A4T ,,']
        self.ref = "AAAAAAAAA"
        self.phy = phylotree.Phylotree(phy_in, refseq=self.ref)
        self.cons = [['A', 0.4], ['E', 0.3]]
        # Observed base counts (0-based positions) supporting A and E.
        self.obs = observe.ObservedBases()
        self.obs.obs_tab[1]['T'] = 1
        self.obs.obs_tab[3]['T'] = 2
        self.obs.obs_tab[0]['G'] = 1
        self.obs.obs_tab[6]['T'] = 1
        self.obs.obs_tab[2]['T'] = 1
        self.obs.obs_tab[4]['T'] = 1
        self.wts = numpy.array([1, 1, 1])
        self.haps = list('ABCDEFGHI')
        # EM output: proportions per haplotype and read/haplotype matrix
        # (reads 0-1 look like A, read 2 looks like E).
        self.props = numpy.array([0.40, 0.01, 0.01, 0.01, 0.3,
                                  0.01, 0.01, 0.01, 0.01])
        self.mix_mat = numpy.array([
            [0.91, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
            [0.91, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
            [0.01, 0.01, 0.01, 0.01, 0.91, 0.01, 0.01, 0.01, 0.01]])
        self.em_results = (self.props, self.mix_mat)
    def test_get_contributors_no_phy_vars(self):
        # Without the variant check, A and E are both reported.
        self.args.var_check = False
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'A', 0.40], ['hap2', 'E', 0.3]]
        self.assertEqual(res, exp)
    def test_get_contributors_no_phy_vars_rm(self):
        # Raising the read thresholds drops E (only one supporting read).
        self.args.var_check = False
        self.args.min_reads = 2
        self.args.min_var_reads = 2
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'A', 0.40]]
        self.assertEqual(res, exp)
    def test_get_contributors_with_phy_vars(self):
        # Variant check enabled; both haplotypes have observed variants.
        self.args.var_check = True
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'A', 0.40], ['hap2', 'E', 0.3]]
        self.assertEqual(res, exp)
    def test_get_contributors_with_phy_vars_rm(self):
        # Variant check plus strict read thresholds removes everything.
        self.args.var_check = True
        self.args.min_reads = 2
        self.args.min_var_reads = 2
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = []
        self.assertEqual(res, exp)
    def test_get_contributors_sorted(self):
        # Contributors are reported in decreasing proportion order.
        self.props[4] = 0.6
        self.args.var_check = False
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'E', 0.60], ['hap2', 'A', 0.4]]
        self.assertEqual(res, exp)
    def test_get_contributors_manual(self):
        # Explicit contributor list overrides detection.
        self.args.contributors = "A,E"
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'A', 0.40], ['hap2', 'E', 0.3]]
        self.assertEqual(res, exp)
    def test_get_contributors_manual_weird_choice(self):
        # Manually chosen haplotypes are honored even at tiny proportions.
        self.args.contributors = "E,F"
        res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                        self.wts, self.em_results, self.args)
        exp = [['hap1', 'E', 0.3], ['hap2', 'F', 0.01]]
        self.assertEqual(res, exp)
    def test_get_contributors_manual_bad_choice(self):
        # An unknown haplogroup name raises ValueError.
        with self.assertRaises(ValueError):
            self.args.contributors = "E,Z"
            res = assemble.get_contributors(self.phy, self.obs, self.haps,
                                            self.wts, self.em_results,
                                            self.args)
    def test_find_contribs_from_reads_wts_all_one(self):
        res = assemble._find_contribs_from_reads(self.mix_mat, self.wts,
                                                 self.args)
        exp = [0, 4]
        self.assertEqual(res, exp)
    def test_find_contribs_from_reads_wts_all_one_min_reads(self):
        # min_reads=2 excludes E: only one read supports it.
        self.args.min_reads = 2
        res = assemble._find_contribs_from_reads(self.mix_mat, self.wts,
                                                 self.args)
        exp = [0]
        self.assertEqual(res, exp)
    def test_find_contribs_from_reads_wts_save_min_reads(self):
        # A weight of 2 on E's read keeps it above min_reads.
        self.args.min_reads = 2
        self.wts = [1, 1, 2]
        res = assemble._find_contribs_from_reads(self.mix_mat, self.wts,
                                                 self.args)
        exp = [0, 4]
        self.assertEqual(res, exp)
    def test_check_contrib_phy_vars_no_rm(self):
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons, res)
    def test_check_contrib_phy_vars_rm_none(self):
        # C has no observed variant support, so it is removed.
        self.cons.append(['C', 0.1])
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertNotEqual(self.cons, res)
        self.assertEqual(self.cons[0:2], res)
    def test_check_contrib_phy_vars_no_rm_one_base(self):
        # A single observed base supporting C is enough to keep it.
        self.cons.append(['C', 0.1])
        self.obs.obs_tab[5]['T'] = 1
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons, res)
    def test_check_contrib_phy_vars_rm_shared(self):
        # Should remove E, A4T already seen in A, does not count for E.
        self.obs.obs_tab[6]['T'] = 0
        self.obs.obs_tab[2]['T'] = 0
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons[0:1], res)
    def test_check_contrib_phy_vars_empty_obs(self):
        # no observations, no keepers.
        self.obs.obs_tab = collections.defaultdict(collections.Counter)
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual([], res)
    def test_check_contrib_phy_vars_high_min_reads(self):
        # required number of reads too high.
        self.args.min_var_reads = 10
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual([], res)
    def test_check_contrib_phy_vars_high_min_fraction_requirement(self):
        # High var_fraction removes C when its site is mostly reference.
        self.args.var_fraction = 0.9
        self.cons.append(['C', 0.1])
        self.obs.obs_tab[4]['A'] = 1
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons[0:2], res)
    def test_check_contrib_phy_vars_high_var_count_requirement(self):
        # required number of variants too high.
        self.args.var_count = 1
        self.args.var_fraction = 0.9
        self.cons.append(['C', 0.1])
        self.obs.obs_tab[5]['T'] = 1
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons, res)
    def test_check_contrib_phy_vars_check_ancestral(self):
        # should pass if A isn't present than 4 can be ancestral evidence
        self.cons.append(['C', 0.1])
        self.obs.obs_tab[4]['A'] = 1
        self.obs.obs_tab[1]['T'] = 0
        self.obs.obs_tab[3]['T'] = 0
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons[1:], res)
    def test_check_contrib_phy_vars_ignore_ancestral(self):
        # should not find C, T5A is explained by 'A'
        # self.cons.append(['C', 0.1])
        self.obs.obs_tab[4]['A'] = 1
        res = assemble._check_contrib_phy_vars(self.phy, self.obs,
                                               self.cons, self.args)
        self.assertEqual(self.cons, res)
class TestAssignReads(unittest.TestCase):
    """
    Tests for assigning reads to contributor haplotypes.  setUp builds a
    log-probability read/haplotype matrix where reads 0-1 favor A, read 3
    favors E, and read 2 is ambiguous (A vs E).
    """
    def setUp(self):
        self.haps = list('ABCDEFGHI')
        self.props = numpy.array([0.40, 0.01, 0.01, 0.01, 0.3,
                                  0.01, 0.01, 0.01, 0.01])
        # Log-space read/haplotype probabilities.
        self.mix_mat = numpy.log(numpy.array([
            [0.91, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
            [0.91, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
            [0.30, 0.01, 0.01, 0.01, 0.40, 0.01, 0.01, 0.01, 0.01],
            [0.01, 0.01, 0.01, 0.01, 0.91, 0.01, 0.01, 0.01, 0.01]]))
        self.em_results = (self.props, self.mix_mat)
        self.cons = [['hap1', 'A', 0.40], ['hap2', 'E', 0.3]]
        # Read signature groups: read index -> member read IDs.
        self.reads = [['A', 'B'], ['C'], ['D'], ['E', 'F', 'G']]
    def test_find_best_n_for_read_n2(self):
        # Top 2 contributor indexes by probability, best first.
        prob = numpy.array([0.1, 0.2, 0.1, 0.3, 0.9, 0.1])
        con_i = [1,3,5]
        res = assemble._find_best_n_for_read(prob, con_i, 2)
        self.assertEqual(res, [3,1])
    def test_find_best_n_for_read_n3(self):
        prob = numpy.array([0.1, 0.2, 0.1, 0.3, 0.9, 0.1])
        con_i = [1,3,5]
        res = assemble._find_best_n_for_read(prob, con_i, 3)
        self.assertEqual(res, [3,1,5])
    def test_assign_reads_simple(self):
        # Fold difference of 2.0 leaves the ambiguous read unassigned.
        res = assemble.assign_read_indexes(self.cons, self.em_results,
                                           self.haps, self.reads, 2.0)
        exp = {"hap1":{0, 1}, "hap2":{3}, "unassigned":{2}}
        self.assertEqual(res, exp)
    def test_assign_reads_simple_low_min_fold(self):
        # A lower fold threshold lets the ambiguous read go to E.
        res = assemble.assign_read_indexes(self.cons, self.em_results,
                                           self.haps, self.reads, 1.5)
        exp = {"hap1":{0, 1}, "hap2":{2, 3}}
        self.assertEqual(res, exp)
    def test_assign_reads_simple_high_min_fold(self):
        # An unreachable fold threshold leaves everything unassigned.
        res = assemble.assign_read_indexes(self.cons, self.em_results,
                                           self.haps, self.reads, 200)
        exp = {"unassigned":{0, 1, 2, 3}}
        self.assertEqual(res, exp)
    def test_assign_reads_simple_only_one_con(self):
        # With a single contributor every read is assigned to it.
        res = assemble.assign_read_indexes(self.cons[0:1], self.em_results,
                                           self.haps, self.reads, 2)
        exp = {"hap1":{0, 1, 2, 3}}
        self.assertEqual(res, exp)
    def test_get_contrib_read_ids_simple(self):
        idxs = [0, 2]
        res = assemble.get_contrib_read_ids(idxs, self.reads)
        self.assertEqual(set('ABD'), res)
    def test_get_contrib_read_ids_all(self):
        idxs = list(range(len(self.reads)))
        res = assemble.get_contrib_read_ids(idxs, self.reads)
        self.assertEqual(set('ABCDEFG'), res)
    def test_get_contrib_read_ids_empty(self):
        idxs = []
        res = assemble.get_contrib_read_ids(idxs, self.reads)
        self.assertEqual(set(), res)
class TestExtendAssemblies(unittest.TestCase):
    """Tests for consensus calling, novel-variant discovery and read
    re-assignment during assembly extension.
    """

    def setUp(self):
        parser = argparse.ArgumentParser()
        # 20 bp all-'A' reference the aligned segments are placed against.
        self.ref = 'AAAAAAAAAAAAAAAAAAAA'
        self.args = parser.parse_args([])
        self.args.min_mq = 30
        self.args.min_bq = 30
        self.args.cons_cov = 2
        # Three synthetic pysam alignments; aln3 carries a T mismatch at
        # reference position 15 (its first base).
        self.aln1 = pysam.AlignedSegment()
        self.aln1.reference_start = 10
        self.aln1.query_name = 'read1'
        self.aln1.mapping_quality = 30
        self.aln1.query_sequence = "AAAAA"
        self.aln1.query_qualities = [30] * 5
        self.aln1.cigarstring = '5M'
        self.aln2 = pysam.AlignedSegment()
        self.aln2.reference_start = 13
        self.aln2.query_name = 'read2'
        self.aln2.mapping_quality = 30
        self.aln2.query_sequence = "AAAAA"
        self.aln2.query_qualities = [30] * 5
        self.aln2.cigarstring = '5M'
        self.aln3 = pysam.AlignedSegment()
        self.aln3.reference_start = 15
        self.aln3.query_name = 'read3'
        self.aln3.mapping_quality = 30
        self.aln3.query_sequence = "TAAAA"
        self.aln3.query_qualities = [30] * 5
        self.aln3.cigarstring = '5M'
        return

    def test_call_consensus_no_aligned_sequences(self):
        # No reads at all yields an empty consensus string.
        res = assemble.call_consensus(self.ref, [], self.args.cons_cov,
                                      self.args, strict=True)
        exp = ''
        self.assertEqual(res, exp)

    def test_call_consensus_simple(self):
        # Only positions covered by >= cons_cov (2) reads are called.
        res = assemble.call_consensus(self.ref, [self.aln1, self.aln2],
                                      self.args.cons_cov, self.args,
                                      strict=True)
        exp = 'NNNNNNNNNNNNNAANNNNN'
        self.assertEqual(res, exp)

    def test_call_consensus_high_coverage_requirement(self):
        # Coverage requirement above what the reads provide: all 'N'.
        self.args.cons_cov = 3
        res = assemble.call_consensus(self.ref, [self.aln1, self.aln2],
                                      self.args.cons_cov, self.args,
                                      strict=True)
        exp = 'NNNNNNNNNNNNNNNNNNNN'
        self.assertEqual(res, exp)

    def test_call_consensus_low_coverage_requirement(self):
        # With cons_cov=1, every covered position is called.
        self.args.cons_cov = 1
        res = assemble.call_consensus(self.ref, [self.aln1, self.aln2],
                                      self.args.cons_cov, self.args,
                                      strict=True)
        exp = 'NNNNNNNNNNAAAAAAAANN'
        self.assertEqual(res, exp)

    def test_call_consensus_disagreement_low_coverage_requirement(self):
        # aln3's T at position 15 disagrees with aln2's A; in strict mode
        # the disagreeing position is left as 'N'.
        self.args.cons_cov = 1
        res = assemble.call_consensus(self.ref,
                                      [self.aln1, self.aln2, self.aln3],
                                      self.args.cons_cov, self.args,
                                      strict=True)
        exp = 'NNNNNNNNNNAAAAANAAAA'
        self.assertEqual(res, exp)

    def test_call_consensus_disagreement_coverage_requirement(self):
        res = assemble.call_consensus(self.ref,
                                      [self.aln1, self.aln2, self.aln3],
                                      self.args.cons_cov, self.args,
                                      strict=True)
        exp = 'NNNNNNNNNNNNNAANAANN'
        self.assertEqual(res, exp)

    def test_find_new_variants_empty_input(self):
        res = assemble.find_new_variants(self.ref, {}, self.args)
        exp = {}
        self.assertEqual(res, exp)

    def test_find_new_variants_none_to_find(self):
        # Both haplotypes agree with each other; nothing to report.
        res = assemble.find_new_variants(self.ref,
                                         {'A':[self.aln1, self.aln2],
                                          'B':[self.aln1, self.aln1],
                                          'unassigned':[self.aln3]}, self.args)
        exp = {}
        self.assertEqual(res, exp)

    def test_find_new_variants_one_diff_two_contributors(self):
        # A/T difference at position 15 distinguishes haps 'A' and 'B'.
        res = assemble.find_new_variants(self.ref,
                                         {'A':[self.aln2, self.aln2],
                                          'B':[self.aln3, self.aln3],
                                          'unassigned':[self.aln1]}, self.args)
        exp = {(15, 'A'):'A', (15, 'T'):'B'}
        self.assertEqual(res, exp)

    def test_find_new_variants_one_diff_three_contributors(self):
        res = assemble.find_new_variants(self.ref,
                                         {'A':[self.aln2, self.aln2],
                                          'B':[self.aln3, self.aln3],
                                          'C':[self.aln2, self.aln2],
                                          'unassigned':[self.aln1]}, self.args)
        # base 'T' can be assigned to hap 'B', but 'A' cannot be assigned
        # because 'A' and 'C' both have 'A'.
        exp = {(15, 'T'):'B'}
        self.assertEqual(res, exp)

    def test_find_new_variants_two_diff_missing_cov_three_contributors(self):
        self.aln1.query_sequence = "AAGAA"
        res = assemble.find_new_variants(self.ref,
                                         {'A':[self.aln2, self.aln2],
                                          'B':[self.aln3, self.aln3],
                                          'C':[self.aln1, self.aln2,
                                               self.aln2],
                                          'unassigned':[self.aln1]}, self.args)
        # A13G variant is only covered in hap 'C'. Cannot call new variant.
        exp = {(15, 'T'):'B'}
        self.assertEqual(res, exp)

    def test_find_new_variants_one_diff_cant_assign(self):
        # Both haplotypes contain both alleles; no discriminating variant.
        res = assemble.find_new_variants(self.ref,
                                         {'A':[self.aln2, self.aln3],
                                          'B':[self.aln2, self.aln3],
                                          'unassigned':[self.aln1]}, self.args)
        exp = {}
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_no_assign(self):
        # aln1 does not cover position 15, so it stays unassigned.
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[self.aln1]}
        new_var = {(15, 'A'):'A', (15, 'T'):'B'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2],
               'B':[self.aln3, self.aln3],
               'unassigned':[self.aln1]}
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_simple_assign(self):
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[self.aln2]}
        new_var = {(15, 'A'):'A', (15, 'T'):'B'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2, self.aln2],
               'B':[self.aln3, self.aln3],
               'unassigned':[]}
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_multi_assign(self):
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[self.aln1, self.aln2, self.aln3]}
        new_var = {(15, 'A'):'A', (15, 'T'):'B'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2, self.aln2],
               'B':[self.aln3, self.aln3, self.aln3],
               'unassigned':[self.aln1]}
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_single_assign(self):
        # Only the (15, 'A') variant is informative; aln3 stays unassigned.
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'C':[self.aln3, self.aln3],
                         'unassigned':[self.aln1, self.aln2, self.aln3]}
        new_var = {(15, 'A'):'A'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2, self.aln2],
               'B':[self.aln3, self.aln3],
               'C':[self.aln3, self.aln3],
               'unassigned':[self.aln1, self.aln3]}
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_disagree(self):
        # aln2/aln3 match variants of both haplotypes and so cannot be
        # placed; aln1 only supports (13, 'A') and goes to 'B'.
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[self.aln1, self.aln2, self.aln3]}
        new_var = {(13, 'A'):'B', (15, 'A'):'A'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2],
               'B':[self.aln3, self.aln3, self.aln1],
               'unassigned':[self.aln2, self.aln3]}
        self.maxDiff = None
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_empty_unassigned(self):
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[]}
        new_var = {(13, 'A'):'B', (15, 'A'):'A'}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2],
               'B':[self.aln3, self.aln3],
               'unassigned':[]}
        self.maxDiff = None
        self.assertEqual(res, exp)

    def test_assign_reads_from_new_vars_no_vars(self):
        contrib_reads = {'A':[self.aln2, self.aln2],
                         'B':[self.aln3, self.aln3],
                         'unassigned':[self.aln1, self.aln2, self.aln3]}
        new_var = {}
        res = assemble.assign_reads_from_new_vars(contrib_reads, new_var,
                                                  self.args)
        exp = {'A':[self.aln2, self.aln2],
               'B':[self.aln3, self.aln3],
               'unassigned':[self.aln1, self.aln2, self.aln3]}
        self.maxDiff = None
        self.assertEqual(res, exp)
# Allow running this test module directly (python assemble_test.py).
if __name__ == '__main__':
    unittest.main()
| svohr/mixemt | mixemt/test/assemble_test.py | Python | mit | 22,073 | [
"pysam"
] | 83fd7adc8b56c669421303064e9502192ade7b3a7b02f651c634310864821135 |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the creator dashboard and the notifications dashboard."""
from core.controllers import creator_dashboard
from core.domain import event_services
from core.domain import exp_services
from core.domain import feedback_domain
from core.domain import feedback_services
from core.domain import rating_services
from core.domain import subscription_services
from core.domain import rights_manager
from core.domain import stats_jobs_continuous_test
from core.domain import user_jobs_continuous
from core.domain import user_jobs_continuous_test
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
# Resolve storage-model modules and the taskqueue service through the
# platform registry so tests use the configured backend implementations.
(user_models, stats_models) = models.Registry.import_models(
    [models.NAMES.user, models.NAMES.statistics])
taskqueue_services = models.Registry.import_taskqueue_services()
class HomePageTest(test_utils.GenericTestBase):
    """Tests for the home page and notifications-dashboard access control."""

    def test_logged_out_homepage(self):
        """Test the logged-out version of the home page."""
        response = self.testapp.get('/')

        # Logged-out visitors are redirected to the splash page.
        self.assertEqual(response.status_int, 302)
        self.assertIn('splash', response.headers['location'])

    def test_notifications_dashboard_redirects_for_logged_out_users(self):
        """Test the logged-out view of the notifications dashboard."""
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 302)
        # This should redirect to the login page.
        self.assertIn('signup', response.headers['location'])
        self.assertIn('notifications_dashboard', response.headers['location'])

        self.login('reader@example.com')
        response = self.testapp.get('/notifications_dashboard')
        # This should redirect the user to complete signup.
        self.assertEqual(response.status_int, 302)
        self.logout()

    def test_logged_in_notifications_dashboard(self):
        """Test the logged-in view of the notifications dashboard."""
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)

        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 200)
        self.logout()
class CreatorDashboardStatisticsTest(test_utils.GenericTestBase):
    """Tests for play/rating statistics aggregated on the creator dashboard.

    Uses the modified (test) continuous-computation aggregators to populate
    UserStatsModel and verifies total plays, ratings counts and averages for
    single and multiple explorations/owners.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12) has
    been replaced by ``assertEqual`` throughout.
    """
    OWNER_EMAIL_1 = 'owner1@example.com'
    OWNER_USERNAME_1 = 'owner1'
    OWNER_EMAIL_2 = 'owner2@example.com'
    OWNER_USERNAME_2 = 'owner2'

    EXP_ID_1 = 'exp_id_1'
    EXP_TITLE_1 = 'Exploration title 1'
    EXP_ID_2 = 'exp_id_2'
    EXP_TITLE_2 = 'Exploration title 2'

    EXP_DEFAULT_VERSION = 1

    USER_SESSION_ID = 'session1'
    USER_IMPACT_SCORE_DEFAULT = 0.0

    def setUp(self):
        super(CreatorDashboardStatisticsTest, self).setUp()
        self.signup(self.OWNER_EMAIL_1, self.OWNER_USERNAME_1)
        self.signup(self.OWNER_EMAIL_2, self.OWNER_USERNAME_2)

        self.owner_id_1 = self.get_user_id_from_email(self.OWNER_EMAIL_1)
        self.owner_id_2 = self.get_user_id_from_email(self.OWNER_EMAIL_2)

        self.owner_1 = user_services.UserActionsInfo(self.owner_id_1)

    def _record_start(self, exp_id, exp_version, state):
        """Record start event to an exploration.

        Completing the exploration is not necessary here since the total_plays
        are currently being counted taking into account only the # of starts.
        """
        event_services.StartExplorationEventHandler.record(
            exp_id, exp_version, state, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)

    def _rate_exploration(self, exp_id, ratings):
        """Create num_ratings ratings for exploration with exp_id,
        of values from ratings.
        """
        # Generate unique user ids to rate an exploration. Each user id needs
        # to be unique since each user can only give an exploration one rating.
        user_ids = ['user%d' % i for i in range(len(ratings))]
        self.process_and_flush_pending_tasks()
        for ind, user_id in enumerate(user_ids):
            rating_services.assign_rating_to_exploration(
                user_id, exp_id, ratings[ind])
        self.process_and_flush_pending_tasks()

    def _drain_continuous_job_queue(self):
        """Assert exactly one continuous job is enqueued, process it, and
        verify the queue is empty afterwards.
        """
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
        self.process_and_flush_pending_tasks()
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
        self.process_and_flush_pending_tasks()

    def _run_user_stats_aggregator_job(self):
        """Run the (test-modified) user stats aggregator to completion."""
        (user_jobs_continuous_test.ModifiedUserStatsAggregator.
            start_computation())
        self._drain_continuous_job_queue()

    def _run_stats_aggregator_jobs(self):
        """Run the (test-modified) exploration stats aggregator to
        completion.
        """
        (stats_jobs_continuous_test.ModifiedStatisticsAggregator
         .start_computation())
        self._drain_continuous_job_queue()

    def test_stats_no_explorations(self):
        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self._run_user_stats_aggregator_job()
        # With no explorations, no stats model should be created at all.
        self.assertIsNone(user_models.UserStatsModel.get(
            self.owner_id_1, strict=False))
        self.logout()

    def test_one_play_for_single_exploration(self):
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._run_stats_aggregator_jobs()

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 0)
        self.assertIsNone(user_model.average_ratings)
        self.logout()

    def test_one_rating_for_single_exploration(self):
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_id = self.EXP_ID_1
        self._rate_exploration(exp_id, [4])

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 0)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_one_play_and_rating_for_single_exploration(self):
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_id = self.EXP_ID_1
        exp_version = self.EXP_DEFAULT_VERSION
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._run_stats_aggregator_jobs()

        self._rate_exploration(exp_id, [3])

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 3)
        self.logout()

    def test_multiple_plays_and_ratings_for_single_exploration(self):
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._run_stats_aggregator_jobs()

        self._rate_exploration(exp_id, [3, 4, 5])

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 4)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 3)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_one_play_and_rating_for_multiple_explorations(self):
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._run_stats_aggregator_jobs()

        self._rate_exploration(exp_id_1, [4])

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_multiple_plays_and_ratings_for_multiple_explorations(self):
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        exploration_2 = self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name
        exp_id_2 = self.EXP_ID_2
        state_2 = exploration_2.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._rate_exploration(exp_id_1, [4])
        self._rate_exploration(exp_id_2, [3, 3])
        self._run_stats_aggregator_jobs()

        self._run_user_stats_aggregator_job()
        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 3)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 3)
        self.assertEqual(user_model.average_ratings, 10/3.0)
        self.logout()

    def test_stats_for_single_exploration_with_multiple_owners(self):
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_1, self.owner_id_2,
            rights_manager.ROLE_OWNER)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._run_stats_aggregator_jobs()

        self._rate_exploration(exp_id, [3, 4, 5])
        self.logout()

        self.login(self.OWNER_EMAIL_2)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        self._rate_exploration(exp_id, [3, 4, 5])

        self._run_user_stats_aggregator_job()

        # Both owners of the same exploration see identical stats.
        user_model_1 = user_models.UserStatsModel.get(
            self.owner_id_1)
        self.assertEqual(user_model_1.total_plays, 2)
        self.assertEqual(
            user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model_1.num_ratings, 3)
        self.assertEqual(user_model_1.average_ratings, 4)

        user_model_2 = user_models.UserStatsModel.get(
            self.owner_id_2)
        self.assertEqual(user_model_2.total_plays, 2)
        self.assertEqual(
            user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model_2.num_ratings, 3)
        self.assertEqual(user_model_2.average_ratings, 4)

        self.logout()

    def test_stats_for_multiple_explorations_with_multiple_owners(self):
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        exploration_2 = self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_1, self.owner_id_2,
            rights_manager.ROLE_OWNER)
        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_2, self.owner_id_2,
            rights_manager.ROLE_OWNER)

        self.login(self.OWNER_EMAIL_2)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name
        exp_id_2 = self.EXP_ID_2
        state_2 = exploration_2.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._run_stats_aggregator_jobs()

        self._rate_exploration(exp_id_1, [5, 3])
        self._rate_exploration(exp_id_2, [5, 5])

        self._run_user_stats_aggregator_job()

        expected_results = {
            'total_plays': 5,
            'num_ratings': 4,
            'average_ratings': 18/4.0
        }

        user_model_2 = user_models.UserStatsModel.get(self.owner_id_2)
        self.assertEqual(
            user_model_2.total_plays, expected_results['total_plays'])
        self.assertEqual(
            user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(
            user_model_2.num_ratings, expected_results['num_ratings'])
        self.assertEqual(
            user_model_2.average_ratings, expected_results['average_ratings'])
        self.logout()

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        user_model_1 = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(
            user_model_1.total_plays, expected_results['total_plays'])
        self.assertEqual(
            user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(
            user_model_1.num_ratings, expected_results['num_ratings'])
        self.assertEqual(
            user_model_1.average_ratings, expected_results['average_ratings'])
        self.logout()
class CreatorDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for the creator dashboard data handler: exploration listings,
    role-based visibility, feedback thread counts and subscriber lists.
    """

    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collaborator'

    OWNER_EMAIL_1 = 'owner1@example.com'
    OWNER_USERNAME_1 = 'owner1'
    OWNER_EMAIL_2 = 'owner2@example.com'
    OWNER_USERNAME_2 = 'owner2'

    EXP_ID = 'exp_id'
    EXP_TITLE = 'Exploration title'
    EXP_ID_1 = 'exp_id_1'
    EXP_TITLE_1 = 'Exploration title 1'
    EXP_ID_2 = 'exp_id_2'
    EXP_TITLE_2 = 'Exploration title 2'
    EXP_ID_3 = 'exp_id_3'
    EXP_TITLE_3 = 'Exploration title 3'

    def setUp(self):
        super(CreatorDashboardHandlerTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.OWNER_EMAIL_1, self.OWNER_USERNAME_1)
        self.signup(self.OWNER_EMAIL_2, self.OWNER_USERNAME_2)
        self.signup(self.COLLABORATOR_EMAIL, self.COLLABORATOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner_id_1 = self.get_user_id_from_email(self.OWNER_EMAIL_1)
        self.owner_id_2 = self.get_user_id_from_email(self.OWNER_EMAIL_2)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        self.owner_1 = user_services.UserActionsInfo(self.owner_id_1)
        self.collaborator_id = self.get_user_id_from_email(
            self.COLLABORATOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

    def test_no_explorations(self):
        self.login(self.OWNER_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()

    def test_no_explorations_and_visit_dashboard(self):
        self.login(self.OWNER_EMAIL)
        # Testing that creator only visit dashboard without any exploration
        # created.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 0)
        self.logout()

    def test_create_single_exploration_and_visit_dashboard(self):
        self.login(self.OWNER_EMAIL)
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        # Testing the quantity of exploration created and it should be 1.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.logout()

    def test_create_two_explorations_delete_one_and_visit_dashboard(self):
        self.login(self.OWNER_EMAIL_1)
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)
        # Testing the quantity of exploration and it should be 2.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_services.delete_exploration(self.owner_id_1, self.EXP_ID_1)
        # Testing whether 1 exploration left after deletion of previous one.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.logout()

    def test_create_multiple_explorations_delete_all_and_visit_dashboard(self):
        self.login(self.OWNER_EMAIL_2)
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_2, title=self.EXP_TITLE_1)
        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_2, title=self.EXP_TITLE_2)
        self.save_new_default_exploration(
            self.EXP_ID_3, self.owner_id_2, title=self.EXP_TITLE_3)
        # Testing for quantity of explorations to be 3.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 3)

        # Testing for deletion of all created previously.
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_1)
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_2)
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_3)
        # All explorations have been deleted, so the dashboard query should not
        # load any explorations.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 0)
        self.logout()

    def test_managers_can_see_explorations(self):
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        self.set_admins([self.OWNER_USERNAME])

        self.login(self.OWNER_EMAIL)
        # Owners see their private exploration on the dashboard...
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)

        # ...and still see it (as public) after publication.
        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)
        self.logout()

    def test_collaborators_can_see_explorations(self):
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner, self.EXP_ID, self.collaborator_id,
            rights_manager.ROLE_EDITOR)
        self.set_admins([self.OWNER_USERNAME])

        # Editors (collaborators) also see the exploration, private or public.
        self.login(self.COLLABORATOR_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)

        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)

        self.logout()

    def test_viewer_cannot_see_explorations(self):
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner, self.EXP_ID, self.viewer_id,
            rights_manager.ROLE_VIEWER)
        self.set_admins([self.OWNER_USERNAME])

        # View-only access does not surface the exploration on the dashboard.
        self.login(self.VIEWER_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])

        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()

    def test_can_see_feedback_thread_counts(self):
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)

        self.login(self.OWNER_EMAIL)

        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['num_open_threads'], 0)
        self.assertEqual(
            response['explorations_list'][0]['num_total_threads'], 0)

        def mock_get_thread_analytics_multi(unused_exploration_ids):
            # Fake analytics: 2 open threads out of 3 total.
            return [feedback_domain.FeedbackAnalytics(self.EXP_ID, 2, 3)]

        with self.swap(
            feedback_services, 'get_thread_analytics_multi',
            mock_get_thread_analytics_multi):

            response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
            self.assertEqual(len(response['explorations_list']), 1)
            self.assertEqual(
                response['explorations_list'][0]['num_open_threads'], 2)
            self.assertEqual(
                response['explorations_list'][0]['num_total_threads'], 3)

        self.logout()

    def test_can_see_subscribers(self):
        self.login(self.OWNER_EMAIL)

        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 0)

        # Subscribe to creator.
        subscription_services.subscribe_to_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 1)
        self.assertEqual(
            response['subscribers_list'][0]['subscriber_username'],
            self.VIEWER_USERNAME)

        # Unsubscribe from creator.
        subscription_services.unsubscribe_from_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 0)
class NotificationsDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for author handling in the notifications dashboard payload."""

    DASHBOARD_DATA_URL = '/notificationsdashboardhandler/data'

    def setUp(self):
        super(NotificationsDashboardHandlerTest, self).setUp()
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

    def _get_recent_notifications_mock_by_viewer(self, unused_user_id):
        """Returns a single feedback thread by VIEWER_ID."""
        return (100000, [{
            'activity_id': 'exp_id',
            'activity_title': 'exp_title',
            'author_id': self.viewer_id,
            'last_updated_ms': 100000,
            'subject': 'Feedback Message Subject',
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
        }])

    def _get_recent_notifications_mock_by_anonymous_user(self, unused_user_id):
        """Returns a single feedback thread by an anonymous user."""
        return (200000, [{
            'activity_id': 'exp_id',
            'activity_title': 'exp_title',
            'author_id': None,
            'last_updated_ms': 100000,
            'subject': 'Feedback Message Subject',
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
        }])

    def test_author_ids_are_handled_correctly(self):
        """Test that author ids are converted into author usernames
        and that anonymous authors are handled correctly.
        """
        # Known author id: the payload carries the username and the raw
        # author_id is stripped before being sent to the client.
        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_viewer):

            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            self.assertEqual(
                response['recent_notifications'][0]['author_username'],
                self.VIEWER_USERNAME)
            self.assertNotIn('author_id', response['recent_notifications'][0])

        # Anonymous author (author_id is None): username is empty.
        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_anonymous_user):

            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            self.assertEqual(
                response['recent_notifications'][0]['author_username'], '')
            self.assertNotIn('author_id', response['recent_notifications'][0])
class CreationButtonsTest(test_utils.GenericTestBase):
    """Tests for the 'create new exploration' flow on the dashboard."""

    def setUp(self):
        super(CreationButtonsTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)

    def test_new_exploration_ids(self):
        """Test generation of exploration ids."""
        self.login(self.EDITOR_EMAIL)

        response = self.testapp.get(feconf.CREATOR_DASHBOARD_URL)
        self.assertEqual(response.status_int, 200)
        csrf_token = self.get_csrf_token_from_response(response)
        exp_a_id = self.post_json(
            feconf.NEW_EXPLORATION_URL, {}, csrf_token
        )[creator_dashboard.EXPLORATION_ID_KEY]
        # New exploration ids are expected to be 12 characters long.
        self.assertEqual(len(exp_a_id), 12)

        self.logout()
| MAKOSCAFEE/oppia | core/controllers/creator_dashboard_test.py | Python | apache-2.0 | 29,835 | [
"VisIt"
] | a87ab498c74e268204b67dad85c2c796fd3813bd2941ff80d3183d9183f15031 |
from __future__ import print_function
import os
import sys
import json
import requests
import ipdb
import xmltodict
import lxml
from pymongo import MongoClient
from tqdm import tqdm
from venomkb_builder import VenomKB
# RCSB PDB legacy REST endpoint for BLASTing an amino-acid sequence against
# PDB entries, and the URL pattern for per-entry thumbnail images.
PDB_BASE_URL_PREFIX = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence='
PDB_BASE_URL_SUFFIX = '&eCutOff=10.0&matrix=BLOSUM62&outputFormat=XML'
PDB_IMG_URL_PREFIX = 'http://www.rcsb.org/pdb/images/'
PDB_IMG_URL_SUFFIX = '_bio_r_250.jpg'

VKB = VenomKB()
VKB.load_database()

# Each entry is a 3-element list:
#   [mongo_id_str, had_curated_pdb_link, image_url_or_empty_string]
image_url_index = []

for p in VKB.proteins:
    try:
        known_3d_structure = False
        best_pdb_id = None
        if "PDB" in p.out_links:
            # The protein already carries a curated PDB cross-reference;
            # use it directly instead of running a BLAST search.
            known_3d_structure = True
            best_pdb_id = p.out_links['PDB']['id']
            print("Already a match for {0}".format(p.venomkb_id))
        else:
            # Fall back to a BLAST search of the sequence against PDB and
            # keep the top-scoring hit, if any.
            query_url = (
                PDB_BASE_URL_PREFIX + p.aa_sequence + PDB_BASE_URL_SUFFIX)
            r = requests.get(query_url)
            data = xmltodict.parse(r.text)
            print("Searching URL: {0}".format(query_url))
            try:
                # Multiple hits: 'Hit' is a list; take the best (first) one.
                # Hit_def looks like '1ABC:1|...', so split off the PDB id.
                best_pdb_id = data['BlastOutput']['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit'][0]['Hit_def'].split(':')[0]
            except KeyError:  # 1 result or fewer: 'Hit' is a single dict.
                try:
                    best_pdb_id = data['BlastOutput']['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']['Hit_def'].split(':')[0]
                except KeyError:  # No results at all.
                    best_pdb_id = ""

        if best_pdb_id != "":
            image_url_index.append([
                p._mongo_id.__str__(),
                known_3d_structure,
                "{0}{1}{2}".format(PDB_IMG_URL_PREFIX, best_pdb_id, PDB_IMG_URL_SUFFIX)
            ])
        else:
            image_url_index.append([
                p._mongo_id.__str__(),
                known_3d_structure,
                ""
            ])
    except Exception as ex:
        # Best-effort: a network failure or malformed XML response for one
        # protein should not abort the whole run, but log it rather than
        # silently swallowing the error.
        print("Skipping a protein due to error: {0}".format(ex))

with open('image_url_index.json', 'w') as fp:
    json.dump(image_url_index, fp)
"BLAST"
] | 0ce69758fb439ae4007007f3bf0c18e11b836d9b6a51b5237c80c38902ff385f |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.